From: Amos Jeffries Date: Tue, 19 Aug 2014 12:31:33 +0000 (-0700) Subject: Merged from trunk rev.13534 X-Git-Tag: merge-candidate-3-v1~506^2~10 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=17437edfeb65511798e1c6956cacac4484c2be7b;p=thirdparty%2Fsquid.git Merged from trunk rev.13534 --- 17437edfeb65511798e1c6956cacac4484c2be7b diff --cc src/HttpHeader.h index baf8b34cb0,b5db4c4b13..d6eefe9eea --- a/src/HttpHeader.h +++ b/src/HttpHeader.h @@@ -46,7 -45,119 +46,8 @@@ class HttpHdrRange class HttpHdrSc; class Packer; class StoreEntry; + class SBuf; -/* constant attributes of http header fields */ - -/// recognized or "known" header fields; and the RFC which defines them (or not) -/// http://www.iana.org/assignments/message-headers/message-headers.xhtml -typedef enum { - HDR_BAD_HDR = -1, - HDR_ACCEPT = 0, /**< RFC 7231 */ - HDR_ACCEPT_CHARSET, /**< RFC 7231 */ - HDR_ACCEPT_ENCODING, /**< RFC 7231 */ - /*HDR_ACCEPT_FEATURES,*/ /* RFC 2295 */ - HDR_ACCEPT_LANGUAGE, /**< RFC 7231 */ - HDR_ACCEPT_RANGES, /**< RFC 7233 */ - HDR_AGE, /**< RFC 7234 */ - HDR_ALLOW, /**< RFC 7231 */ - HDR_AUTHENTICATION_INFO, /**< RFC 2617 */ - HDR_AUTHORIZATION, /**< RFC 7235, 4559 */ - HDR_CACHE_CONTROL, /**< RFC 7234 */ - HDR_CONNECTION, /**< RFC 7230 */ - HDR_CONTENT_BASE, /**< obsoleted RFC 2068 */ - HDR_CONTENT_DISPOSITION, /**< RFC 2183, 6266 */ - HDR_CONTENT_ENCODING, /**< RFC 7231 */ - HDR_CONTENT_LANGUAGE, /**< RFC 7231 */ - HDR_CONTENT_LENGTH, /**< RFC 7230 */ - HDR_CONTENT_LOCATION, /**< RFC 7231 */ - HDR_CONTENT_MD5, /**< deprecated, RFC 2616 */ - HDR_CONTENT_RANGE, /**< RFC 7233 */ - HDR_CONTENT_TYPE, /**< RFC 7231 */ - HDR_COOKIE, /**< RFC 6265 header we may need to erase */ - HDR_COOKIE2, /**< obsolete RFC 2965 header we may need to erase */ - HDR_DATE, /**< RFC 7231 */ - /*HDR_DAV,*/ /* RFC 2518 */ - /*HDR_DEPTH,*/ /* RFC 2518 */ - /*HDR_DERIVED_FROM,*/ /* deprecated RFC 2068 */ - /*HDR_DESTINATION,*/ /* RFC 2518 */ - HDR_ETAG, /**< RFC 7232 */ - HDR_EXPECT, /**< RFC 7231 */ - HDR_EXPIRES, /**< RFC 7234 */ - HDR_FORWARDED, /**< RFC 7239 */ - HDR_FROM, /**< RFC 7231 */ - HDR_HOST, /**< RFC 7230 */ - HDR_HTTP2_SETTINGS, /**< HTTP/2.0 upgrade header. see draft-ietf-httpbis-http2-13 */ - /*HDR_IF,*/ /* RFC 2518 */ - HDR_IF_MATCH, /**< RFC 7232 */ - HDR_IF_MODIFIED_SINCE, /**< RFC 7232 */ - HDR_IF_NONE_MATCH, /**< RFC 7232 */ - HDR_IF_RANGE, /**< RFC 7233 */ - HDR_IF_UNMODIFIED_SINCE, /**< RFC 7232 */ - HDR_KEEP_ALIVE, /**< obsoleted RFC 2068 header we may need to erase */ - HDR_KEY, /**< experimental RFC Draft draft-fielding-http-key-02 */ - HDR_LAST_MODIFIED, /**< RFC 7232 */ - HDR_LINK, /**< RFC 5988 */ - HDR_LOCATION, /**< RFC 7231 */ - /*HDR_LOCK_TOKEN,*/ /* RFC 2518 */ - HDR_MAX_FORWARDS, /**< RFC 7231 */ - HDR_MIME_VERSION, /**< RFC 2045, 7231 */ - HDR_NEGOTIATE, /**< experimental RFC 2295. Why only this one from 2295? */ - /*HDR_OVERWRITE,*/ /* RFC 2518 */ - HDR_ORIGIN, /* CORS Draft specification (see http://www.w3.org/TR/cors/) */ - HDR_PRAGMA, /**< RFC 7234 */ - HDR_PROXY_AUTHENTICATE, /**< RFC 7235 */ - HDR_PROXY_AUTHENTICATION_INFO, /**< RFC 2617 */ - HDR_PROXY_AUTHORIZATION, /**< RFC 7235 */ - HDR_PROXY_CONNECTION, /**< obsolete Netscape header we may need to erase. 
*/ - HDR_PROXY_SUPPORT, /**< RFC 4559 */ - HDR_PUBLIC, /**< RFC 2068 */ - HDR_RANGE, /**< RFC 7233 */ - HDR_REFERER, /**< RFC 7231 */ - HDR_REQUEST_RANGE, /**< some clients use this, sigh */ - HDR_RETRY_AFTER, /**< RFC 7231 */ - HDR_SERVER, /**< RFC 7231 */ - HDR_SET_COOKIE, /**< RFC 6265 header we may need to erase */ - HDR_SET_COOKIE2, /**< obsoleted RFC 2965 header we may need to erase */ - /*HDR_STATUS_URI,*/ /* RFC 2518 */ - /*HDR_TCN,*/ /* experimental RFC 2295 */ - HDR_TE, /**< RFC 7230 */ - /*HDR_TIMEOUT,*/ /* RFC 2518 */ - HDR_TITLE, /* obsolete draft suggested header */ - HDR_TRAILER, /**< RFC 7230 */ - HDR_TRANSFER_ENCODING, /**< RFC 7230 */ - HDR_TRANSLATE, /**< IIS custom header we may need to erase */ - HDR_UNLESS_MODIFIED_SINCE, /**< IIS custom header we may need to erase */ - HDR_UPGRADE, /**< RFC 7230 */ - HDR_USER_AGENT, /**< RFC 7231 */ - /*HDR_VARIANT_VARY,*/ /* experimental RFC 2295 */ - HDR_VARY, /**< RFC 7231 */ - HDR_VIA, /**< RFC 7230 */ - HDR_WARNING, /**< RFC 7234 */ - HDR_WWW_AUTHENTICATE, /**< RFC 7235, 4559 */ - HDR_X_CACHE, /**< Squid custom header */ - HDR_X_CACHE_LOOKUP, /**< Squid custom header. temporary hack that became de-facto. TODO remove */ - HDR_X_FORWARDED_FOR, /**< obsolete Squid custom header, RFC 7239 */ - HDR_X_REQUEST_URI, /**< Squid custom header appended if ADD_X_REQUEST_URI is defined */ - HDR_X_SQUID_ERROR, /**< Squid custom header on generated error responses */ -#if X_ACCELERATOR_VARY - HDR_X_ACCELERATOR_VARY, /**< obsolete Squid custom header. */ -#endif -#if USE_ADAPTATION - HDR_X_NEXT_SERVICES, /**< Squid custom ICAP header */ -#endif - HDR_SURROGATE_CAPABILITY, /**< Edge Side Includes (ESI) header */ - HDR_SURROGATE_CONTROL, /**< Edge Side Includes (ESI) header */ - HDR_FRONT_END_HTTPS, /**< MS Exchange custom header we may have to add */ - HDR_FTP_COMMAND, /**< Internal header for FTP command */ - HDR_FTP_ARGUMENTS, /**< Internal header for FTP command arguments */ - HDR_FTP_PRE, /**< Internal header containing leading FTP control response lines */ - HDR_FTP_STATUS, /**< Internal header for FTP reply status */ - HDR_FTP_REASON, /**< Internal header for FTP reply reason */ - HDR_OTHER, /**< internal tag value for "unknown" headers */ - HDR_ENUM_END -} http_hdr_type; - /** possible types for http header fields */ typedef enum { ftInvalid = HDR_ENUM_END, /**< to catch nasty errors with hdr_id<->fld_type clashes */ diff --cc src/Makefile.am index 05c03f629c,f42403cd44..092cf88916 --- a/src/Makefile.am +++ b/src/Makefile.am @@@ -1598,8 -1607,10 +1601,11 @@@ nodist_tests_testCacheManager_SOURCES $(DISKIO_GEN_SOURCE) # comm.cc only requires comm/libcomm.la until fdc_table is dead. 
tests_testCacheManager_LDADD = \ + clients/libclients.la \ + servers/libservers.la \ http/libsquid-http.la \ + ftp/libftp.la \ + parser/libsquid-parser.la \ ident/libident.la \ acl/libacls.la \ acl/libstate.la \ @@@ -2030,8 -2043,10 +2035,11 @@@ nodist_tests_testEvent_SOURCES = $(BUILT_SOURCES) \ $(DISKIO_GEN_SOURCE) tests_testEvent_LDADD = \ + clients/libclients.la \ + servers/libservers.la \ http/libsquid-http.la \ + ftp/libftp.la \ + parser/libsquid-parser.la \ ident/libident.la \ acl/libacls.la \ acl/libstate.la \ @@@ -2278,8 -2295,10 +2285,11 @@@ nodist_tests_testEventLoop_SOURCES = $(BUILT_SOURCES) \ $(DISKIO_GEN_SOURCE) tests_testEventLoop_LDADD = \ + clients/libclients.la \ + servers/libservers.la \ http/libsquid-http.la \ + ftp/libftp.la \ + parser/libsquid-parser.la \ ident/libident.la \ acl/libacls.la \ acl/libstate.la \ @@@ -2519,8 -2540,10 +2528,11 @@@ nodist_tests_test_http_range_SOURCES = $(BUILT_SOURCES) \ $(DISKIO_GEN_SOURCE) tests_test_http_range_LDADD = \ + clients/libclients.la \ + servers/libservers.la \ http/libsquid-http.la \ + ftp/libftp.la \ + parser/libsquid-parser.la \ ident/libident.la \ acl/libacls.la \ acl/libstate.la \ @@@ -3650,8 -3677,10 +3662,11 @@@ tests_testURL_SOURCES = nodist_tests_testURL_SOURCES = \ $(BUILT_SOURCES) tests_testURL_LDADD = \ + clients/libclients.la \ + servers/libservers.la \ http/libsquid-http.la \ + ftp/libftp.la \ + parser/libsquid-parser.la \ anyp/libanyp.la \ ident/libident.la \ acl/libacls.la \ diff --cc src/cache_cf.cc index 00755df3e5,a96c3269fe..b7eae46731 --- a/src/cache_cf.cc +++ b/src/cache_cf.cc @@@ -52,8 -52,10 +52,9 @@@ #include "eui/Config.h" #include "ExternalACL.h" #include "format/Format.h" + #include "ftp/Elements.h" #include "globals.h" #include "HttpHeaderTools.h" -#include "HttpRequestMethod.h" #include "ident/Config.h" #include "ip/Intercept.h" #include "ip/QosConfig.h" diff --cc src/client_side.cc index 44eb104c5d,d8966a220e..1e60f46597 --- a/src/client_side.cc +++ b/src/client_side.cc @@@ -120,9 -119,9 +120,10 @@@ #include "MemBuf.h" #include "MemObject.h" #include "mime_header.h" +#include "parser/Tokenizer.h" #include "profiler/Profiler.h" #include "rfc1738.h" + #include "servers/forward.h" #include "SquidConfig.h" #include "SquidTime.h" #include "StatCounters.h" @@@ -2155,19 -2089,19 +2115,19 @@@ prepareAcceleratedURL(ConnStateData * c if (vport > 0) { snprintf(vportStr, sizeof(vportStr),":%d",vport); } - snprintf(http->uri, url_sz, "%s://%s%s%s", - AnyP::UriScheme(conn->port->transport.protocol).c_str(), conn->port->defaultsite, vportStr, url); - debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http->uri <<"'"); + snprintf(http->uri, url_sz, "%s://%s%s" SQUIDSBUFPH, + AnyP::UriScheme(conn->port->transport.protocol).c_str(), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url)); + debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << http->uri); } else if (vport > 0 /* && (!vhost || no Host:) */) { - debugs(33, 5, "ACCEL VPORT REWRITE: http_port IP + vport=" << vport); + debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport); /* Put the local socket IP address as the hostname, with whatever vport we found */ - int url_sz = strlen(url) + 32 + Config.appendDomainLen; + const int url_sz = hp.requestUri().length() + 32 + Config.appendDomainLen; http->uri = (char *)xcalloc(url_sz, 1); http->getConn()->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN); - snprintf(http->uri, url_sz, "%s://%s:%d%s", + snprintf(http->uri, url_sz, "%s://%s:%d" SQUIDSBUFPH, 
AnyP::UriScheme(conn->port->transport.protocol).c_str(), - ipbuf, vport, url); - debugs(33, 5, "ACCEL VPORT REWRITE: '" << http->uri << "'"); + ipbuf, vport, SQUIDSBUFPRINT(url)); + debugs(33, 5, "ACCEL VPORT REWRITE: " << http->uri); } } @@@ -2214,41 -2147,85 +2174,41 @@@ prepareTransparentURL(ConnStateData * c * \return NULL on incomplete requests, * a ClientSocketContext structure on success or failure. */ - static ClientSocketContext * + ClientSocketContext * -parseHttpRequest(ConnStateData *csd, HttpParser *hp, HttpRequestMethod * method_p, Http::ProtocolVersion *http_ver) +parseHttpRequest(ConnStateData *csd, Http1::RequestParser &hp) { - char *req_hdr = NULL; - char *end; - size_t req_sz; - ClientHttpRequest *http; - ClientSocketContext *result; - StoreIOBuffer tempBuffer; - int r; - - /* pre-set these values to make aborting simpler */ - *method_p = Http::METHOD_NONE; + /* Attempt to parse the first line; this will define where the method, url, version and header begin */ + { + const bool parsedOk = hp.parse(csd->in.buf); - /* NP: don't be tempted to move this down or remove again. - * It's the only DDoS protection old-String has against long URL */ - if ( hp->bufsiz <= 0) { - debugs(33, 5, "Incomplete request, waiting for end of request line"); - return NULL; - } else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) { - debugs(33, 5, "parseHttpRequest: Too large request"); - hp->request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); - } - - /* Attempt to parse the first line; this'll define the method, url, version and header begin */ - r = HttpParserParseReqLine(hp); + // sync the buffers after parsing. + csd->in.buf = hp.remaining(); - if (r == 0) { - debugs(33, 5, "Incomplete request, waiting for end of request line"); - return NULL; - } - - if (r == -1) { - return parseHttpRequestAbort(csd, "error:invalid-request"); - } - - /* Request line is valid here .. */ - *http_ver = Http::ProtocolVersion(hp->req.v_maj, hp->req.v_min); - - /* This call scans the entire request, not just the headers */ - if (hp->req.v_maj > 0) { - if ((req_sz = headersEnd(hp->buf, hp->bufsiz)) == 0) { - debugs(33, 5, "Incomplete request, waiting for end of headers"); + if (hp.needsMoreData()) { + debugs(33, 5, "Incomplete request, waiting for end of request line"); return NULL; } - } else { - debugs(33, 3, "parseHttpRequest: Missing HTTP identifier"); - req_sz = HttpParserReqSz(hp); - } - - /* We know the whole request is in hp->buf now */ - - assert(req_sz <= (size_t) hp->bufsiz); - /* Will the following be true with HTTP/0.9 requests? probably not .. 
*/ - /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */ - assert(req_sz > 0); + if (!parsedOk) { + if (hp.request_parse_status == Http::scHeaderTooLarge) + return parseHttpRequestAbort(csd, "error:request-too-large"); - hp->hdr_end = req_sz - 1; - - hp->hdr_start = hp->req.end + 1; - - /* Enforce max_request_size */ - if (req_sz >= Config.maxRequestHeaderSize) { - debugs(33, 5, "parseHttpRequest: Too large request"); - hp->request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); + return parseHttpRequestAbort(csd, "error:invalid-request"); + } } - /* Set method_p */ - *method_p = HttpRequestMethod(&hp->buf[hp->req.m_start], &hp->buf[hp->req.m_end]+1); + /* We know the whole request is in parser now */ + debugs(11, 2, "HTTP Client " << csd->clientConnection); + debugs(11, 2, "HTTP Client REQUEST:\n---------\n" << + hp.method() << " " << hp.requestUri() << " " << hp.messageProtocol() << "\n" << + hp.mimeHeader() << + "\n----------"); /* deny CONNECT via accelerated ports */ - if (*method_p == Http::METHOD_CONNECT && csd->port != NULL && csd->port->flags.accelSurrogate) { + if (hp.method() == Http::METHOD_CONNECT && csd->port != NULL && csd->port->flags.accelSurrogate) { debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << csd->port->transport.protocol << " Accelerator port " << csd->port->s.port()); - /* XXX need a way to say "this many character length string" */ - debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->buf); - hp->request_parse_status = Http::scMethodNotAllowed; + debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp.method() << " " << hp.requestUri() << " " << hp.messageProtocol()); + hp.request_parse_status = Http::scMethodNotAllowed; return parseHttpRequestAbort(csd, "error:method-not-allowed"); } @@@ -2395,11 -2412,30 +2364,9 @@@ ConnStateData::consumeInput(const size_ void connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount) { - assert(byteCount > 0 && byteCount <= conn->in.buf.length()); - conn->in.buf.consume(byteCount); - debugs(33, 5, "conn->in.buf has " << conn->in.buf.length() << " bytes unused."); + conn->consumeInput(byteCount); } -/// respond with ERR_TOO_BIG if request header exceeds request_header_max_size -void -ConnStateData::checkHeaderLimits() -{ - if (in.buf.length() < Config.maxRequestHeaderSize) - return; // can accumulte more header data - - debugs(33, 3, "Request header is too large (" << in.buf.length() << " > " << - Config.maxRequestHeaderSize << " bytes)"); - - ClientSocketContext *context = parseHttpRequestAbort(this, "error:request-too-large"); - clientStreamNode *node = context->getClientReplyContext(); - clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); - assert (repContext); - repContext->setReplyToError(ERR_TOO_BIG, - Http::scBadRequest, Http::METHOD_NONE, NULL, - clientConnection->remote, NULL, NULL, NULL); - context->registerWithConn(); - context->pullData(); -} - void ConnStateData::clientAfterReadingRequests() { @@@ -2521,16 -2558,22 +2488,24 @@@ clientProcessRequest(ConnStateData *con bool mustReplyToOptions = false; bool unsupportedTe = false; bool expectBody = false; + const AnyP::ProtocolVersion &http_ver = hp.messageProtocol(); + const HttpRequestMethod &method = hp.method(); - /* We have an initial client stream in place should it be needed */ - /* setup our private context */ - context->registerWithConn(); + // temporary hack to avoid splitting this huge function with sensitive 
code + const bool isFtp = !hp; + if (isFtp) { + // In FTP, case, we already have the request parsed and checked, so we + // only need to go through the final body/conn setup to doCallouts(). + assert(http->request); + request = http->request; + notedUseOfBuffer = true; + goto doFtpAndHttp; + } if (context->flags.parsed_ok == 0) { + assert(hp); clientStreamNode *node = context->getClientReplyContext(); - debugs(33, 2, "clientProcessRequest: Invalid Request"); + debugs(33, 2, "Invalid Request"); conn->quitAfterError(NULL); // setLogUri should called before repContext->setReplyToError setLogUri(http, http->uri, true); @@@ -2777,6 -2855,21 +2767,12 @@@ finish } } -static void -connStripBufferWhitespace (ConnStateData * conn) -{ - // XXX: kill this whole function. - while (!conn->in.buf.isEmpty() && xisspace(conn->in.buf.at(0))) { - conn->in.buf.consume(1); - } -} - + int + ConnStateData::pipelinePrefetchMax() const + { + return Config.pipeline_max_prefetch; + } + /** * Limit the number of concurrent requests. * \return true when there are available position(s) in the pipeline queue for another request. @@@ -2825,19 -2919,17 +2821,9 @@@ ConnStateData::clientParseRequests( if (concurrentRequestQueueFilled()) break; - /* Begin the parsing */ - PROF_start(parseHttpRequest); - - // parser is incremental. Generate new parser state if we, - // a) dont have one already - // b) have completed the previous request parsing already - if (!parser_ || !parser_->needsMoreData()) - parser_ = new Http1::RequestParser(); - - /* Process request */ - ClientSocketContext *context = parseHttpRequest(this, *parser_); - PROF_stop(parseHttpRequest); + Http::ProtocolVersion http_ver; + ClientSocketContext *context = parseOneRequest(http_ver); - /* partial or incomplete request */ - if (!context) { - // TODO: why parseHttpRequest can just return parseHttpRequestAbort - // (which becomes context) but checkHeaderLimits cannot? - checkHeaderLimits(); - break; - } - /* status -1 or 1 */ if (context) { debugs(33, 5, HERE << clientConnection << ": parsed a request"); @@@ -2845,7 -2937,7 +2831,7 @@@ CommTimeoutCbPtrFun(clientLifetimeTimeout, context->http)); commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall); - clientProcessRequest(this, *parser_, context); - processParsedRequest(context, http_ver); ++ processParsedRequest(context, *parser_); parsed_req = true; // XXX: do we really need to parse everything right NOW ? 
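
Illustrative sketch only, not part of this merge: the client_side.cc hunks above replace the old HttpParser calls with the incremental Http1::RequestParser API (hp.parse(), hp.remaining(), hp.needsMoreData(), plus the method()/requestUri()/messageProtocol()/mimeHeader() accessors). The snippet below strings those same calls together to show how a caller drives the parser across repeated reads; the helper name is invented, and the exact header that declares Http1::RequestParser is assumed rather than verified.

    #include "SBuf.h"
    // plus whichever header declares Http1::RequestParser in this tree (assumed)

    // Returns true once a complete request line and MIME header have been parsed.
    static bool
    parseOneHttpRequestLine(Http1::RequestParser &hp, SBuf &inBuf)
    {
        const bool parsedOk = hp.parse(inBuf); // incremental: consumes what it can
        inBuf = hp.remaining();                // keep only the still-unparsed tail

        if (hp.needsMoreData())
            return false;  // wait for the next network read, then call again

        if (!parsedOk)
            return false;  // e.g. hp.request_parse_status == Http::scHeaderTooLarge

        // the parsed pieces are now available via hp.method(), hp.requestUri(),
        // hp.messageProtocol() and hp.mimeHeader()
        return true;
    }
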
diff --cc src/client_side.h index d3607f985c,238c684cc1..7e5031f5c4 --- a/src/client_side.h +++ b/src/client_side.h @@@ -33,9 -33,11 +33,11 @@@ #ifndef SQUID_CLIENTSIDE_H #define SQUID_CLIENTSIDE_H + #include "clientStreamForward.h" #include "comm.h" #include "HttpControlMsg.h" -#include "HttpParser.h" +#include "http/forward.h" + #include "ipc/FdNotes.h" #include "SBuf.h" #if USE_AUTH #include "auth/UserRequest.h" diff --cc src/http/RegisteredHeaders.h index dcf0a40fa4,0000000000..3891203f80 mode 100644,000000..100644 --- a/src/http/RegisteredHeaders.h +++ b/src/http/RegisteredHeaders.h @@@ -1,107 -1,0 +1,113 @@@ +#ifndef SQUID_HTTP_REGISTEREDHEADERS_H +#define SQUID_HTTP_REGISTEREDHEADERS_H + +/// recognized or "known" header fields; and the RFC which defines them (or not) ++/// http://www.iana.org/assignments/message-headers/message-headers.xhtml +typedef enum { + HDR_BAD_HDR = -1, + HDR_ACCEPT = 0, /**< RFC 7231 */ + HDR_ACCEPT_CHARSET, /**< RFC 7231 */ + HDR_ACCEPT_ENCODING, /**< RFC 7231 */ + /*HDR_ACCEPT_FEATURES,*/ /* RFC 2295 */ + HDR_ACCEPT_LANGUAGE, /**< RFC 7231 */ + HDR_ACCEPT_RANGES, /**< RFC 7233 */ + HDR_AGE, /**< RFC 7234 */ + HDR_ALLOW, /**< RFC 7231 */ + HDR_AUTHENTICATION_INFO, /**< RFC 2617 */ + HDR_AUTHORIZATION, /**< RFC 7235, 4559 */ + HDR_CACHE_CONTROL, /**< RFC 7234 */ + HDR_CONNECTION, /**< RFC 7230 */ + HDR_CONTENT_BASE, /**< obsoleted RFC 2068 */ + HDR_CONTENT_DISPOSITION, /**< RFC 2183, 6266 */ + HDR_CONTENT_ENCODING, /**< RFC 7231 */ + HDR_CONTENT_LANGUAGE, /**< RFC 7231 */ + HDR_CONTENT_LENGTH, /**< RFC 7230 */ + HDR_CONTENT_LOCATION, /**< RFC 7231 */ + HDR_CONTENT_MD5, /**< deprecated, RFC 2616 */ + HDR_CONTENT_RANGE, /**< RFC 7233 */ + HDR_CONTENT_TYPE, /**< RFC 7231 */ + HDR_COOKIE, /**< RFC 6265 header we may need to erase */ + HDR_COOKIE2, /**< obsolete RFC 2965 header we may need to erase */ + HDR_DATE, /**< RFC 7231 */ + /*HDR_DAV,*/ /* RFC 2518 */ + /*HDR_DEPTH,*/ /* RFC 2518 */ + /*HDR_DERIVED_FROM,*/ /* deprecated RFC 2068 */ + /*HDR_DESTINATION,*/ /* RFC 2518 */ + HDR_ETAG, /**< RFC 7232 */ + HDR_EXPECT, /**< RFC 7231 */ + HDR_EXPIRES, /**< RFC 7234 */ + HDR_FORWARDED, /**< RFC 7239 */ + HDR_FROM, /**< RFC 7231 */ + HDR_HOST, /**< RFC 7230 */ + HDR_HTTP2_SETTINGS, /**< HTTP/2.0 upgrade header. see draft-ietf-httpbis-http2-13 */ + /*HDR_IF,*/ /* RFC 2518 */ + HDR_IF_MATCH, /**< RFC 7232 */ + HDR_IF_MODIFIED_SINCE, /**< RFC 7232 */ + HDR_IF_NONE_MATCH, /**< RFC 7232 */ + HDR_IF_RANGE, /**< RFC 7233 */ + HDR_IF_UNMODIFIED_SINCE, /**< RFC 7232 */ + HDR_KEEP_ALIVE, /**< obsoleted RFC 2068 header we may need to erase */ + HDR_KEY, /**< experimental RFC Draft draft-fielding-http-key-02 */ + HDR_LAST_MODIFIED, /**< RFC 7232 */ + HDR_LINK, /**< RFC 5988 */ + HDR_LOCATION, /**< RFC 7231 */ + /*HDR_LOCK_TOKEN,*/ /* RFC 2518 */ + HDR_MAX_FORWARDS, /**< RFC 7231 */ + HDR_MIME_VERSION, /**< RFC 2045, 7231 */ + HDR_NEGOTIATE, /**< experimental RFC 2295. Why only this one from 2295? */ + /*HDR_OVERWRITE,*/ /* RFC 2518 */ + HDR_ORIGIN, /* CORS Draft specification (see http://www.w3.org/TR/cors/) */ + HDR_PRAGMA, /**< RFC 7234 */ + HDR_PROXY_AUTHENTICATE, /**< RFC 7235 */ + HDR_PROXY_AUTHENTICATION_INFO, /**< RFC 2617 */ + HDR_PROXY_AUTHORIZATION, /**< RFC 7235 */ + HDR_PROXY_CONNECTION, /**< obsolete Netscape header we may need to erase. 
*/ + HDR_PROXY_SUPPORT, /**< RFC 4559 */ + HDR_PUBLIC, /**< RFC 2068 */ + HDR_RANGE, /**< RFC 7233 */ + HDR_REFERER, /**< RFC 7231 */ + HDR_REQUEST_RANGE, /**< some clients use this, sigh */ + HDR_RETRY_AFTER, /**< RFC 7231 */ + HDR_SERVER, /**< RFC 7231 */ + HDR_SET_COOKIE, /**< RFC 6265 header we may need to erase */ + HDR_SET_COOKIE2, /**< obsoleted RFC 2965 header we may need to erase */ + /*HDR_STATUS_URI,*/ /* RFC 2518 */ + /*HDR_TCN,*/ /* experimental RFC 2295 */ + HDR_TE, /**< RFC 7230 */ + /*HDR_TIMEOUT,*/ /* RFC 2518 */ + HDR_TITLE, /* obsolete draft suggested header */ + HDR_TRAILER, /**< RFC 7230 */ + HDR_TRANSFER_ENCODING, /**< RFC 7230 */ + HDR_TRANSLATE, /**< IIS custom header we may need to erase */ + HDR_UNLESS_MODIFIED_SINCE, /**< IIS custom header we may need to erase */ + HDR_UPGRADE, /**< RFC 7230 */ + HDR_USER_AGENT, /**< RFC 7231 */ + /*HDR_VARIANT_VARY,*/ /* experimental RFC 2295 */ + HDR_VARY, /**< RFC 7231 */ + HDR_VIA, /**< RFC 7230 */ + HDR_WARNING, /**< RFC 7234 */ + HDR_WWW_AUTHENTICATE, /**< RFC 7235, 4559 */ + HDR_X_CACHE, /**< Squid custom header */ + HDR_X_CACHE_LOOKUP, /**< Squid custom header. temporary hack that became de-facto. TODO remove */ + HDR_X_FORWARDED_FOR, /**< obsolete Squid custom header, RFC 7239 */ + HDR_X_REQUEST_URI, /**< Squid custom header appended if ADD_X_REQUEST_URI is defined */ + HDR_X_SQUID_ERROR, /**< Squid custom header on generated error responses */ +#if X_ACCELERATOR_VARY + HDR_X_ACCELERATOR_VARY, /**< obsolete Squid custom header. */ +#endif +#if USE_ADAPTATION + HDR_X_NEXT_SERVICES, /**< Squid custom ICAP header */ +#endif + HDR_SURROGATE_CAPABILITY, /**< Edge Side Includes (ESI) header */ + HDR_SURROGATE_CONTROL, /**< Edge Side Includes (ESI) header */ + HDR_FRONT_END_HTTPS, /**< MS Exchange custom header we may have to add */ ++ HDR_FTP_COMMAND, /**< Internal header for FTP command */ ++ HDR_FTP_ARGUMENTS, /**< Internal header for FTP command arguments */ ++ HDR_FTP_PRE, /**< Internal header containing leading FTP control response lines */ ++ HDR_FTP_STATUS, /**< Internal header for FTP reply status */ ++ HDR_FTP_REASON, /**< Internal header for FTP reply reason */ + HDR_OTHER, /**< internal tag value for "unknown" headers */ + HDR_ENUM_END +} http_hdr_type; + +#endif /* SQUID_HTTP_REGISTEREDHEADERS_H */ diff --cc src/servers/HttpServer.cc index 0000000000,1f3dab3cca..e14369c1d6 mode 000000,100644..100644 --- a/src/servers/HttpServer.cc +++ b/src/servers/HttpServer.cc @@@ -1,0 -1,194 +1,201 @@@ + /* + * DEBUG: section 33 Client-side Routines + */ + + #include "squid.h" + #include "client_side.h" + #include "client_side_request.h" + #include "comm/Write.h" + #include "HttpHeaderTools.h" + #include "profiler/Profiler.h" + #include "servers/forward.h" + #include "SquidConfig.h" + + namespace Http + { + + /// Manages a connection from an HTTP client. 
+ class Server: public ConnStateData + { + public: + Server(const MasterXaction::Pointer &xact, const bool beHttpsServer); + virtual ~Server() {} + + void readSomeHttpData(); + + protected: + /* ConnStateData API */ + virtual ClientSocketContext *parseOneRequest(Http::ProtocolVersion &ver); + virtual void processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &ver); + virtual void handleReply(HttpReply *rep, StoreIOBuffer receivedData); + virtual void writeControlMsgAndCall(ClientSocketContext *context, HttpReply *rep, AsyncCall::Pointer &call); + virtual time_t idleTimeout() const; + + /* BodyPipe API */ + virtual void noteMoreBodySpaceAvailable(BodyPipe::Pointer); + virtual void noteBodyConsumerAborted(BodyPipe::Pointer); + + /* AsyncJob API */ + virtual void start(); + + private: + void processHttpRequest(ClientSocketContext *const context); + void handleHttpRequestData(); + + HttpParser parser_; + HttpRequestMethod method_; ///< parsed HTTP method + + /// temporary hack to avoid creating a true HttpsServer class + const bool isHttpsServer; + + CBDATA_CLASS2(Server); + }; + + } // namespace Http + + CBDATA_NAMESPACED_CLASS_INIT(Http, Server); + + Http::Server::Server(const MasterXaction::Pointer &xact, bool beHttpsServer): + AsyncJob("Http::Server"), + ConnStateData(xact), + isHttpsServer(beHttpsServer) + { + } + + time_t + Http::Server::idleTimeout() const + { + return Config.Timeout.clientIdlePconn; + } + + void + Http::Server::start() + { + ConnStateData::start(); + + #if USE_OPENSSL + // XXX: Until we create an HttpsServer class, use this hack to allow old + // client_side.cc code to manipulate ConnStateData object directly + if (isHttpsServer) { + postHttpsAccept(); + return; + } + #endif + + typedef CommCbMemFunT TimeoutDialer; + AsyncCall::Pointer timeoutCall = JobCallback(33, 5, + TimeoutDialer, this, Http::Server::requestTimeout); + commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall); + readSomeData(); + } + + void + Http::Server::noteMoreBodySpaceAvailable(BodyPipe::Pointer) + { + if (!handleRequestBodyData()) + return; + + // too late to read more body + if (!isOpen() || stoppedReceiving()) + return; + + readSomeData(); + } + + ClientSocketContext * + Http::Server::parseOneRequest(Http::ProtocolVersion &ver) + { - ClientSocketContext *context = NULL; + PROF_start(HttpServer_parseOneRequest); - HttpParserInit(&parser_, in.buf.c_str(), in.buf.length()); - context = parseHttpRequest(this, &parser_, &method_, &ver); ++ ++ // parser is incremental. 
Generate new parser state if we, ++ // a) dont have one already ++ // b) have completed the previous request parsing already ++ if (!parser_ || !parser_->needsMoreData()) ++ parser_ = new Http1::RequestParser(); ++ ++ /* Process request */ ++ ClientSocketContext *context = parseHttpRequest(this, *parser_); ++ + PROF_stop(HttpServer_parseOneRequest); + return context; + } + + void + Http::Server::processParsedRequest(ClientSocketContext *context, const Http::ProtocolVersion &ver) + { + /* We have an initial client stream in place should it be needed */ + /* setup our private context */ + context->registerWithConn(); + clientProcessRequest(this, &parser_, context, method_, ver); + } + + void + Http::Server::noteBodyConsumerAborted(BodyPipe::Pointer ptr) + { + ConnStateData::noteBodyConsumerAborted(ptr); + stopReceiving("virgin request body consumer aborted"); // closes ASAP + } + + void + Http::Server::handleReply(HttpReply *rep, StoreIOBuffer receivedData) + { + // the caller guarantees that we are dealing with the current context only + ClientSocketContext::Pointer context = getCurrentContext(); + Must(context != NULL); + const ClientHttpRequest *http = context->http; + Must(http != NULL); + + // After sending Transfer-Encoding: chunked (at least), always send + // the last-chunk if there was no error, ignoring responseFinishedOrFailed. + const bool mustSendLastChunk = http->request->flags.chunkedReply && + !http->request->flags.streamError && + !context->startOfOutput(); + const bool responseFinishedOrFailed = !rep && + !receivedData.data && + !receivedData.length; + if (responseFinishedOrFailed && !mustSendLastChunk) { + context->writeComplete(context->clientConnection, NULL, 0, Comm::OK); + return; + } + + if (!context->startOfOutput()) { + context->sendBody(rep, receivedData); + return; + } + + assert(rep); + http->al->reply = rep; + HTTPMSGLOCK(http->al->reply); + context->sendStartOfMessage(rep, receivedData); + } + + void + Http::Server::writeControlMsgAndCall(ClientSocketContext *context, HttpReply *rep, AsyncCall::Pointer &call) + { + // apply selected clientReplyContext::buildReplyHeader() mods + // it is not clear what headers are required for control messages + rep->header.removeHopByHopEntries(); + rep->header.putStr(HDR_CONNECTION, "keep-alive"); + httpHdrMangleList(&rep->header, getCurrentContext()->http->request, ROR_REPLY); + + MemBuf *mb = rep->pack(); + + debugs(11, 2, "HTTP Client " << clientConnection); + debugs(11, 2, "HTTP Client CONTROL MSG:\n---------\n" << mb->buf << "\n----------"); + + Comm::Write(context->clientConnection, mb, call); + + delete mb; + } + + ConnStateData * + Http::NewServer(MasterXactionPointer &xact) + { + return new Server(xact, false); + } + + ConnStateData * + Https::NewServer(MasterXactionPointer &xact) + { + return new Http::Server(xact, true); + }
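
Illustrative sketch only, not part of this merge: RegisteredHeaders.h keeps the same http_hdr_type ids, so existing HttpHeader calls such as those in Http::Server::writeControlMsgAndCall() above continue to work unchanged. Both member calls below appear verbatim in the diff; the helper name is invented for illustration.

    #include "HttpHeader.h"
    #include "http/RegisteredHeaders.h"

    // Strip hop-by-hop headers from a control-message reply, then advertise
    // keep-alive explicitly (mirrors writeControlMsgAndCall in the hunk above).
    static void
    markControlMsgKeepAlive(HttpHeader &hdr)
    {
        hdr.removeHopByHopEntries();              // drop Connection, Keep-Alive, ...
        hdr.putStr(HDR_CONNECTION, "keep-alive"); // then re-add the one we want
    }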