From: Amos Jeffries Date: Sat, 4 Jan 2014 15:16:29 +0000 (-0800) Subject: Shuffle request_header_max_size limit checks into RequestParser X-Git-Tag: merge-candidate-3-v1~506^2~65 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=016a316b9407f0a1aebbd7e30c62be1e324e7dd5;p=thirdparty%2Fsquid.git Shuffle request_header_max_size limit checks into RequestParser TODO: make parseHttpRequestAbort() produce error pages based on Parser. For now this alters the client-side error page displayed to the generic INVALID_REQUEST page. --- diff --git a/src/client_side.cc b/src/client_side.cc index da2fa8f6ac..9531b199f5 100644 --- a/src/client_side.cc +++ b/src/client_side.cc @@ -2216,17 +2216,6 @@ prepareTransparentURL(ConnStateData * conn, ClientHttpRequest *http, Http1::Requ static ClientSocketContext * parseHttpRequest(ConnStateData *csd, Http1::RequestParser &hp) { - /* NP: don't be tempted to move this down or remove again. - * It's the only DDoS protection old-String has against long URL */ - if ( hp.bufsiz <= 0) { - debugs(33, 5, "Incomplete request, waiting for end of request line"); - return NULL; - } else if ( (size_t)hp.bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp.buf, Config.maxRequestHeaderSize) == 0) { - debugs(33, 5, "parseHttpRequest: Too large request"); - hp.request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); - } - /* Attempt to parse the first line; this will define where the method, url, version and header begin */ { const bool parsedOk = hp.parse(); @@ -2236,8 +2225,12 @@ parseHttpRequest(ConnStateData *csd, Http1::RequestParser &hp) return NULL; } - if (!parsedOk) + if (!parsedOk) { + if (hp.request_parse_status == Http::scHeaderTooLarge) + return parseHttpRequestAbort(csd, "error:request-too-large"); + return parseHttpRequestAbort(csd, "error:invalid-request"); + } } /* We know the whole request is in hp.buf now */ @@ -2248,13 +2241,6 @@ parseHttpRequest(ConnStateData *csd, 
Http1::RequestParser &hp) /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */ assert(req_sz > 0); - /* Enforce max_request_size */ - if (req_sz >= Config.maxRequestHeaderSize) { - debugs(33, 5, "parseHttpRequest: Too large request"); - hp.request_parse_status = Http::scHeaderTooLarge; - return parseHttpRequestAbort(csd, "error:request-too-large"); - } - /* deny CONNECT via accelerated ports */ if (hp.method() == Http::METHOD_CONNECT && csd->port && csd->port->flags.accelSurrogate) { debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << csd->port->transport.protocol << " Accelerator port " << csd->port->s.port()); @@ -2457,27 +2443,6 @@ connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount) memmove(conn->in.buf, conn->in.buf + byteCount, conn->in.notYetUsed); } -/// respond with ERR_TOO_BIG if request header exceeds request_header_max_size -void -ConnStateData::checkHeaderLimits() -{ - if (in.notYetUsed < Config.maxRequestHeaderSize) - return; // can accumulte more header data - - debugs(33, 3, "Request header is too large (" << in.notYetUsed << " > " << - Config.maxRequestHeaderSize << " bytes)"); - - ClientSocketContext *context = parseHttpRequestAbort(this, "error:request-too-large"); - clientStreamNode *node = context->getClientReplyContext(); - clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); - assert (repContext); - repContext->setReplyToError(ERR_TOO_BIG, - Http::scBadRequest, Http::METHOD_NONE, NULL, - clientConnection->remote, NULL, NULL, NULL); - context->registerWithConn(); - context->pullData(); -} - void ConnStateData::clientAfterReadingRequests() { @@ -2951,14 +2916,6 @@ ConnStateData::clientParseRequests() ClientSocketContext *context = parseHttpRequest(this, *parser_); PROF_stop(parseHttpRequest); - /* partial or incomplete request */ - if (!context) { - // TODO: why parseHttpRequest can just return parseHttpRequestAbort - // (which becomes context) but 
checkHeaderLimits cannot? - checkHeaderLimits(); - break; - } - /* status -1 or 1 */ if (context) { debugs(33, 5, HERE << clientConnection << ": parsed a request"); diff --git a/src/client_side.h b/src/client_side.h index a5f5cbe012..887185e58f 100644 --- a/src/client_side.h +++ b/src/client_side.h @@ -201,7 +201,6 @@ public: void addContextToQueue(ClientSocketContext * context); int getConcurrentRequestCount() const; bool isOpen() const; - void checkHeaderLimits(); // HttpControlMsgSink API virtual void sendControlMsg(HttpControlMsg msg); diff --git a/src/http/Http1Parser.cc b/src/http/Http1Parser.cc index d264b6b0a1..0ee5bff501 100644 --- a/src/http/Http1Parser.cc +++ b/src/http/Http1Parser.cc @@ -175,7 +175,16 @@ Http1::RequestParser::parseRequestFirstLine() return -1; } } + if (req.end == -1) { + // DoS protection against long first-line + if ( (size_t)bufsiz >= Config.maxRequestHeaderSize) { + debugs(33, 5, "Too large request-line"); + // XXX: return URL-too-long status code if second_whitespace is not yet found. + request_parse_status = Http::scHeaderTooLarge; + return -1; + } + debugs(74, 5, "Parser: retval 0: from " << req.start << "->" << req.end << ": needs more data to complete first line."); return 0; @@ -186,6 +195,13 @@ Http1::RequestParser::parseRequestFirstLine() // Input Validation: + // DoS protection against long first-line + if ((size_t)(req.end-req.start) >= Config.maxRequestHeaderSize) { + debugs(33, 5, "Too large request-line"); + request_parse_status = Http::scHeaderTooLarge; + return -1; + } + // Process what we now know about the line structure into field offsets // generating HTTP status for any aborts as we go. 
@@ -356,7 +372,12 @@ Http1::RequestParser::parse() */ int64_t mimeHeaderBytes = 0; if ((mimeHeaderBytes = headersEnd(buf+parseOffset_, bufsiz-parseOffset_)) == 0) { - debugs(33, 5, "Incomplete request, waiting for end of headers"); + if (bufsiz-parseOffset_ >= Config.maxRequestHeaderSize) { + debugs(33, 5, "Too large request"); + request_parse_status = Http::scHeaderTooLarge; + completedState_ = HTTP_PARSE_DONE; + } else + debugs(33, 5, "Incomplete request, waiting for end of headers"); return false; } mimeHeaderBlock_.assign(&buf[req.end+1], mimeHeaderBytes); @@ -367,6 +388,13 @@ Http1::RequestParser::parse() // NP: planned name for this stage is HTTP_PARSE_MIME // but we do not do any further stages here yet so go straight to DONE completedState_ = HTTP_PARSE_DONE; + + // Squid could handle these headers, but admin does not want to + if (messageHeaderSize() >= Config.maxRequestHeaderSize) { + debugs(33, 5, "Too large request"); + request_parse_status = Http::scHeaderTooLarge; + return false; + } } return isDone(); diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc index 77d9ff4692..8c3dfab52c 100644 --- a/src/tests/stub_client_side.cc +++ b/src/tests/stub_client_side.cc @@ -39,7 +39,6 @@ bool ConnStateData::maybeMakeSpaceAvailable() STUB_RETVAL(false) void ConnStateData::addContextToQueue(ClientSocketContext * context) STUB int ConnStateData::getConcurrentRequestCount() const STUB_RETVAL(0) bool ConnStateData::isOpen() const STUB_RETVAL(false) -void ConnStateData::checkHeaderLimits() STUB void ConnStateData::sendControlMsg(HttpControlMsg msg) STUB char *ConnStateData::In::addressToReadInto() const STUB_RETVAL(NULL) int64_t ConnStateData::mayNeedToReadMoreBody() const STUB_RETVAL(0) diff --git a/src/tests/testHttp1Parser.cc b/src/tests/testHttp1Parser.cc index 335c588c39..3173a07e77 100644 --- a/src/tests/testHttp1Parser.cc +++ b/src/tests/testHttp1Parser.cc @@ -25,6 +25,11 @@ testHttp1Parser::globalSetup() Mem::Init(); setup_done = true; + 
+ // default to strict parser. set for loose parsing specifically where behaviour differs. + Config.onoff.relaxed_header_parser = 0; + + Config.maxRequestHeaderSize = 1024; // XXX: unit test the RequestParser handling of this limit } void