1 /*
2 * $Id$
3 *
4 * DEBUG: section 33 Client-side Routines
5 * AUTHOR: Duane Wessels
6 *
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
9 *
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 *
33 */
34
35 /**
36 \defgroup ClientSide Client-Side Logics
37 *
38 \section cserrors Errors and client side
39 *
40 \par Problem the first:
41 * the store entry is no longer authoritative on the
42 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
43 * of client_side_reply.c.
44 * Problem the second: resources are wasted if we delay in cleaning up.
45 * Problem the third: we can't depend on a connection close to clean up.
46 *
47 \par Nice thing the first:
48 * Any step in the stream can callback with data
49 * representing an error.
50 * Nice thing the second: once you stop requesting reads from upstream,
51 * upstream can be stopped too.
52 *
53 \par Solution #1:
54 * Error has a callback mechanism to hand over a membuf
55 * with the error content. The failing node pushes that back as the
56 * reply. Can this be generalised to reduce duplicate efforts?
57 * A: Possibly. For now, only one location uses this.
58 * How to deal with pre-stream errors?
59 * Tell client_side_reply that we *want* an error page before any
60 * stream calls occur. Then we simply read as normal.
61 *
62 *
63 \section pconn_logic Persistent connection logic:
64 *
65 \par
66 * requests (httpClientRequest structs) get added to the connection
67 * list, with the current one being chr
68 *
69 \par
70 * The request is *immediately* kicked off, and data flows through
71 * to clientSocketRecipient.
72 *
73 \par
74 * If the data that arrives at clientSocketRecipient is not for the current
75 * request, clientSocketRecipient simply returns, without requesting more
76 * data, or sending it.
77 *
78 \par
79 * ClientKeepAliveNextRequest will then detect the presence of data in
80 * the next ClientHttpRequest, and will send it, re-establishing the
81 * data flow.
82 */
83
84 #include "squid.h"
85
86 #include "acl/FilledChecklist.h"
87 #include "auth/UserRequest.h"
88 #include "ChunkedCodingParser.h"
89 #include "client_side.h"
90 #include "client_side_reply.h"
91 #include "client_side_request.h"
92 #include "ClientRequestContext.h"
93 #include "clientStream.h"
94 #include "comm.h"
95 #include "comm/ListenStateData.h"
96 #include "ConnectionDetail.h"
97 #include "eui/Config.h"
98 #include "fde.h"
99 #include "HttpHdrContRange.h"
100 #include "HttpReply.h"
101 #include "HttpRequest.h"
102 #include "ident/Config.h"
103 #include "ident/Ident.h"
104 #include "ip/IpIntercept.h"
105 #include "MemBuf.h"
106 #include "MemObject.h"
107 #include "ProtoPort.h"
108 #include "rfc1738.h"
109 #include "SquidTime.h"
110 #include "Store.h"
111
112 #if LINGERING_CLOSE
113 #define comm_close comm_lingering_close
114 #endif
115
116 /* our socket-related context */
117
118
119 CBDATA_CLASS_INIT(ClientSocketContext);
120
121 void *
122 ClientSocketContext::operator new (size_t byteCount)
123 {
124 /* derived classes with different sizes must implement their own new */
125 assert (byteCount == sizeof (ClientSocketContext));
126 CBDATA_INIT_TYPE(ClientSocketContext);
127 return cbdataAlloc(ClientSocketContext);
128 }
129
130 void
131 ClientSocketContext::operator delete (void *address)
132 {
133 cbdataFree (address);
134 }
135
136 /* Local functions */
137 /* ClientSocketContext */
138 static ClientSocketContext *ClientSocketContextNew(ClientHttpRequest *);
139 /* other */
140 static IOCB clientWriteComplete;
141 static IOCB clientWriteBodyComplete;
142 static bool clientParseRequest(ConnStateData * conn, bool &do_next_read);
143 static PF clientLifetimeTimeout;
144 static ClientSocketContext *parseHttpRequestAbort(ConnStateData * conn, const char *uri);
145 static ClientSocketContext *parseHttpRequest(ConnStateData *, HttpParser *, HttpRequestMethod *, HttpVersion *);
146 #if USE_IDENT
147 static IDCB clientIdentDone;
148 #endif
149 static CSCB clientSocketRecipient;
150 static CSD clientSocketDetach;
151 static void clientSetKeepaliveFlag(ClientHttpRequest *);
152 static int clientIsContentLengthValid(HttpRequest * r);
153 static int clientIsRequestBodyValid(int64_t bodyLength);
154 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
155
156 static void clientUpdateStatHistCounters(log_type logType, int svc_time);
157 static void clientUpdateStatCounters(log_type logType);
158 static void clientUpdateHierCounters(HierarchyLogEntry *);
159 static bool clientPingHasFinished(ping_data const *aPing);
160 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry *);
161 #ifndef PURIFY
162 static int connIsUsable(ConnStateData * conn);
163 #endif
164 static int responseFinishedOrFailed(HttpReply * rep, StoreIOBuffer const &receivedData);
165 static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest, ConnStateData * conn);
166 static void clientUpdateSocketStats(log_type logType, size_t size);
167
168 char *skipLeadingSpace(char *aString);
169 static void connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount);
170 static int connKeepReadingIncompleteRequest(ConnStateData * conn);
171 static void connCancelIncompleteRequests(ConnStateData * conn);
172
173 static ConnStateData *connStateCreate(const Ip::Address &peer, const Ip::Address &me, int fd, http_port_list *port);
174
175
176 int
177 ClientSocketContext::fd() const
178 {
179 assert (http);
180 assert (http->getConn() != NULL);
181 return http->getConn()->fd;
182 }
183
184 clientStreamNode *
185 ClientSocketContext::getTail() const
186 {
187 if (http->client_stream.tail)
188 return (clientStreamNode *)http->client_stream.tail->data;
189
190 return NULL;
191 }
192
193 clientStreamNode *
194 ClientSocketContext::getClientReplyContext() const
195 {
196 return (clientStreamNode *)http->client_stream.tail->prev->data;
197 }
198
199 /**
200 * This routine should be called to grow the inbuf and then
201 * call comm_read().
202 */
203 void
204 ConnStateData::readSomeData()
205 {
206 if (reading())
207 return;
208
209 reading(true);
210
211 debugs(33, 4, "clientReadSomeData: FD " << fd << ": reading request...");
212
213 makeSpaceAvailable();
214
215 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
216 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::clientReadRequest",
217 Dialer(this, &ConnStateData::clientReadRequest));
218 comm_read(fd, in.addressToReadInto(), getAvailableBufferLength(), call);
219 }
220
221
222 void
223 ClientSocketContext::removeFromConnectionList(ConnStateData * conn)
224 {
225 ClientSocketContext::Pointer *tempContextPointer;
226 assert(conn != NULL && cbdataReferenceValid(conn));
227 assert(conn->getCurrentContext() != NULL);
228 /* Unlink us from the connection request list */
229 tempContextPointer = & conn->currentobject;
230
231 while (tempContextPointer->getRaw()) {
232 if (*tempContextPointer == this)
233 break;
234
235 tempContextPointer = &(*tempContextPointer)->next;
236 }
237
238 assert(tempContextPointer->getRaw() != NULL);
239 *tempContextPointer = next;
240 next = NULL;
241 }
242
243 ClientSocketContext::~ClientSocketContext()
244 {
245 clientStreamNode *node = getTail();
246
247 if (node) {
248 ClientSocketContext *streamContext = dynamic_cast<ClientSocketContext *> (node->data.getRaw());
249
250 if (streamContext) {
251 /* We are *always* the tail - prevent recursive free */
252 assert(this == streamContext);
253 node->data = NULL;
254 }
255 }
256
257 if (connRegistered_)
258 deRegisterWithConn();
259
260 httpRequestFree(http);
261
262 /* clean up connection links to us */
263 assert(this != next.getRaw());
264 }
265
266 void
267 ClientSocketContext::registerWithConn()
268 {
269 assert (!connRegistered_);
270 assert (http);
271 assert (http->getConn() != NULL);
272 connRegistered_ = true;
273 http->getConn()->addContextToQueue(this);
274 }
275
276 void
277 ClientSocketContext::deRegisterWithConn()
278 {
279 assert (connRegistered_);
280 removeFromConnectionList(http->getConn());
281 connRegistered_ = false;
282 }
283
284 void
285 ClientSocketContext::connIsFinished()
286 {
287 assert (http);
288 assert (http->getConn() != NULL);
289 deRegisterWithConn();
290 /* we can't handle any more stream data - detach */
291 clientStreamDetach(getTail(), http);
292 }
293
294 ClientSocketContext::ClientSocketContext() : http(NULL), reply(NULL), next(NULL),
295 writtenToSocket(0),
296 mayUseConnection_ (false),
297 connRegistered_ (false)
298 {
299 memset (reqbuf, '\0', sizeof (reqbuf));
300 flags.deferred = 0;
301 flags.parsed_ok = 0;
302 deferredparams.node = NULL;
303 deferredparams.rep = NULL;
304 }
305
306 ClientSocketContext *
307 ClientSocketContextNew(ClientHttpRequest * http)
308 {
309 ClientSocketContext *newContext;
310 assert(http != NULL);
311 newContext = new ClientSocketContext;
312 newContext->http = http;
313 return newContext;
314 }
315
316 #if USE_IDENT
317 static void
318 clientIdentDone(const char *ident, void *data)
319 {
320 ConnStateData *conn = (ConnStateData *)data;
321 xstrncpy(conn->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
322 }
323 #endif
324
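/// increments client-side HTTP request counters, including hit, disk-hit and memory-hit tallies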
325 void
326 clientUpdateStatCounters(log_type logType)
327 {
328 statCounter.client_http.requests++;
329
330 if (logTypeIsATcpHit(logType))
331 statCounter.client_http.hits++;
332
333 if (logType == LOG_TCP_HIT)
334 statCounter.client_http.disk_hits++;
335 else if (logType == LOG_TCP_MEM_HIT)
336 statCounter.client_http.mem_hits++;
337 }
338
339 void
340 clientUpdateStatHistCounters(log_type logType, int svc_time)
341 {
342 statHistCount(&statCounter.client_http.all_svc_time, svc_time);
343 /**
344 * The idea here is not to be complete, but to get service times
345 * for only well-defined types. For example, we don't include
346 * LOG_TCP_REFRESH_FAIL because it's not really a cache hit
347 * (we *tried* to validate it, but failed).
348 */
349
350 switch (logType) {
351
352 case LOG_TCP_REFRESH_UNMODIFIED:
353 statHistCount(&statCounter.client_http.nh_svc_time, svc_time);
354 break;
355
356 case LOG_TCP_IMS_HIT:
357 statHistCount(&statCounter.client_http.nm_svc_time, svc_time);
358 break;
359
360 case LOG_TCP_HIT:
361
362 case LOG_TCP_MEM_HIT:
363
364 case LOG_TCP_OFFLINE_HIT:
365 statHistCount(&statCounter.client_http.hit_svc_time, svc_time);
366 break;
367
368 case LOG_TCP_MISS:
369
370 case LOG_TCP_CLIENT_REFRESH_MISS:
371 statHistCount(&statCounter.client_http.miss_svc_time, svc_time);
372 break;
373
374 default:
375 /* make compiler warnings go away */
376 break;
377 }
378 }
379
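/// a peer ping has finished once both its start and stop timestamps are set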
380 bool
381 clientPingHasFinished(ping_data const *aPing)
382 {
383 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
384 return true;
385
386 return false;
387 }
388
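/// updates cache digest, ICP and netdb usage counters according to the hierarchy code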
389 void
390 clientUpdateHierCounters(HierarchyLogEntry * someEntry)
391 {
392 ping_data *i;
393
394 switch (someEntry->code) {
395 #if USE_CACHE_DIGESTS
396
397 case CD_PARENT_HIT:
398
399 case CD_SIBLING_HIT:
400 statCounter.cd.times_used++;
401 break;
402 #endif
403
404 case SIBLING_HIT:
405
406 case PARENT_HIT:
407
408 case FIRST_PARENT_MISS:
409
410 case CLOSEST_PARENT_MISS:
411 statCounter.icp.times_used++;
412 i = &someEntry->ping;
413
414 if (clientPingHasFinished(i))
415 statHistCount(&statCounter.icp.query_svc_time,
416 tvSubUsec(i->start, i->stop));
417
418 if (i->timeout)
419 statCounter.icp.query_timeouts++;
420
421 break;
422
423 case CLOSEST_PARENT:
424
425 case CLOSEST_DIRECT:
426 statCounter.netdb.times_used++;
427
428 break;
429
430 default:
431 break;
432 }
433 }
434
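/// updates request, error, service-time and hierarchy counters for this transaction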
435 void
436 ClientHttpRequest::updateCounters()
437 {
438 clientUpdateStatCounters(logType);
439
440 if (request->errType != ERR_NONE)
441 statCounter.client_http.errors++;
442
443 clientUpdateStatHistCounters(logType,
444 tvSubMsec(start_time, current_time));
445
446 clientUpdateHierCounters(&request->hier);
447 }
448
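/// copies request details (headers, method, version, sizes and user credentials) into the access log entry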
449 void
450 prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry * aLogEntry)
451 {
452 assert(request);
453 assert(aLogEntry);
454
455 #if ICAP_CLIENT
456 Adaptation::Icap::History::Pointer ih = request->icapHistory();
457 #endif
458 if (Config.onoff.log_mime_hdrs) {
459 Packer p;
460 MemBuf mb;
461 mb.init();
462 packerToMemInit(&p, &mb);
463 request->header.packInto(&p);
464 //This is the request after adaptation or redirection
465 aLogEntry->headers.adapted_request = xstrdup(mb.buf);
466
467 // the virgin request is saved to aLogEntry->request
468 if (aLogEntry->request) {
469 packerClean(&p);
470 mb.reset();
471 packerToMemInit(&p, &mb);
472 aLogEntry->request->header.packInto(&p);
473 aLogEntry->headers.request = xstrdup(mb.buf);
474 }
475
476 #if ICAP_CLIENT
477 packerClean(&p);
478 mb.reset();
479 packerToMemInit(&p, &mb);
480
481 if (ih != NULL)
482 ih->lastIcapHeader.packInto(&p);
483 aLogEntry->headers.icap = xstrdup(mb.buf);
484 #endif
485
486 packerClean(&p);
487 mb.clean();
488 }
489
490 #if ICAP_CLIENT
491 if (ih != NULL)
492 aLogEntry->icap.processingTime = ih->processingTime();
493 #endif
494
495 aLogEntry->http.method = request->method;
496 aLogEntry->http.version = request->http_ver;
497 aLogEntry->hier = request->hier;
498 if (request->content_length > 0) // negative when no body or unknown length
499 aLogEntry->cache.requestSize += request->content_length;
500 aLogEntry->cache.extuser = request->extacl_user.termedBuf();
501
502 if (request->auth_user_request) {
503
504 if (request->auth_user_request->username())
505 aLogEntry->cache.authuser =
506 xstrdup(request->auth_user_request->username());
507
508 AUTHUSERREQUESTUNLOCK(request->auth_user_request, "request via clientPrepareLogWithRequestDetails");
509 }
510 }
511
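/// fills the access log entry for this request and records it when the log ACLs permit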
512 void
513 ClientHttpRequest::logRequest()
514 {
515 if (out.size || logType) {
516 al.icp.opcode = ICP_INVALID;
517 al.url = log_uri;
518 debugs(33, 9, "clientLogRequest: al.url='" << al.url << "'");
519
520 if (al.reply) {
521 al.http.code = al.reply->sline.status;
522 al.http.content_type = al.reply->content_type.termedBuf();
523 } else if (loggingEntry() && loggingEntry()->mem_obj) {
524 al.http.code = loggingEntry()->mem_obj->getReply()->sline.status;
525 al.http.content_type = loggingEntry()->mem_obj->getReply()->content_type.termedBuf();
526 }
527
528 debugs(33, 9, "clientLogRequest: http.code='" << al.http.code << "'");
529
530 if (loggingEntry() && loggingEntry()->mem_obj)
531 al.cache.objectSize = loggingEntry()->contentLen();
532
533 al.cache.caddr.SetNoAddr();
534
535 if (getConn() != NULL) al.cache.caddr = getConn()->log_addr;
536
537 al.cache.requestSize = req_sz;
538 al.cache.requestHeadersSize = req_sz;
539
540 al.cache.replySize = out.size;
541 al.cache.replyHeadersSize = out.headers_sz;
542
543 al.cache.highOffset = out.offset;
544
545 al.cache.code = logType;
546
547 al.cache.msec = tvSubMsec(start_time, current_time);
548
549 if (request)
550 prepareLogWithRequestDetails(request, &al);
551
552 if (getConn() != NULL && getConn()->rfc931[0])
553 al.cache.rfc931 = getConn()->rfc931;
554
555 #if USE_SSL && 0
556
557 /* This is broken. Fails if the connection has been closed. Needs
558 * to snarf the ssl details some place earlier..
559 */
560 if (getConn() != NULL)
561 al.cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);
562
563 #endif
564
565 ACLFilledChecklist *checklist = clientAclChecklistCreate(Config.accessList.log, this);
566
567 if (al.reply)
568 checklist->reply = HTTPMSGLOCK(al.reply);
569
570 if (!Config.accessList.log || checklist->fastCheck()) {
571 if (request)
572 al.adapted_request = HTTPMSGLOCK(request);
573 accessLogLog(&al, checklist);
574 updateCounters();
575
576 if (getConn() != NULL)
577 clientdbUpdate(getConn()->peer, logType, PROTO_HTTP, out.size);
578 }
579
580 delete checklist;
581
582 accessLogFreeMemory(&al);
583 }
584 }
585
586 void
587 ClientHttpRequest::freeResources()
588 {
589 safe_free(uri);
590 safe_free(log_uri);
591 safe_free(redirect.location);
592 range_iter.boundary.clean();
593 HTTPMSGUNLOCK(request);
594
595 if (client_stream.tail)
596 clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
597 }
598
599 void
600 httpRequestFree(void *data)
601 {
602 ClientHttpRequest *http = (ClientHttpRequest *)data;
603 assert(http != NULL);
604 delete http;
605 }
606
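/// whether every context queued on this connection actually belongs to it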
607 bool
608 ConnStateData::areAllContextsForThisConnection() const
609 {
610 assert(this != NULL);
611 ClientSocketContext::Pointer context = getCurrentContext();
612
613 while (context.getRaw()) {
614 if (context->http->getConn() != this)
615 return false;
616
617 context = context->next;
618 }
619
620 return true;
621 }
622
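/// finishes (and thereby dequeues) every context still attached to this connection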
623 void
624 ConnStateData::freeAllContexts()
625 {
626 ClientSocketContext::Pointer context;
627
628 while ((context = getCurrentContext()).getRaw() != NULL) {
629 assert(getCurrentContext() !=
630 getCurrentContext()->next);
631 context->connIsFinished();
632 assert (context != currentobject);
633 }
634 }
635
636 /// propagates abort event to all contexts
637 void
638 ConnStateData::notifyAllContexts(int xerrno)
639 {
640 typedef ClientSocketContext::Pointer CSCP;
641 for (CSCP c = getCurrentContext(); c.getRaw(); c = c->next)
642 c->noteIoError(xerrno);
643 }
644
645 /* This is a handler normally called by comm_close() */
646 void ConnStateData::connStateClosed(const CommCloseCbParams &io)
647 {
648 assert (fd == io.fd);
649 deleteThis("ConnStateData::connStateClosed");
650 }
651
652 // cleans up before destructor is called
653 void
654 ConnStateData::swanSong()
655 {
656 debugs(33, 2, "ConnStateData::swanSong: FD " << fd);
657 fd = -1;
658 flags.readMoreRequests = false;
659 clientdbEstablished(peer, -1); /* decrement */
660 assert(areAllContextsForThisConnection());
661 freeAllContexts();
662
663 if (auth_user_request != NULL) {
664 debugs(33, 4, "ConnStateData::swanSong: freeing auth_user_request '" << auth_user_request << "' (this is '" << this << "')");
665 auth_user_request->onConnectionClose(this);
666 }
667
668 if (pinning.fd >= 0)
669 comm_close(pinning.fd);
670
671 BodyProducer::swanSong();
672 flags.swanSang = true;
673 }
674
675 bool
676 ConnStateData::isOpen() const
677 {
678 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
679 fd >= 0 &&
680 !fd_table[fd].closing();
681 }
682
683 ConnStateData::~ConnStateData()
684 {
685 assert(this != NULL);
686 debugs(33, 3, "ConnStateData::~ConnStateData: FD " << fd);
687
688 if (isOpen())
689 debugs(33, 1, "BUG: ConnStateData did not close FD " << fd);
690
691 if (!flags.swanSang)
692 debugs(33, 1, "BUG: ConnStateData was not destroyed properly; FD " << fd);
693
694 AUTHUSERREQUESTUNLOCK(auth_user_request, "~conn");
695
696 cbdataReferenceDone(port);
697
698 if (bodyPipe != NULL)
699 stopProducingFor(bodyPipe, false);
700 }
701
702 /**
703 * clientSetKeepaliveFlag() sets request->flags.proxy_keepalive.
704 * This is the client-side persistent connection flag. We need
705 * to set this relatively early in the request processing
706 * to handle hacks for broken servers and clients.
707 */
708 static void
709 clientSetKeepaliveFlag(ClientHttpRequest * http)
710 {
711 HttpRequest *request = http->request;
712 const HttpHeader *req_hdr = &request->header;
713
714 debugs(33, 3, "clientSetKeepaliveFlag: http_ver = " <<
715 request->http_ver.major << "." << request->http_ver.minor);
716 debugs(33, 3, "clientSetKeepaliveFlag: method = " <<
717 RequestMethodStr(request->method));
718
719 /* We are HTTP/1.1 facing clients now */
720 HttpVersion http_ver(1,1);
721
722 if (httpMsgIsPersistent(http_ver, req_hdr))
723 request->flags.proxy_keepalive = 1;
724 }
725
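/// checks that the request's Content-Length is consistent with its method: PUT/POST require a body, GET/HEAD normally must not have one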
726 static int
727 clientIsContentLengthValid(HttpRequest * r)
728 {
729 switch (r->method.id()) {
730
731 case METHOD_PUT:
732
733 case METHOD_POST:
734 /* PUT/POST requires a request entity */
735 return (r->content_length >= 0);
736
737 case METHOD_GET:
738
739 case METHOD_HEAD:
740 /* We do not want to see a request entity on GET/HEAD requests */
741 return (r->content_length <= 0 || Config.onoff.request_entities);
742
743 default:
744 /* For other types of requests we don't care */
745 return 1;
746 }
747
748 /* NOT REACHED */
749 }
750
751 int
752 clientIsRequestBodyValid(int64_t bodyLength)
753 {
754 if (bodyLength >= 0)
755 return 1;
756
757 return 0;
758 }
759
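/// returns 1 when the advertised body length exceeds the configured maximum request body size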
760 int
761 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
762 {
763 if (Config.maxRequestBodySize &&
764 bodyLength > Config.maxRequestBodySize)
765 return 1; /* too large */
766
767 return 0;
768 }
769
770 #ifndef PURIFY
771 int
772 connIsUsable(ConnStateData * conn)
773 {
774 if (conn == NULL || !cbdataReferenceValid(conn) || conn->fd == -1)
775 return 0;
776
777 return 1;
778 }
779
780 #endif
781
782 ClientSocketContext::Pointer
783 ConnStateData::getCurrentContext() const
784 {
785 assert(this);
786 return currentobject;
787 }
788
789 void
790 ClientSocketContext::deferRecipientForLater(clientStreamNode * node, HttpReply * rep, StoreIOBuffer receivedData)
791 {
792 debugs(33, 2, "clientSocketRecipient: Deferring request " << http->uri);
793 assert(flags.deferred == 0);
794 flags.deferred = 1;
795 deferredparams.node = node;
796 deferredparams.rep = rep;
797 deferredparams.queuedBuffer = receivedData;
798 return;
799 }
800
801 int
802 responseFinishedOrFailed(HttpReply * rep, StoreIOBuffer const & receivedData)
803 {
804 if (rep == NULL && receivedData.data == NULL && receivedData.length == 0)
805 return 1;
806
807 return 0;
808 }
809
810 bool
811 ClientSocketContext::startOfOutput() const
812 {
813 return http->out.size == 0;
814 }
815
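/// how many bytes of the available data may be sent now, limited by the current range spec's remaining debt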
816 size_t
817 ClientSocketContext::lengthToSend(Range<int64_t> const &available)
818 {
819 /* the size of the available range can always fit in a size_t type */
820 size_t maximum = (size_t)available.size();
821
822 if (!http->request->range)
823 return maximum;
824
825 assert (canPackMoreRanges());
826
827 if (http->range_iter.debt() == -1)
828 return maximum;
829
830 assert (http->range_iter.debt() > 0);
831
832 /* TODO this + the last line could be a range intersection calculation */
833 if (available.start < http->range_iter.currentSpec()->offset)
834 return 0;
835
836 return min(http->range_iter.debt(), (int64_t)maximum);
837 }
838
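/// accounts for body bytes written: advances the output offset and pays down the current range debt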
839 void
840 ClientSocketContext::noteSentBodyBytes(size_t bytes)
841 {
842 http->out.offset += bytes;
843
844 if (!http->request->range)
845 return;
846
847 if (http->range_iter.debt() != -1) {
848 http->range_iter.debt(http->range_iter.debt() - bytes);
849 assert (http->range_iter.debt() >= 0);
850 }
851
852 /* debt() always stops at -1, below that is a bug */
853 assert (http->range_iter.debt() >= -1);
854 }
855
856 bool
857 ClientHttpRequest::multipartRangeRequest() const
858 {
859 return request->multipartRangeRequest();
860 }
861
862 bool
863 ClientSocketContext::multipartRangeRequest() const
864 {
865 return http->multipartRangeRequest();
866 }
867
868 void
869 ClientSocketContext::sendBody(HttpReply * rep, StoreIOBuffer bodyData)
870 {
871 assert(rep == NULL);
872
873 if (!multipartRangeRequest()) {
874 size_t length = lengthToSend(bodyData.range());
875 noteSentBodyBytes (length);
876 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteBodyComplete",
877 CommIoCbPtrFun(clientWriteBodyComplete, this));
878 comm_write(fd(), bodyData.data, length, call );
879 return;
880 }
881
882 MemBuf mb;
883 mb.init();
884 packRange(bodyData, &mb);
885
886 if (mb.contentSize()) {
887 /* write */
888 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteComplete",
889 CommIoCbPtrFun(clientWriteComplete, this));
890 comm_write_mbuf(fd(), &mb, call);
891 } else
892 writeComplete(fd(), NULL, 0, COMM_OK);
893 }
894
895 /** put terminating boundary for multiparts */
896 static void
897 clientPackTermBound(String boundary, MemBuf * mb)
898 {
899 mb->Printf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
900 debugs(33, 6, "clientPackTermBound: buf offset: " << mb->size);
901 }
902
903 /** appends a "part" HTTP header (as in a multi-part/range reply) to the buffer */
904 static void
905 clientPackRangeHdr(const HttpReply * rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
906 {
907 HttpHeader hdr(hoReply);
908 Packer p;
909 assert(rep);
910 assert(spec);
911
912 /* put boundary */
913 debugs(33, 5, "clientPackRangeHdr: appending boundary: " << boundary);
914 /* rfc2046 requires the boundary to be _prepended_ with <crlf>! */
915 mb->Printf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));
916
917 /* stuff the header with required entries and pack it */
918
919 if (rep->header.has(HDR_CONTENT_TYPE))
920 hdr.putStr(HDR_CONTENT_TYPE, rep->header.getStr(HDR_CONTENT_TYPE));
921
922 httpHeaderAddContRange(&hdr, *spec, rep->content_length);
923
924 packerToMemInit(&p, mb);
925
926 hdr.packInto(&p);
927
928 packerClean(&p);
929
930 hdr.clean();
931
932 /* append <crlf> (we packed a header, not a reply) */
933 mb->Printf("\r\n");
934 }
935
936 /**
937 * extracts a "range" from *buf and appends them to mb, updating
938 * all offsets and such.
939 */
940 void
941 ClientSocketContext::packRange(StoreIOBuffer const &source, MemBuf * mb)
942 {
943 HttpHdrRangeIter * i = &http->range_iter;
944 Range<int64_t> available (source.range());
945 char const *buf = source.data;
946
947 while (i->currentSpec() && available.size()) {
948 const size_t copy_sz = lengthToSend(available);
949
950 if (copy_sz) {
951 /*
952 * intersection of "have" and "need" ranges must not be empty
953 */
954 assert(http->out.offset < i->currentSpec()->offset + i->currentSpec()->length);
955 assert(http->out.offset + available.size() > i->currentSpec()->offset);
956
957 /*
958 * put boundary and headers at the beginning of a range in a
959 * multi-range
960 */
961
962 if (http->multipartRangeRequest() && i->debt() == i->currentSpec()->length) {
963 assert(http->memObject());
964 clientPackRangeHdr(
965 http->memObject()->getReply(), /* original reply */
966 i->currentSpec(), /* current range */
967 i->boundary, /* boundary, the same for all */
968 mb);
969 }
970
971 /*
972 * append content
973 */
974 debugs(33, 3, "clientPackRange: appending " << copy_sz << " bytes");
975
976 noteSentBodyBytes (copy_sz);
977
978 mb->append(buf, copy_sz);
979
980 /*
981 * update offsets
982 */
983 available.start += copy_sz;
984
985 buf += copy_sz;
986
987 }
988
989 /*
990 * paranoid check
991 */
992 assert((available.size() >= 0 && i->debt() >= 0) || i->debt() == -1);
993
994 if (!canPackMoreRanges()) {
995 debugs(33, 3, "clientPackRange: Returning because !canPackMoreRanges.");
996
997 if (i->debt() == 0)
998 /* put terminating boundary for multiparts */
999 clientPackTermBound(i->boundary, mb);
1000
1001 return;
1002 }
1003
1004 int64_t nextOffset = getNextRangeOffset();
1005
1006 assert (nextOffset >= http->out.offset);
1007
1008 int64_t skip = nextOffset - http->out.offset;
1009
1010 /* adjust for bytes that will not be transmitted */
1011 http->out.offset = nextOffset;
1012
1013 if (available.size() <= skip)
1014 return;
1015
1016 available.start += skip;
1017
1018 buf += skip;
1019
1020 if (copy_sz == 0)
1021 return;
1022 }
1023 }
1024
1025 /** returns expected content length for multi-range replies
1026 * note: assumes that httpHdrRangeCanonize has already been called
1027 * warning: assumes that HTTP headers for individual ranges at the
1028 * time of the actual assembly will be exactly the same as
1029 * the headers when clientMRangeCLen() is called */
1030 int
1031 ClientHttpRequest::mRangeCLen()
1032 {
1033 int64_t clen = 0;
1034 MemBuf mb;
1035
1036 assert(memObject());
1037
1038 mb.init();
1039 HttpHdrRange::iterator pos = request->range->begin();
1040
1041 while (pos != request->range->end()) {
1042 /* account for headers for this range */
1043 mb.reset();
1044 clientPackRangeHdr(memObject()->getReply(),
1045 *pos, range_iter.boundary, &mb);
1046 clen += mb.size;
1047
1048 /* account for range content */
1049 clen += (*pos)->length;
1050
1051 debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
1052 ++pos;
1053 }
1054
1055 /* account for the terminating boundary */
1056 mb.reset();
1057
1058 clientPackTermBound(range_iter.boundary, &mb);
1059
1060 clen += mb.size;
1061
1062 mb.clean();
1063
1064 return clen;
1065 }
1066
1067 /**
1068 * returns true if If-Range specs match reply, false otherwise
1069 */
1070 static int
1071 clientIfRangeMatch(ClientHttpRequest * http, HttpReply * rep)
1072 {
1073 const TimeOrTag spec = http->request->header.getTimeOrTag(HDR_IF_RANGE);
1074 /* check for parsing failure */
1075
1076 if (!spec.valid)
1077 return 0;
1078
1079 /* got an ETag? */
1080 if (spec.tag.str) {
1081 ETag rep_tag = rep->header.getETag(HDR_ETAG);
1082 debugs(33, 3, "clientIfRangeMatch: ETags: " << spec.tag.str << " and " <<
1083 (rep_tag.str ? rep_tag.str : "<none>"));
1084
1085 if (!rep_tag.str)
1086 return 0; /* entity has no etag to compare with! */
1087
1088 if (spec.tag.weak || rep_tag.weak) {
1089 debugs(33, 1, "clientIfRangeMatch: Weak ETags are not allowed in If-Range: " << spec.tag.str << " ? " << rep_tag.str);
1090 return 0; /* must use strong validator for sub-range requests */
1091 }
1092
1093 return etagIsEqual(&rep_tag, &spec.tag);
1094 }
1095
1096 /* got modification time? */
1097 if (spec.time >= 0) {
1098 return http->storeEntry()->lastmod <= spec.time;
1099 }
1100
1101 assert(0); /* should not happen */
1102 return 0;
1103 }
1104
1105 /**
1106 * generates a "unique" boundary string for multipart responses
1107 * the caller is responsible for cleaning the string */
1108 String
1109 ClientHttpRequest::rangeBoundaryStr() const
1110 {
1111 assert(this);
1112 const char *key;
1113 String b(APP_FULLNAME);
1114 b.append(":",1);
1115 key = storeEntry()->getMD5Text();
1116 b.append(key, strlen(key));
1117 return b;
1118 }
1119
1120 /** adds appropriate Range headers if needed */
1121 void
1122 ClientSocketContext::buildRangeHeader(HttpReply * rep)
1123 {
1124 HttpHeader *hdr = rep ? &rep->header : 0;
1125 const char *range_err = NULL;
1126 HttpRequest *request = http->request;
1127 assert(request->range);
1128 /* check if we still want to do ranges */
1129
1130 int64_t roffLimit = request->getRangeOffsetLimit();
1131
1132 if (!rep)
1133 range_err = "no [parse-able] reply";
1134 else if ((rep->sline.status != HTTP_OK) && (rep->sline.status != HTTP_PARTIAL_CONTENT))
1135 range_err = "wrong status code";
1136 else if (hdr->has(HDR_CONTENT_RANGE))
1137 range_err = "origin server does ranges";
1138 else if (rep->content_length < 0)
1139 range_err = "unknown length";
1140 else if (rep->content_length != http->memObject()->getReply()->content_length)
1141 range_err = "INCONSISTENT length"; /* a bug? */
1142
1143 /* hits only - upstream peer determines correct behaviour on misses, and client_side_reply determines
1144 * hits candidates
1145 */
1146 else if (logTypeIsATcpHit(http->logType) && http->request->header.has(HDR_IF_RANGE) && !clientIfRangeMatch(http, rep))
1147 range_err = "If-Range match failed";
1148 else if (!http->request->range->canonize(rep))
1149 range_err = "canonization failed";
1150 else if (http->request->range->isComplex())
1151 range_err = "too complex range header";
1152 else if (!logTypeIsATcpHit(http->logType) && http->request->range->offsetLimitExceeded(roffLimit))
1153 range_err = "range outside range_offset_limit";
1154
1155 /* get rid of our range specs on error */
1156 if (range_err) {
1157 /* XXX We do this here because we need canonisation etc. However, this current
1158 * code will lead to incorrect store offset requests - the store will have the
1159 * offset data, but we won't be requesting it.
1160 * So, we can either re-request, or generate an error
1161 */
1162 debugs(33, 3, "clientBuildRangeHeader: will not do ranges: " << range_err << ".");
1163 delete http->request->range;
1164 http->request->range = NULL;
1165 } else {
1166 /* XXX: TODO: Review, this unconditional set may be wrong. */
1167 httpStatusLineSet(&rep->sline, rep->sline.version,
1168 HTTP_PARTIAL_CONTENT, NULL);
1169 // web server responded with a valid, but unexpected range.
1170 // will (try-to) forward as-is.
1171 //TODO: we should cope with multirange request/responses
1172 bool replyMatchRequest = rep->content_range != NULL ?
1173 request->range->contains(rep->content_range->spec) :
1174 true;
1175 const int spec_count = http->request->range->specs.count;
1176 int64_t actual_clen = -1;
1177
1178 debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
1179 spec_count << " virgin clen: " << rep->content_length);
1180 assert(spec_count > 0);
1181 /* ETags should not be returned with Partial Content replies? */
1182 hdr->delById(HDR_ETAG);
1183 /* append appropriate header(s) */
1184
1185 if (spec_count == 1) {
1186 if (!replyMatchRequest) {
1187 hdr->delById(HDR_CONTENT_RANGE);
1188 hdr->putContRange(rep->content_range);
1189 actual_clen = rep->content_length;
1190 //http->range_iter.pos = rep->content_range->spec.begin();
1191 (*http->range_iter.pos)->offset = rep->content_range->spec.offset;
1192 (*http->range_iter.pos)->length = rep->content_range->spec.length;
1193
1194 } else {
1195 HttpHdrRange::iterator pos = http->request->range->begin();
1196 assert(*pos);
1197 /* append Content-Range */
1198
1199 if (!hdr->has(HDR_CONTENT_RANGE)) {
1200 /* No content range, so this was a full object we are
1201 * sending parts of.
1202 */
1203 httpHeaderAddContRange(hdr, **pos, rep->content_length);
1204 }
1205
1206 /* set new Content-Length to the actual number of bytes
1207 * transmitted in the message-body */
1208 actual_clen = (*pos)->length;
1209 }
1210 } else {
1211 /* multipart! */
1212 /* generate boundary string */
1213 http->range_iter.boundary = http->rangeBoundaryStr();
1214 /* delete old Content-Type, add ours */
1215 hdr->delById(HDR_CONTENT_TYPE);
1216 httpHeaderPutStrf(hdr, HDR_CONTENT_TYPE,
1217 "multipart/byteranges; boundary=\"" SQUIDSTRINGPH "\"",
1218 SQUIDSTRINGPRINT(http->range_iter.boundary));
1219 /* Content-Length is not required in multipart responses
1220 * but it is always nice to have one */
1221 actual_clen = http->mRangeCLen();
1222 /* http->out needs to start where we want data at */
1223 http->out.offset = http->range_iter.currentSpec()->offset;
1224 }
1225
1226 /* replace Content-Length header */
1227 assert(actual_clen >= 0);
1228
1229 hdr->delById(HDR_CONTENT_LENGTH);
1230
1231 hdr->putInt64(HDR_CONTENT_LENGTH, actual_clen);
1232
1233 debugs(33, 3, "clientBuildRangeHeader: actual content length: " << actual_clen);
1234
1235 /* And start the range iter off */
1236 http->range_iter.updateSpec();
1237 }
1238 }
1239
1240 void
1241 ClientSocketContext::prepareReply(HttpReply * rep)
1242 {
1243 reply = rep;
1244
1245 if (http->request->range)
1246 buildRangeHeader(rep);
1247 }
1248
1249 void
1250 ClientSocketContext::sendStartOfMessage(HttpReply * rep, StoreIOBuffer bodyData)
1251 {
1252 prepareReply(rep);
1253 assert (rep);
1254 MemBuf *mb = rep->pack();
1255 /* Save length of headers for persistent conn checks */
1256 http->out.headers_sz = mb->contentSize();
1257 #if HEADERS_LOG
1258
1259 headersLog(0, 0, http->request->method, rep);
1260 #endif
1261
1262 if (bodyData.data && bodyData.length) {
1263 if (!multipartRangeRequest()) {
1264 size_t length = lengthToSend(bodyData.range());
1265 noteSentBodyBytes (length);
1266
1267 mb->append(bodyData.data, length);
1268 } else {
1269 packRange(bodyData, mb);
1270 }
1271 }
1272
1273 /* write */
1274 debugs(33,7, HERE << "sendStartOfMessage schedules clientWriteComplete");
1275 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteComplete",
1276 CommIoCbPtrFun(clientWriteComplete, this));
1277 comm_write_mbuf(fd(), mb, call);
1278
1279 delete mb;
1280 }
1281
1282 /**
1283 * Write a chunk of data to a client socket. If the reply is present,
1284 * send the reply headers down the wire too, and clean them up when
1285 * finished.
1286 * Pre-condition:
1287 * The request is one backed by a connection, not an internal request.
1288 * data context is not NULL
1289 * There are no more entries in the stream chain.
1290 */
1291 static void
1292 clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
1293 HttpReply * rep, StoreIOBuffer receivedData)
1294 {
1295 int fd;
1296 /* Test preconditions */
1297 assert(node != NULL);
1298 PROF_start(clientSocketRecipient);
1299 /* TODO: handle this rather than asserting
1300 * - it should only ever happen if we cause an abort and
1301 * the callback chain loops back to here, so we can simply return.
1302 * However, that itself shouldn't happen, so it stays as an assert for now.
1303 */
1304 assert(cbdataReferenceValid(node));
1305 assert(node->node.next == NULL);
1306 ClientSocketContext::Pointer context = dynamic_cast<ClientSocketContext *>(node->data.getRaw());
1307 assert(context != NULL);
1308 assert(connIsUsable(http->getConn()));
1309 fd = http->getConn()->fd;
1310 /* TODO: check offset is what we asked for */
1311
1312 if (context != http->getConn()->getCurrentContext()) {
1313 context->deferRecipientForLater(node, rep, receivedData);
1314 PROF_stop(clientSocketRecipient);
1315 return;
1316 }
1317
1318 if (responseFinishedOrFailed(rep, receivedData)) {
1319 context->writeComplete(fd, NULL, 0, COMM_OK);
1320 PROF_stop(clientSocketRecipient);
1321 return;
1322 }
1323
1324 if (!context->startOfOutput())
1325 context->sendBody(rep, receivedData);
1326 else {
1327 assert(rep);
1328 http->al.reply = HTTPMSGLOCK(rep);
1329 context->sendStartOfMessage(rep, receivedData);
1330 }
1331
1332 PROF_stop(clientSocketRecipient);
1333 }
1334
1335 /**
1336 * Called when a downstream node is no longer interested in
1337 * our data. As we are a terminal node, this means on aborts
1338 * only
1339 */
1340 void
1341 clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
1342 {
1343 /* Test preconditions */
1344 assert(node != NULL);
1345 /* TODO: handle this rather than asserting
1346 * - it should only ever happen if we cause an abort and
1347 * the callback chain loops back to here, so we can simply return.
1348 * However, that itself shouldn't happen, so it stays as an assert for now.
1349 */
1350 assert(cbdataReferenceValid(node));
1351 /* Set null by ContextFree */
1352 assert(node->node.next == NULL);
1353 /* this is the assert discussed above */
1354 assert(NULL == dynamic_cast<ClientSocketContext *>(node->data.getRaw()));
1355 * We are only called when the client socket shuts down.
1356 * Tell the prev pipeline member we're finished
1357 */
1358 clientStreamDetach(node, http);
1359 }
1360
1361 static void
1362 clientWriteBodyComplete(int fd, char *buf, size_t size, comm_err_t errflag, int xerrno, void *data)
1363 {
1364 debugs(33,7, HERE << "clientWriteBodyComplete schedules clientWriteComplete");
1365 clientWriteComplete(fd, NULL, size, errflag, xerrno, data);
1366 }
1367
1368 void
1369 ConnStateData::readNextRequest()
1370 {
1371 debugs(33, 5, "ConnStateData::readNextRequest: FD " << fd << " reading next req");
1372
1373 fd_note(fd, "Waiting for next request");
1374 /**
1375 * Set the timeout BEFORE calling clientReadRequest().
1376 */
1377 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
1378 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
1379 TimeoutDialer(this, &ConnStateData::requestTimeout));
1380 commSetTimeout(fd, Config.Timeout.persistent_request, timeoutCall);
1381
1382 readSomeData();
1383 /** Please don't do anything with the FD past here! */
1384 }
1385
1386 static void
1387 ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest, ConnStateData * conn)
1388 {
1389 debugs(33, 2, "ClientSocketContextPushDeferredIfNeeded: FD " << conn->fd << " Sending next");
1390
1391 /** If the client stream is waiting on a socket write to occur, then */
1392
1393 if (deferredRequest->flags.deferred) {
1394 /** NO data is allowed to have been sent. */
1395 assert(deferredRequest->http->out.size == 0);
1396 /** defer now. */
1397 clientSocketRecipient(deferredRequest->deferredparams.node,
1398 deferredRequest->http,
1399 deferredRequest->deferredparams.rep,
1400 deferredRequest->deferredparams.queuedBuffer);
1401 }
1402
1403 /** otherwise, the request is still active in a callback somewhere,
1404 * and we are done
1405 */
1406 }
1407
1408 void
1409 ClientSocketContext::keepaliveNextRequest()
1410 {
1411 ConnStateData * conn = http->getConn();
1412 bool do_next_read = false;
1413
1414 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: FD " << conn->fd);
1415 connIsFinished();
1416
1417 if (conn->pinning.pinned && conn->pinning.fd == -1) {
1418 debugs(33, 2, "clientKeepaliveNextRequest: FD " << conn->fd << " Connection was pinned but server side gone. Terminating client connection");
1419 comm_close(conn->fd);
1420 return;
1421 }
1422
1423 /** \par
1424 * Attempt to parse a request from the request buffer.
1425 * If we've been fed a pipelined request it may already
1426 * be in our read buffer.
1427 *
1428 \par
1429 * This needs to fall through - if we're unlucky and parse the _last_ request
1430 * from our read buffer we may never re-register for another client read.
1431 */
1432
1433 if (clientParseRequest(conn, do_next_read)) {
1434 debugs(33, 3, "clientSocketContext::keepaliveNextRequest: FD " << conn->fd << ": parsed next request from buffer");
1435 }
1436
1437 /** \par
1438 * Either we need to kick-start another read or, if we have
1439 * a half-closed connection, kill it after the last request.
1440 * This saves waiting for half-closed connections to finish being
1441 * half-closed _AND_ then, sometimes, spending "Timeout" time in
1442 * the keepalive "Waiting for next request" state.
1443 */
1444 if (commIsHalfClosed(conn->fd) && (conn->getConcurrentRequestCount() == 0)) {
1445 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing");
1446 comm_close(conn->fd);
1447 return;
1448 }
1449
1450 ClientSocketContext::Pointer deferredRequest;
1451
1452 /** \par
1453 * At this point we either have a parsed request (which we've
1454 * kicked off the processing for) or not. If we have a deferred
1455 * request (parsed but deferred for pipelining processing reasons)
1456 * then look at processing it. If not, simply kickstart
1457 * another read.
1458 */
1459
1460 if ((deferredRequest = conn->getCurrentContext()).getRaw()) {
1461 debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling PushDeferredIfNeeded");
1462 ClientSocketContextPushDeferredIfNeeded(deferredRequest, conn);
1463 } else {
1464 debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling conn->readNextRequest()");
1465 conn->readNextRequest();
1466 }
1467 }
1468
1469 void
1470 clientUpdateSocketStats(log_type logType, size_t size)
1471 {
1472 if (size == 0)
1473 return;
1474
1475 kb_incr(&statCounter.client_http.kbytes_out, size);
1476
1477 if (logTypeIsATcpHit(logType))
1478 kb_incr(&statCounter.client_http.hit_kbytes_out, size);
1479 }
1480
1481 /**
1482 * increments iterator "i"
1483 * used by clientPackMoreRanges
1484 *
1485 \retval true there is still data available to pack more ranges
1486 \retval false
1487 */
1488 bool
1489 ClientSocketContext::canPackMoreRanges() const
1490 {
1491 /** first update iterator "i" if needed */
1492
1493 if (!http->range_iter.debt()) {
1494 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: At end of current range spec for FD " << fd());
1495
1496 if (http->range_iter.pos.incrementable())
1497 ++http->range_iter.pos;
1498
1499 http->range_iter.updateSpec();
1500 }
1501
1502 assert(!http->range_iter.debt() == !http->range_iter.currentSpec());
1503
1504 /* paranoid sync condition */
1505 /* continue condition: need_more_data */
1506 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: returning " << (http->range_iter.currentSpec() ? true : false));
1507 return http->range_iter.currentSpec() ? true : false;
1508 }
1509
1510 int64_t
1511 ClientSocketContext::getNextRangeOffset() const
1512 {
1513 if (http->request->range) {
1514 /* offset in range specs does not count the prefix of an http msg */
1515 debugs (33, 5, "ClientSocketContext::getNextRangeOffset: http offset " << http->out.offset);
1516 /* check: reply was parsed and range iterator was initialized */
1517 assert(http->range_iter.valid);
1518 /* filter out data according to range specs */
1519 assert (canPackMoreRanges());
1520 {
1521 int64_t start; /* offset of still missing data */
1522 assert(http->range_iter.currentSpec());
1523 start = http->range_iter.currentSpec()->offset + http->range_iter.currentSpec()->length - http->range_iter.debt();
1524 debugs(33, 3, "clientPackMoreRanges: in: offset: " << http->out.offset);
1525 debugs(33, 3, "clientPackMoreRanges: out:"
1526 " start: " << start <<
1527 " spec[" << http->range_iter.pos - http->request->range->begin() << "]:" <<
1528 " [" << http->range_iter.currentSpec()->offset <<
1529 ", " << http->range_iter.currentSpec()->offset + http->range_iter.currentSpec()->length << "),"
1530 " len: " << http->range_iter.currentSpec()->length <<
1531 " debt: " << http->range_iter.debt());
1532 if (http->range_iter.currentSpec()->length != -1)
1533 assert(http->out.offset <= start); /* we did not miss it */
1534
1535 return start;
1536 }
1537
1538 } else if (reply && reply->content_range) {
1539 /* request does not have ranges, but reply does */
1540 /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range
1541 * becomes HttpHdrRange rather than HttpHdrRangeSpec.
1542 */
1543 return http->out.offset + reply->content_range->spec.offset;
1544 }
1545
1546 return http->out.offset;
1547 }
1548
1549 void
1550 ClientSocketContext::pullData()
1551 {
1552 debugs(33, 5, "ClientSocketContext::pullData: FD " << fd() <<
1553 " attempting to pull upstream data");
1554
1555 /* More data will be coming from the stream. */
1556 StoreIOBuffer readBuffer;
1557 /* XXX: Next requested byte in the range sequence */
1558 /* XXX: length = getmaximumrangelength */
1559 readBuffer.offset = getNextRangeOffset();
1560 readBuffer.length = HTTP_REQBUF_SZ;
1561 readBuffer.data = reqbuf;
1562 /* we may note we have reached the end of the wanted ranges */
1563 clientStreamRead(getTail(), http, readBuffer);
1564 }
1565
1566 clientStream_status_t
1567 ClientSocketContext::socketState()
1568 {
1569 switch (clientStreamStatus(getTail(), http)) {
1570
1571 case STREAM_NONE:
1572 /* check for range support ending */
1573
1574 if (http->request->range) {
1575 /* check: reply was parsed and range iterator was initialized */
1576 assert(http->range_iter.valid);
1577 /* filter out data according to range specs */
1578
1579 if (!canPackMoreRanges()) {
1580 debugs(33, 5, HERE << "Range request at end of returnable " <<
1581 "range sequence on FD " << fd());
1582
1583 if (http->request->flags.proxy_keepalive)
1584 return STREAM_COMPLETE;
1585 else
1586 return STREAM_UNPLANNED_COMPLETE;
1587 }
1588 } else if (reply && reply->content_range) {
1589 /* reply has content-range, but Squid is not managing ranges */
1590 const int64_t &bytesSent = http->out.offset;
1591 const int64_t &bytesExpected = reply->content_range->spec.length;
1592
1593 debugs(33, 7, HERE << "body bytes sent vs. expected: " <<
1594 bytesSent << " ? " << bytesExpected << " (+" <<
1595 reply->content_range->spec.offset << ")");
1596
1597 // did we get at least what we expected, based on range specs?
1598
1599 if (bytesSent == bytesExpected) { // got everything
1600 if (http->request->flags.proxy_keepalive)
1601 return STREAM_COMPLETE;
1602 else
1603 return STREAM_UNPLANNED_COMPLETE;
1604 }
1605
1606 // The logic below is not clear: If we got more than we
1607 // expected why would persistency matter? Should not this
1608 // always be an error?
1609 if (bytesSent > bytesExpected) { // got extra
1610 if (http->request->flags.proxy_keepalive)
1611 return STREAM_COMPLETE;
1612 else
1613 return STREAM_UNPLANNED_COMPLETE;
1614 }
1615
1616 // did not get enough yet, expecting more
1617 }
1618
1619 return STREAM_NONE;
1620
1621 case STREAM_COMPLETE:
1622 return STREAM_COMPLETE;
1623
1624 case STREAM_UNPLANNED_COMPLETE:
1625 return STREAM_UNPLANNED_COMPLETE;
1626
1627 case STREAM_FAILED:
1628 return STREAM_FAILED;
1629 }
1630
1631 fatal ("unreachable code\n");
1632 return STREAM_NONE;
1633 }
1634
1635 /**
1636 * A write has just completed to the client, or we have just realised there is
1637 * no more data to send.
1638 */
1639 void
1640 clientWriteComplete(int fd, char *bufnotused, size_t size, comm_err_t errflag, int xerrno, void *data)
1641 {
1642 ClientSocketContext *context = (ClientSocketContext *)data;
1643 context->writeComplete (fd, bufnotused, size, errflag);
1644 }
1645
1646 /// remembers the abnormal connection termination for logging purposes
1647 void
1648 ClientSocketContext::noteIoError(const int xerrno)
1649 {
1650 if (http) {
1651 if (xerrno == ETIMEDOUT)
1652 http->al.http.timedout = true;
1653 else // even if xerrno is zero (which means read abort/eof)
1654 http->al.http.aborted = true;
1655 }
1656 }
1657
1658
1659 void
1660 ClientSocketContext::doClose()
1661 {
1662 comm_close(fd());
1663 }
1664
1665 /** Called to initiate (and possibly complete) closing of the context.
1666 * The underlying socket may be already closed */
1667 void
1668 ClientSocketContext::initiateClose(const char *reason)
1669 {
1670 debugs(33, 5, HERE << "initiateClose: closing for " << reason);
1671
1672 if (http != NULL) {
1673 ConnStateData * conn = http->getConn();
1674
1675 if (conn != NULL) {
1676 if (const int64_t expecting = conn->bodySizeLeft()) {
1677 debugs(33, 5, HERE << "ClientSocketContext::initiateClose: " <<
1678 "closing, but first " << conn << " needs to read " <<
1679 expecting << " request body bytes with " <<
1680 conn->in.notYetUsed << " notYetUsed");
1681
1682 if (conn->closing()) {
1683 debugs(33, 2, HERE << "avoiding double-closing " << conn);
1684 return;
1685 }
1686
1687 /*
1688 * XXX We assume the reply fits in the TCP transmit
1689 * window. If not the connection may stall while sending
1690 * the reply (before reaching here) if the client does not
1691 * try to read the response while sending the request body.
1692 * As of yet we have not received any complaints indicating
1693 * this may be an issue.
1694 */
1695 conn->startClosing(reason);
1696
1697 return;
1698 }
1699 }
1700 }
1701
1702 doClose();
1703 }
1704
1705 void
1706 ClientSocketContext::writeComplete(int aFileDescriptor, char *bufnotused, size_t size, comm_err_t errflag)
1707 {
1708 StoreEntry *entry = http->storeEntry();
1709 http->out.size += size;
1710 assert(aFileDescriptor > -1);
1711 debugs(33, 5, "clientWriteComplete: FD " << aFileDescriptor << ", sz " << size <<
1712 ", err " << errflag << ", off " << http->out.size << ", len " <<
1713 (entry ? entry->objectLen() : 0));
1714 clientUpdateSocketStats(http->logType, size);
1715 assert (this->fd() == aFileDescriptor);
1716
1717 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
1718
1719 if (errflag == COMM_ERR_CLOSING)
1720 return;
1721
1722 if (errflag || clientHttpRequestStatus(aFileDescriptor, http)) {
1723 initiateClose("failure or true request status");
1724 /* Do we leak here ? */
1725 return;
1726 }
1727
1728 switch (socketState()) {
1729
1730 case STREAM_NONE:
1731 pullData();
1732 break;
1733
1734 case STREAM_COMPLETE:
1735 debugs(33, 5, "clientWriteComplete: FD " << aFileDescriptor << " Keeping Alive");
1736 keepaliveNextRequest();
1737 return;
1738
1739 case STREAM_UNPLANNED_COMPLETE:
1740 initiateClose("STREAM_UNPLANNED_COMPLETE");
1741 return;
1742
1743 case STREAM_FAILED:
1744 initiateClose("STREAM_FAILED");
1745 return;
1746
1747 default:
1748 fatal("Hit unreachable code in clientWriteComplete\n");
1749 }
1750 }
1751
1752 extern "C" CSR clientGetMoreData;
1753 extern "C" CSS clientReplyStatus;
1754 extern "C" CSD clientReplyDetach;
1755
1756 static ClientSocketContext *
1757 parseHttpRequestAbort(ConnStateData * conn, const char *uri)
1758 {
1759 ClientHttpRequest *http;
1760 ClientSocketContext *context;
1761 StoreIOBuffer tempBuffer;
1762 http = new ClientHttpRequest(conn);
1763 http->req_sz = conn->in.notYetUsed;
1764 http->uri = xstrdup(uri);
1765 setLogUri (http, uri);
1766 context = ClientSocketContextNew(http);
1767 tempBuffer.data = context->reqbuf;
1768 tempBuffer.length = HTTP_REQBUF_SZ;
1769 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1770 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1771 clientSocketDetach, context, tempBuffer);
1772 return context;
1773 }
1774
1775 char *
1776 skipLeadingSpace(char *aString)
1777 {
1778 char *result = aString;
1779
1780 while (xisspace(*result))
1781 ++result;
1782
1783 return result;
1784 }
1785
1786 /**
1787 * 'end' defaults to NULL for backwards compatibility
1788 * remove default value if we ever get rid of NULL-terminated
1789 * request buffers.
1790 */
1791 const char *
1792 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1793 {
1794 if (NULL == end) {
1795 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1796 assert(end);
1797 }
1798
1799 for (; end > uriAndHTTPVersion; end--) {
1800 if (*end == '\n' || *end == '\r')
1801 continue;
1802
1803 if (xisspace(*end)) {
1804 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1805 return end + 1;
1806 else
1807 break;
1808 }
1809 }
1810
1811 return NULL;
1812 }
1813
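/// stores a copy of the URI for logging, escaping it first if it contains control characters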
1814 void
1815 setLogUri(ClientHttpRequest * http, char const *uri)
1816 {
1817 safe_free(http->log_uri);
1818
1819 if (!stringHasCntl(uri))
1820 http->log_uri = xstrndup(uri, MAX_URL);
1821 else
1822 http->log_uri = xstrndup(rfc1738_escape_unescaped(uri), MAX_URL);
1823 }
1824
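/// reconstructs an absolute URL for accelerated (reverse proxy) requests from the Host header, the port's defaultsite, or the local socket address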
1825 static void
1826 prepareAcceleratedURL(ConnStateData * conn, ClientHttpRequest *http, char *url, const char *req_hdr)
1827 {
1828 int vhost = conn->port->vhost;
1829 int vport = conn->port->vport;
1830 char *host;
1831 char ntoabuf[MAX_IPSTRLEN];
1832
1833 http->flags.accel = 1;
1834
1835 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1836
1837 if (strncasecmp(url, "cache_object://", 15) == 0)
1838 return; /* already in good shape */
1839
1840 if (*url != '/') {
1841 if (conn->port->vhost)
1842 return; /* already in good shape */
1843
1844 /* else we need to ignore the host name */
1845 url = strstr(url, "//");
1846
1847 #if SHOULD_REJECT_UNKNOWN_URLS
1848
1849 if (!url) {
1850 hp->request_parse_status = HTTP_BAD_REQUEST;
1851 return parseHttpRequestAbort(conn, "error:invalid-request");
1852 }
1853 #endif
1854
1855 if (url)
1856 url = strchr(url + 2, '/');
1857
1858 if (!url)
1859 url = (char *) "/";
1860 }
1861
1862 if (internalCheck(url)) {
1863 /* prepend our name & port */
1864 http->uri = xstrdup(internalLocalUri(NULL, url));
1865 return;
1866 }
1867
1868 const bool switchedToHttps = conn->switchedToHttps();
1869 const bool tryHostHeader = vhost || switchedToHttps;
1870 if (tryHostHeader && (host = mime_get_header(req_hdr, "Host")) != NULL) {
1871 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1872 strlen(host);
1873 http->uri = (char *)xcalloc(url_sz, 1);
1874 const char *protocol = switchedToHttps ?
1875 "https" : conn->port->protocol;
1876 snprintf(http->uri, url_sz, "%s://%s%s", protocol, host, url);
1877 debugs(33, 5, "ACCEL VHOST REWRITE: '" << http->uri << "'");
1878 } else if (conn->port->defaultsite) {
1879 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1880 strlen(conn->port->defaultsite);
1881 http->uri = (char *)xcalloc(url_sz, 1);
1882 snprintf(http->uri, url_sz, "%s://%s%s",
1883 conn->port->protocol, conn->port->defaultsite, url);
1884 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http->uri <<"'");
1885 } else if (vport == -1) {
1886 /* Put the local socket IP address as the hostname. */
1887 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1888 http->uri = (char *)xcalloc(url_sz, 1);
1889 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1890 http->getConn()->port->protocol,
1891 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1892 http->getConn()->me.GetPort(), url);
1893 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http->uri << "'");
1894 } else if (vport > 0) {
1895 /* Put the local socket IP address as the hostname, but static port */
1896 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1897 http->uri = (char *)xcalloc(url_sz, 1);
1898 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1899 http->getConn()->port->protocol,
1900 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1901 vport, url);
1902 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http->uri << "'");
1903 }
1904 }
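/*
 * Worked example (hypothetical names) of the ACCEL VHOST rewrite above: an
 * origin-form request "GET /index.html HTTP/1.1" with "Host: www.example.com"
 * arriving on a vhost-enabled http_port yields
 *     http->uri = "http://www.example.com/index.html"
 * With neither a usable Host header nor vhost, the defaultsite value or the
 * local socket address/port is substituted for the hostname instead.
 */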
1905
1906 static void
1907 prepareTransparentURL(ConnStateData * conn, ClientHttpRequest *http, char *url, const char *req_hdr)
1908 {
1909 char *host;
1910 char ntoabuf[MAX_IPSTRLEN];
1911
1912 if (*url != '/')
1913 return; /* already in good shape */
1914
1915 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1916
1917 if ((host = mime_get_header(req_hdr, "Host")) != NULL) {
1918 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1919 strlen(host);
1920 http->uri = (char *)xcalloc(url_sz, 1);
1921 snprintf(http->uri, url_sz, "%s://%s%s",
1922 conn->port->protocol, host, url);
1923 debugs(33, 5, "TRANSPARENT HOST REWRITE: '" << http->uri <<"'");
1924 } else {
1925 /* Put the local socket IP address as the hostname. */
1926 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1927 http->uri = (char *)xcalloc(url_sz, 1);
1928 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1929 http->getConn()->port->protocol,
1930 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1931 http->getConn()->me.GetPort(), url);
1932 debugs(33, 5, "TRANSPARENT REWRITE: '" << http->uri << "'");
1933 }
1934 }
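/*
 * Worked example (hypothetical names) of the transparent rewrite above: an
 * intercepted "GET /logo.png HTTP/1.1" carrying "Host: origin.example.net"
 * becomes
 *     http->uri = "http://origin.example.net/logo.png"
 * whereas a Host-less request falls back to the local socket address, e.g.
 * "http://192.0.2.1:80/logo.png".
 */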
1935
1936 // Temporary hack helper: determine whether the request is chunked, expensive
1937 static bool
1938 isChunkedRequest(const HttpParser *hp)
1939 {
1940 HttpRequest request;
1941 if (!request.parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp)))
1942 return false;
1943
1944 return request.header.has(HDR_TRANSFER_ENCODING) &&
1945 request.header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',');
1946 }
1947
1948
1949 /**
1950 * parseHttpRequest()
1951 *
1952 * Returns
1953 * NULL on incomplete requests
1954 * a ClientSocketContext structure on success or failure.
1955 * Sets result->flags.parsed_ok to 0 if failed to parse the request.
1956 * Sets result->flags.parsed_ok to 1 if we have a good request.
1957 */
1958 static ClientSocketContext *
1959 parseHttpRequest(ConnStateData *conn, HttpParser *hp, HttpRequestMethod * method_p, HttpVersion *http_ver)
1960 {
1961 char *req_hdr = NULL;
1962 char *end;
1963 size_t req_sz;
1964 ClientHttpRequest *http;
1965 ClientSocketContext *result;
1966 StoreIOBuffer tempBuffer;
1967 int r;
1968
1969 /* pre-set these values to make aborting simpler */
1970 *method_p = METHOD_NONE;
1971
1972 /* NP: don't be tempted to move this down or remove again.
1973 * It's the only DDoS protection old-String has against long URLs */
1974 if ( hp->bufsiz <= 0) {
1975 debugs(33, 5, "Incomplete request, waiting for end of request line");
1976 return NULL;
1977 } else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) {
1978 debugs(33, 5, "parseHttpRequest: Too large request");
1979 hp->request_parse_status = HTTP_HEADER_TOO_LARGE;
1980 return parseHttpRequestAbort(conn, "error:request-too-large");
1981 }
1982
1983 /* Attempt to parse the first line; this'll define the method, url, version and header begin */
1984 r = HttpParserParseReqLine(hp);
1985
1986 if (r == 0) {
1987 debugs(33, 5, "Incomplete request, waiting for end of request line");
1988 return NULL;
1989 }
1990
1991 if (r == -1) {
1992 return parseHttpRequestAbort(conn, "error:invalid-request");
1993 }
1994
1995 /* Request line is valid here .. */
1996 *http_ver = HttpVersion(hp->v_maj, hp->v_min);
1997
1998 /* This call scans the entire request, not just the headers */
1999 if (hp->v_maj > 0) {
2000 if ((req_sz = headersEnd(hp->buf, hp->bufsiz)) == 0) {
2001 debugs(33, 5, "Incomplete request, waiting for end of headers");
2002 return NULL;
2003 }
2004 } else {
2005 debugs(33, 3, "parseHttpRequest: Missing HTTP identifier");
2006 req_sz = HttpParserReqSz(hp);
2007 }
2008
2009 /* We know the whole request is in hp->buf now */
2010
2011 assert(req_sz <= (size_t) hp->bufsiz);
2012
2013 /* Will the following be true with HTTP/0.9 requests? probably not .. */
2014 /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */
2015 assert(req_sz > 0);
2016
2017 hp->hdr_end = req_sz - 1;
2018
2019 hp->hdr_start = hp->req_end + 1;
2020
2021 /* Enforce max_request_size */
2022 if (req_sz >= Config.maxRequestHeaderSize) {
2023 debugs(33, 5, "parseHttpRequest: Too large request");
2024 hp->request_parse_status = HTTP_HEADER_TOO_LARGE;
2025 return parseHttpRequestAbort(conn, "error:request-too-large");
2026 }
2027
2028 /* Set method_p */
2029 *method_p = HttpRequestMethod(&hp->buf[hp->m_start], &hp->buf[hp->m_end]+1);
2030
2031 /* deny CONNECT via accelerated ports */
2032 if (*method_p == METHOD_CONNECT && conn && conn->port && conn->port->accel) {
2033 debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << conn->port->protocol << " Accelerator port " << conn->port->s.GetPort() );
2034 /* XXX need a way to say "this many character length string" */
2035 debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->buf);
2036 hp->request_parse_status = HTTP_METHOD_NOT_ALLOWED;
2037 return parseHttpRequestAbort(conn, "error:method-not-allowed");
2038 }
2039
2040 if (*method_p == METHOD_NONE) {
2041 /* XXX need a way to say "this many character length string" */
2042 debugs(33, 1, "clientParseRequestMethod: Unsupported method in request '" << hp->buf << "'");
2043 hp->request_parse_status = HTTP_METHOD_NOT_ALLOWED;
2044 return parseHttpRequestAbort(conn, "error:unsupported-request-method");
2045 }
2046
2047 /*
2048 * Process headers after request line
2049 * TODO: Use httpRequestParse here.
2050 */
2051 /* XXX this code should be modified to take a const char * later! */
2052 req_hdr = (char *) hp->buf + hp->req_end + 1;
2053
2054 debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr << "}");
2055
2056 end = (char *) hp->buf + hp->hdr_end;
2057
2058 debugs(33, 3, "parseHttpRequest: end = {" << end << "}");
2059
2060 /*
2061 * Check that the headers don't have double-CR.
2062 * NP: strnstr is required so we don't search any possible binary body blobs.
2063 */
2064 if ( squid_strnstr(req_hdr, "\r\r\n", req_sz) ) {
2065 debugs(33, 1, "WARNING: suspicious HTTP request contains double CR");
2066 hp->request_parse_status = HTTP_BAD_REQUEST;
2067 return parseHttpRequestAbort(conn, "error:double-CR");
2068 }
2069
2070 debugs(33, 3, "parseHttpRequest: prefix_sz = " <<
2071 (int) HttpParserRequestLen(hp) << ", req_line_sz = " <<
2072 HttpParserReqSz(hp));
2073
2074 // Temporary hack: We might receive a chunked body from a broken HTTP/1.1
2075 // client that sends chunked requests to HTTP/1.0 Squid. If the request
2076 // might have a chunked body, parse the headers early to look for the
2077 // "Transfer-Encoding: chunked" header. If we find it, wait until the
2078 // entire body is available so that we can set the content length and
2079 // forward the request without chunks. The primary reason for this is
2080 // to avoid forwarding a chunked request because the server side lacks
2081 // logic to determine when it is valid to do so.
2082 // FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS below will replace this hack.
2083 if (hp->v_min == 1 && hp->v_maj == 1 && // broken client, may send chunks
2084 Config.maxChunkedRequestBodySize > 0 && // configured to dechunk
2085 (*method_p == METHOD_PUT || *method_p == METHOD_POST)) {
2086
2087 // check only once per request because isChunkedRequest is expensive
2088 if (conn->in.dechunkingState == ConnStateData::chunkUnknown) {
2089 if (isChunkedRequest(hp))
2090 conn->startDechunkingRequest(hp);
2091 else
2092 conn->in.dechunkingState = ConnStateData::chunkNone;
2093 }
2094
2095 if (conn->in.dechunkingState == ConnStateData::chunkParsing) {
2096 if (conn->parseRequestChunks(hp)) // parses newly read chunks
2097 return NULL; // wait for more data
2098 debugs(33, 5, HERE << "Got complete chunked request or err.");
2099 assert(conn->in.dechunkingState != ConnStateData::chunkParsing);
2100 }
2101 }
2102
2103 /* Ok, all headers are received */
2104 http = new ClientHttpRequest(conn);
2105
2106 http->req_sz = HttpParserRequestLen(hp);
2107 result = ClientSocketContextNew(http);
2108 tempBuffer.data = result->reqbuf;
2109 tempBuffer.length = HTTP_REQBUF_SZ;
2110
2111 ClientStreamData newServer = new clientReplyContext(http);
2112 ClientStreamData newClient = result;
2113 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
2114 clientReplyStatus, newServer, clientSocketRecipient,
2115 clientSocketDetach, newClient, tempBuffer);
2116
2117 debugs(33, 5, "parseHttpRequest: Request Header is\n" <<(hp->buf) + hp->hdr_start);
2118
2119 /* set url */
2120 /*
2121 * XXX this should eventually not use a malloc'ed buffer; the transformation code
2122 * below needs to be modified to not expect a mutable nul-terminated string.
2123 */
2124 char *url = (char *)xmalloc(hp->u_end - hp->u_start + 16);
2125
2126 memcpy(url, hp->buf + hp->u_start, hp->u_end - hp->u_start + 1);
2127
2128 url[hp->u_end - hp->u_start + 1] = '\0';
2129
2130 #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION
2131
2132 if ((t = strchr(url, '#'))) /* remove HTML anchors */
2133 *t = '\0';
2134
2135 #endif
2136
2137 /* Rewrite the URL in transparent or accelerator mode */
2138 /* NP: there are several cases to traverse here:
2139 * - standard mode (forward proxy)
2140 * - transparent mode (TPROXY)
2141 * - transparent mode with failures
2142 * - intercept mode (NAT)
2143 * - intercept mode with failures
2144 * - accelerator mode (reverse proxy)
2145 * - internal URL
2146 * - mixed combos of the above with internal URL
2147 */
2148 if (conn->transparent()) {
2149 /* intercept or transparent mode, properly working with no failures */
2150 http->flags.intercepted = conn->port->intercepted;
2151 http->flags.spoof_client_ip = conn->port->spoof_client_ip;
2152 prepareTransparentURL(conn, http, url, req_hdr);
2153
2154 } else if (conn->port->intercepted || conn->port->spoof_client_ip) {
2155 /* transparent or intercept mode with failures */
2156 prepareTransparentURL(conn, http, url, req_hdr);
2157
2158 } else if (conn->port->accel || conn->switchedToHttps()) {
2159 /* accelerator mode */
2160 prepareAcceleratedURL(conn, http, url, req_hdr);
2161
2162 } else if (internalCheck(url)) {
2163 /* internal URL mode */
2164 /* prepend our name & port */
2165 http->uri = xstrdup(internalLocalUri(NULL, url));
2166 http->flags.accel = 1;
2167 }
2168
2169 if (!http->uri) {
2170 /* No special rewrites have been applied above, use the
2171 * requested URL. It may be rewritten later, so make extra room */
2172 int url_sz = strlen(url) + Config.appendDomainLen + 5;
2173 http->uri = (char *)xcalloc(url_sz, 1);
2174 strcpy(http->uri, url);
2175 }
2176
2177 setLogUri(http, http->uri);
2178 debugs(33, 5, "parseHttpRequest: Complete request received");
2179 result->flags.parsed_ok = 1;
2180 xfree(url);
2181 return result;
2182 }
2183
2184 int
2185 ConnStateData::getAvailableBufferLength() const
2186 {
2187 int result = in.allocatedSize - in.notYetUsed - 1;
2188 assert (result >= 0);
2189 return result;
2190 }
2191
2192 void
2193 ConnStateData::makeSpaceAvailable()
2194 {
2195 if (getAvailableBufferLength() < 2) {
2196 in.buf = (char *)memReallocBuf(in.buf, in.allocatedSize * 2, &in.allocatedSize);
2197 debugs(33, 2, "growing request buffer: notYetUsed=" << in.notYetUsed << " size=" << in.allocatedSize);
2198 }
2199 }
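/*
 * Worked example of the buffer arithmetic above (numbers are illustrative):
 * with in.allocatedSize == 4096 and in.notYetUsed == 4094, the available
 * length is 4096 - 4094 - 1 == 1 (the extra byte is reserved for the '\0'
 * terminator appended in handleReadData()), so makeSpaceAvailable() doubles
 * the buffer to 8192 bytes before the next read.
 */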
2200
2201 void
2202 ConnStateData::addContextToQueue(ClientSocketContext * context)
2203 {
2204 ClientSocketContext::Pointer *S;
2205
2206 for (S = (ClientSocketContext::Pointer *) & currentobject; S->getRaw();
2207 S = &(*S)->next);
2208 *S = context;
2209
2210 ++nrequests;
2211 }
2212
2213 int
2214 ConnStateData::getConcurrentRequestCount() const
2215 {
2216 int result = 0;
2217 ClientSocketContext::Pointer *T;
2218
2219 for (T = (ClientSocketContext::Pointer *) &currentobject;
2220 T->getRaw(); T = &(*T)->next, ++result);
2221 return result;
2222 }
2223
2224 int
2225 ConnStateData::connReadWasError(comm_err_t flag, int size, int xerrno)
2226 {
2227 if (flag != COMM_OK) {
2228 debugs(33, 2, "connReadWasError: FD " << fd << ": got flag " << flag);
2229 return 1;
2230 }
2231
2232 if (size < 0) {
2233 if (!ignoreErrno(xerrno)) {
2234 debugs(33, 2, "connReadWasError: FD " << fd << ": " << xstrerr(xerrno));
2235 return 1;
2236 } else if (in.notYetUsed == 0) {
2237 debugs(33, 2, "connReadWasError: FD " << fd << ": no data to process (" << xstrerr(xerrno) << ")");
2238 }
2239 }
2240
2241 return 0;
2242 }
2243
2244 int
2245 ConnStateData::connFinishedWithConn(int size)
2246 {
2247 if (size == 0) {
2248 if (getConcurrentRequestCount() == 0 && in.notYetUsed == 0) {
2249 /* no current or pending requests */
2250 debugs(33, 4, "connFinishedWithConn: FD " << fd << " closed");
2251 return 1;
2252 } else if (!Config.onoff.half_closed_clients) {
2253 /* admin doesn't want to support half-closed client sockets */
2254 debugs(33, 3, "connFinishedWithConn: FD " << fd << " aborted (half_closed_clients disabled)");
2255 notifyAllContexts(0); // no specific error implies abort
2256 return 1;
2257 }
2258 }
2259
2260 return 0;
2261 }
2262
2263 void
2264 connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount)
2265 {
2266 assert(byteCount > 0 && byteCount <= conn->in.notYetUsed);
2267 conn->in.notYetUsed -= byteCount;
2268 debugs(33, 5, HERE << "conn->in.notYetUsed = " << conn->in.notYetUsed);
2269 /*
2270 * If there is still data that will be used,
2271 * move it to the beginning.
2272 */
2273
2274 if (conn->in.notYetUsed > 0)
2275 xmemmove(conn->in.buf, conn->in.buf + byteCount,
2276 conn->in.notYetUsed);
2277 }
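/*
 * For example (illustrative numbers): with conn->in.notYetUsed == 500 and a
 * consumed request of byteCount == 300, connNoteUseOfBuffer() leaves
 * notYetUsed == 200 and shifts those 200 unparsed bytes to the start of
 * conn->in.buf so the next pipelined request can be parsed from offset 0.
 */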
2278
2279 int
2280 connKeepReadingIncompleteRequest(ConnStateData * conn)
2281 {
2282 // when we read chunked requests, the entire body is buffered
2283 // XXX: this check ignores header size and its limits.
2284 if (conn->in.dechunkingState == ConnStateData::chunkParsing)
2285 return ((int64_t)conn->in.notYetUsed) < Config.maxChunkedRequestBodySize;
2286
2287 return conn->in.notYetUsed >= Config.maxRequestHeaderSize ? 0 : 1;
2288 }
2289
2290 void
2291 connCancelIncompleteRequests(ConnStateData * conn)
2292 {
2293 ClientSocketContext *context = parseHttpRequestAbort(conn, "error:request-too-large");
2294 clientStreamNode *node = context->getClientReplyContext();
2295 assert(!connKeepReadingIncompleteRequest(conn));
2296 if (conn->in.dechunkingState == ConnStateData::chunkParsing) {
2297 debugs(33, 1, "Chunked request is too large (" << conn->in.notYetUsed << " bytes)");
2298 debugs(33, 1, "Config 'chunked_request_body_max_size'= " << Config.maxChunkedRequestBodySize << " bytes.");
2299 } else {
2300 debugs(33, 1, "Request header is too large (" << conn->in.notYetUsed << " bytes)");
2301 debugs(33, 1, "Config 'request_header_max_size'= " << Config.maxRequestHeaderSize << " bytes.");
2302 }
2303 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2304 assert (repContext);
2305 repContext->setReplyToError(ERR_TOO_BIG,
2306 HTTP_REQUEST_ENTITY_TOO_LARGE, METHOD_NONE, NULL,
2307 conn->peer, NULL, NULL, NULL);
2308 context->registerWithConn();
2309 context->pullData();
2310 }
2311
2312 void
2313 ConnStateData::clientMaybeReadData(int do_next_read)
2314 {
2315 if (do_next_read) {
2316 flags.readMoreRequests = true;
2317 readSomeData();
2318 }
2319 }
2320
2321 void
2322 ConnStateData::clientAfterReadingRequests(int do_next_read)
2323 {
2324 /*
2325 * If (1) we are reading a message body, (2) and the connection
2326 * is half-closed, and (3) we didn't get the entire HTTP request
2327 * yet, then close this connection.
2328 */
2329
2330 if (fd_table[fd].flags.socket_eof) {
2331 if ((int64_t)in.notYetUsed < bodySizeLeft()) {
2332 /* Partial request received. Abort client connection! */
2333 debugs(33, 3, "clientAfterReadingRequests: FD " << fd << " aborted, partial request");
2334 comm_close(fd);
2335 return;
2336 }
2337 }
2338
2339 clientMaybeReadData (do_next_read);
2340 }
2341
2342 static void
2343 clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *context, const HttpRequestMethod& method, HttpVersion http_ver)
2344 {
2345 ClientHttpRequest *http = context->http;
2346 HttpRequest *request = NULL;
2347 bool notedUseOfBuffer = false;
2348 bool tePresent = false;
2349 bool deChunked = false;
2350 bool unsupportedTe = false;
2351
2352 /* We have an initial client stream in place should it be needed */
2353 /* setup our private context */
2354 context->registerWithConn();
2355
2356 if (context->flags.parsed_ok == 0) {
2357 clientStreamNode *node = context->getClientReplyContext();
2358 debugs(33, 1, "clientProcessRequest: Invalid Request");
2359 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2360 assert (repContext);
2361 switch (hp->request_parse_status) {
2362 case HTTP_HEADER_TOO_LARGE:
2363 repContext->setReplyToError(ERR_TOO_BIG, HTTP_HEADER_TOO_LARGE, method, http->uri, conn->peer, NULL, conn->in.buf, NULL);
2364 break;
2365 case HTTP_METHOD_NOT_ALLOWED:
2366 repContext->setReplyToError(ERR_UNSUP_REQ, HTTP_METHOD_NOT_ALLOWED, method, http->uri, conn->peer, NULL, conn->in.buf, NULL);
2367 break;
2368 default:
2369 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, conn->in.buf, NULL);
2370 }
2371 assert(context->http->out.offset == 0);
2372 context->pullData();
2373 conn->flags.readMoreRequests = false;
2374 goto finish;
2375 }
2376
2377 if ((request = HttpRequest::CreateFromUrlAndMethod(http->uri, method)) == NULL) {
2378 clientStreamNode *node = context->getClientReplyContext();
2379 debugs(33, 5, "Invalid URL: " << http->uri);
2380 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2381 assert (repContext);
2382 repContext->setReplyToError(ERR_INVALID_URL, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL);
2383 assert(context->http->out.offset == 0);
2384 context->pullData();
2385 conn->flags.readMoreRequests = false;
2386 goto finish;
2387 }
2388
2389 /* RFC 2616 section 10.5.6 : handle unsupported HTTP versions cleanly. */
2390 /* We currently only accept 0.9, 1.0, 1.1 */
2391 if ( (http_ver.major == 0 && http_ver.minor != 9) ||
2392 (http_ver.major == 1 && http_ver.minor > 1 ) ||
2393 (http_ver.major > 1) ) {
2394
2395 clientStreamNode *node = context->getClientReplyContext();
2396 debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp));
2397 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2398 assert (repContext);
2399 repContext->setReplyToError(ERR_UNSUP_HTTPVERSION, HTTP_HTTP_VERSION_NOT_SUPPORTED, method, http->uri, conn->peer, NULL, HttpParserHdrBuf(hp), NULL);
2400 assert(context->http->out.offset == 0);
2401 context->pullData();
2402 conn->flags.readMoreRequests = false;
2403 goto finish;
2404 }
2405
2406 /* compile headers */
2407 /* we should skip request line! */
2408 /* XXX should actually know the damned buffer size here */
2409 if (http_ver.major >= 1 && !request->parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp))) {
2410 clientStreamNode *node = context->getClientReplyContext();
2411 debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp));
2412 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2413 assert (repContext);
2414 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL);
2415 assert(context->http->out.offset == 0);
2416 context->pullData();
2417 conn->flags.readMoreRequests = false;
2418 goto finish;
2419 }
2420
2421 request->flags.accelerated = http->flags.accel;
2422 request->flags.ignore_cc = conn->port->ignore_cc;
2423 request->flags.no_direct = request->flags.accelerated ? !conn->port->allow_direct : 0;
2424
2425 /** \par
2426 * If transparent or interception mode is working clone the transparent and interception flags
2427 * from the port settings to the request.
2428 */
2429 if (Ip::Interceptor.InterceptActive()) {
2430 request->flags.intercepted = http->flags.intercepted;
2431 }
2432 if (Ip::Interceptor.TransparentActive()) {
2433 request->flags.spoof_client_ip = conn->port->spoof_client_ip;
2434 }
2435
2436 if (internalCheck(request->urlpath.termedBuf())) {
2437 if (internalHostnameIs(request->GetHost()) &&
2438 request->port == getMyPort()) {
2439 http->flags.internal = 1;
2440 } else if (Config.onoff.global_internal_static && internalStaticCheck(request->urlpath.termedBuf())) {
2441 request->SetHost(internalHostname());
2442 request->port = getMyPort();
2443 http->flags.internal = 1;
2444 }
2445 }
2446
2447 if (http->flags.internal) {
2448 request->protocol = PROTO_HTTP;
2449 request->login[0] = '\0';
2450 }
2451
2452 request->flags.internal = http->flags.internal;
2453 setLogUri (http, urlCanonicalClean(request));
2454 request->client_addr = conn->peer;
2455 #if USE_SQUID_EUI
2456 request->client_eui48 = conn->peer_eui48;
2457 request->client_eui64 = conn->peer_eui64;
2458 #endif
2459 #if FOLLOW_X_FORWARDED_FOR
2460 request->indirect_client_addr = conn->peer;
2461 #endif /* FOLLOW_X_FORWARDED_FOR */
2462 request->my_addr = conn->me;
2463 request->http_ver = http_ver;
2464
2465 tePresent = request->header.has(HDR_TRANSFER_ENCODING);
2466 deChunked = conn->in.dechunkingState == ConnStateData::chunkReady;
2467 if (deChunked) {
2468 assert(tePresent);
2469 request->setContentLength(conn->in.dechunked.contentSize());
2470 request->header.delById(HDR_TRANSFER_ENCODING);
2471 conn->finishDechunkingRequest(hp);
2472 }
2473
2474 unsupportedTe = tePresent && !deChunked;
2475 if (!urlCheckRequest(request) || unsupportedTe) {
2476 clientStreamNode *node = context->getClientReplyContext();
2477 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2478 assert (repContext);
2479 repContext->setReplyToError(ERR_UNSUP_REQ,
2480 HTTP_NOT_IMPLEMENTED, request->method, NULL,
2481 conn->peer, request, NULL, NULL);
2482 assert(context->http->out.offset == 0);
2483 context->pullData();
2484 conn->flags.readMoreRequests = false;
2485 goto finish;
2486 }
2487
2488
2489 if (!clientIsContentLengthValid(request)) {
2490 clientStreamNode *node = context->getClientReplyContext();
2491 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2492 assert (repContext);
2493 repContext->setReplyToError(ERR_INVALID_REQ,
2494 HTTP_LENGTH_REQUIRED, request->method, NULL,
2495 conn->peer, request, NULL, NULL);
2496 assert(context->http->out.offset == 0);
2497 context->pullData();
2498 conn->flags.readMoreRequests = false;
2499 goto finish;
2500 }
2501
2502 if (request->header.has(HDR_EXPECT)) {
2503 int ignore = 0;
2504 #if HTTP_VIOLATIONS
2505 if (Config.onoff.ignore_expect_100) {
2506 String expect = request->header.getList(HDR_EXPECT);
2507 if (expect.caseCmp("100-continue") == 0)
2508 ignore = 1;
2509 expect.clean();
2510 }
2511 #endif
2512 if (!ignore) {
2513 clientStreamNode *node = context->getClientReplyContext();
2514 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2515 assert (repContext);
2516 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_EXPECTATION_FAILED, request->method, http->uri, conn->peer, request, NULL, NULL);
2517 assert(context->http->out.offset == 0);
2518 context->pullData();
2519 goto finish;
2520 }
2521 }
2522
2523 http->request = HTTPMSGLOCK(request);
2524 clientSetKeepaliveFlag(http);
2525
2526 /* If this is a CONNECT, don't schedule a read - ssl.c will handle it */
2527 if (http->request->method == METHOD_CONNECT)
2528 context->mayUseConnection(true);
2529
2530 /* Do we expect a request-body? */
2531 if (!context->mayUseConnection() && request->content_length > 0) {
2532 request->body_pipe = conn->expectRequestBody(request->content_length);
2533
2534 // consume header early so that body pipe gets just the body
2535 connNoteUseOfBuffer(conn, http->req_sz);
2536 notedUseOfBuffer = true;
2537
2538 conn->handleRequestBodyData(); // may comm_close and stop producing
2539
2540 /* Is it too large? */
2541
2542 if (!clientIsRequestBodyValid(request->content_length) ||
2543 clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
2544 clientStreamNode *node = context->getClientReplyContext();
2545 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2546 assert (repContext);
2547 repContext->setReplyToError(ERR_TOO_BIG,
2548 HTTP_REQUEST_ENTITY_TOO_LARGE, METHOD_NONE, NULL,
2549 conn->peer, http->request, NULL, NULL);
2550 assert(context->http->out.offset == 0);
2551 context->pullData();
2552 goto finish;
2553 }
2554
2555 if (!request->body_pipe->productionEnded())
2556 conn->readSomeData();
2557
2558 context->mayUseConnection(!request->body_pipe->productionEnded());
2559 }
2560
2561 http->calloutContext = new ClientRequestContext(http);
2562
2563 http->doCallouts();
2564
2565 finish:
2566 if (!notedUseOfBuffer)
2567 connNoteUseOfBuffer(conn, http->req_sz);
2568
2569 /*
2570 * DPW 2007-05-18
2571 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
2572 * to here because calling comm_reset_close() causes http to
2573 * be freed and the above connNoteUseOfBuffer() would hit an
2574 * assertion, not to mention that we were accessing freed memory.
2575 */
2576 if (http->request->flags.resetTCP() && conn->fd > -1) {
2577 debugs(33, 3, HERE << "Sending TCP RST on FD " << conn->fd);
2578 conn->flags.readMoreRequests = false;
2579 comm_reset_close(conn->fd);
2580 return;
2581 }
2582 }
2583
2584 static void
2585 connStripBufferWhitespace (ConnStateData * conn)
2586 {
2587 while (conn->in.notYetUsed > 0 && xisspace(conn->in.buf[0])) {
2588 xmemmove(conn->in.buf, conn->in.buf + 1, conn->in.notYetUsed - 1);
2589 --conn->in.notYetUsed;
2590 }
2591 }
2592
2593 static int
2594 connOkToAddRequest(ConnStateData * conn)
2595 {
2596 int result = conn->getConcurrentRequestCount() < (Config.onoff.pipeline_prefetch ? 2 : 1);
2597
2598 if (!result) {
2599 debugs(33, 3, "connOkToAddRequest: FD " << conn->fd <<
2600 " max concurrent requests reached");
2601 debugs(33, 5, "connOkToAddRequest: FD " << conn->fd <<
2602 " deferring new request until one is done");
2603 }
2604
2605 return result;
2606 }
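/*
 * Illustrative scenario: with pipeline_prefetch enabled a connection may have
 * up to 2 requests in flight, so a second pipelined GET can be parsed while
 * the first is still being served; with it disabled, the second request stays
 * buffered until getConcurrentRequestCount() drops back to 0.
 */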
2607
2608 /**
2609 * bodySizeLeft
2610 *
2611 * Report on the number of bytes of body content that we
2612 * know are yet to be read on this connection.
2613 */
2614 int64_t
2615 ConnStateData::bodySizeLeft()
2616 {
2617 // XXX: this logic will not work for chunked requests with unknown sizes
2618
2619 if (bodyPipe != NULL)
2620 return bodyPipe->unproducedSize();
2621
2622 return 0;
2623 }
2624
2625 /**
2626 * Attempt to parse one or more requests from the input buffer.
2627 * If at least one request is successfully parsed it returns TRUE,
2628 * even if the next request is only partially parsed.
2629 * do_next_read is updated to indicate whether a read should be
2630 * scheduled.
2631 */
2632 static bool
2633 clientParseRequest(ConnStateData * conn, bool &do_next_read)
2634 {
2635 HttpRequestMethod method;
2636 ClientSocketContext *context;
2637 bool parsed_req = false;
2638 HttpVersion http_ver;
2639 HttpParser hp;
2640
2641 debugs(33, 5, "clientParseRequest: FD " << conn->fd << ": attempting to parse");
2642
2643 while (conn->in.notYetUsed > 0 && conn->bodySizeLeft() == 0) {
2644 connStripBufferWhitespace (conn);
2645
2646 /* Don't try to parse if the buffer is empty */
2647
2648 if (conn->in.notYetUsed == 0)
2649 break;
2650
2651 /* Limit the number of concurrent requests to 2 */
2652
2653 if (!connOkToAddRequest(conn)) {
2654 break;
2655 }
2656
2657 /* Should not be needed anymore */
2658 /* Terminate the string */
2659 conn->in.buf[conn->in.notYetUsed] = '\0';
2660
2661 /* Begin the parsing */
2662 HttpParserInit(&hp, conn->in.buf, conn->in.notYetUsed);
2663
2664 /* Process request */
2665 PROF_start(parseHttpRequest);
2666
2667 context = parseHttpRequest(conn, &hp, &method, &http_ver);
2668
2669 PROF_stop(parseHttpRequest);
2670
2671 /* partial or incomplete request */
2672 if (!context) {
2673
2674 if (!connKeepReadingIncompleteRequest(conn))
2675 connCancelIncompleteRequests(conn);
2676
2677 break;
2678 }
2679
2680 /* status -1 or 1 */
2681 if (context) {
2682 debugs(33, 5, "clientParseRequest: FD " << conn->fd << ": parsed a request");
2683 commSetTimeout(conn->fd, Config.Timeout.lifetime, clientLifetimeTimeout,
2684 context->http);
2685
2686 clientProcessRequest(conn, &hp, context, method, http_ver);
2687
2688 parsed_req = true;
2689
2690 if (context->mayUseConnection()) {
2691 debugs(33, 3, "clientParseRequest: Not reading, as this request may need the connection");
2692 do_next_read = 0;
2693 break;
2694 }
2695
2696 if (!conn->flags.readMoreRequests) {
2697 conn->flags.readMoreRequests = true;
2698 break;
2699 }
2700
2701 continue; /* while offset > 0 && conn->bodySizeLeft() == 0 */
2702 }
2703 } /* while offset > 0 && conn->bodySizeLeft() == 0 */
2704
2705 /* XXX where to 'finish' the parsing pass? */
2706
2707 return parsed_req;
2708 }
2709
2710 void
2711 ConnStateData::clientReadRequest(const CommIoCbParams &io)
2712 {
2713 debugs(33,5,HERE << "clientReadRequest FD " << io.fd << " size " << io.size);
2714 reading(false);
2715 bool do_next_read = 1; /* the default _is_ to read data! - adrian */
2716
2717 assert (io.fd == fd);
2718
2719 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
2720
2721 if (io.flag == COMM_ERR_CLOSING) {
2722 debugs(33,5, HERE << " FD " << fd << " closing Bailout.");
2723 return;
2724 }
2725
2726 /*
2727 * Don't reset the timeout value here. The timeout value will be
2728 * set to Config.Timeout.request by httpAccept() and
2729 * clientWriteComplete(), and should apply to the request as a
2730 * whole, not individual read() calls. Plus, it breaks our
2731 * lame half-close detection
2732 */
2733 if (connReadWasError(io.flag, io.size, io.xerrno)) {
2734 notifyAllContexts(io.xerrno);
2735 comm_close(fd);
2736 return;
2737 }
2738
2739 if (io.flag == COMM_OK) {
2740 if (io.size > 0) {
2741 kb_incr(&statCounter.client_http.kbytes_in, io.size);
2742
2743 handleReadData(io.buf, io.size);
2744
2745 /* The above may close the connection under our feet */
2746 if (!isOpen())
2747 return;
2748
2749 } else if (io.size == 0) {
2750 debugs(33, 5, "clientReadRequest: FD " << fd << " closed?");
2751
2752 if (connFinishedWithConn(io.size)) {
2753 comm_close(fd);
2754 return;
2755 }
2756
2757 /* It might be half-closed, we can't tell */
2758 fd_table[fd].flags.socket_eof = 1;
2759
2760 commMarkHalfClosed(fd);
2761
2762 do_next_read = 0;
2763
2764 fd_note(fd, "half-closed");
2765
2766 /* There is one more close check at the end, to detect aborted
2767 * (partial) requests. At this point we can't tell if the request
2768 * is partial.
2769 */
2770 /* Continue to process previously read data */
2771 }
2772 }
2773
2774 /* Process next request */
2775 if (getConcurrentRequestCount() == 0)
2776 fd_note(fd, "Reading next request");
2777
2778 if (! clientParseRequest(this, do_next_read)) {
2779 if (!isOpen())
2780 return;
2781 /*
2782 * If the client here is half closed and we failed
2783 * to parse a request, close the connection.
2784 * The above check with connFinishedWithConn() only
2785 * succeeds _if_ the buffer is empty which it won't
2786 * be if we have an incomplete request.
2787 * XXX: This duplicates ClientSocketContext::keepaliveNextRequest
2788 */
2789 if (getConcurrentRequestCount() == 0 && commIsHalfClosed(fd)) {
2790 debugs(33, 5, "clientReadRequest: FD " << fd << ": half-closed connection, no completed request parsed, connection closing.");
2791 comm_close(fd);
2792 return;
2793 }
2794 }
2795
2796 if (!isOpen())
2797 return;
2798
2799 clientAfterReadingRequests(do_next_read);
2800 }
2801
2802 /**
2803 * called when new request data has been read from the socket
2804 */
2805 void
2806 ConnStateData::handleReadData(char *buf, size_t size)
2807 {
2808 char *current_buf = in.addressToReadInto();
2809
2810 if (buf != current_buf)
2811 xmemmove(current_buf, buf, size);
2812
2813 in.notYetUsed += size;
2814
2815 in.buf[in.notYetUsed] = '\0'; /* Terminate the string */
2816
2817 // if we are reading a body, stuff data into the body pipe
2818 if (bodyPipe != NULL)
2819 handleRequestBodyData();
2820 }
2821
2822 /**
2823 * called when new request body data has been buffered in in.buf
2824 * may close the connection if we were closing and piped everything out
2825 */
2826 void
2827 ConnStateData::handleRequestBodyData()
2828 {
2829 assert(bodyPipe != NULL);
2830
2831 size_t putSize = 0;
2832
2833 #if FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS
2834 // The code below works, in principle, but we cannot do dechunking
2835 // on-the-fly because that would mean sending chunked requests to
2836 // the next hop. Squid lacks logic to determine which servers can
2837 // receive chunk requests. Squid v3.0 code cannot even handle chunked
2838 // responses which we may encourage by sending chunked requests.
2839 // The error generation code probably needs more work.
2840 if (in.bodyParser) { // chunked body
2841 debugs(33,5, HERE << "handling chunked request body for FD " << fd);
2842 bool malformedChunks = false;
2843
2844 MemBuf raw; // ChunkedCodingParser only works with MemBufs
2845 raw.init(in.notYetUsed, in.notYetUsed);
2846 raw.append(in.buf, in.notYetUsed);
2847 try { // the parser will throw on errors
2848 const mb_size_t wasContentSize = raw.contentSize();
2849 BodyPipeCheckout bpc(*bodyPipe);
2850 const bool parsed = in.bodyParser->parse(&raw, &bpc.buf);
2851 bpc.checkIn();
2852 putSize = wasContentSize - raw.contentSize();
2853
2854 if (parsed) {
2855 stopProducingFor(bodyPipe, true); // this makes bodySize known
2856 } else {
2857 // parser needy state must imply body pipe needy state
2858 if (in.bodyParser->needsMoreData() &&
2859 !bodyPipe->mayNeedMoreData())
2860 malformedChunks = true;
2861 // XXX: if bodyParser->needsMoreSpace, how can we guarantee it?
2862 }
2863 } catch (...) { // XXX: be more specific
2864 malformedChunks = true;
2865 }
2866
2867 if (malformedChunks) {
2868 if (bodyPipe != NULL)
2869 stopProducingFor(bodyPipe, false);
2870
2871 ClientSocketContext::Pointer context = getCurrentContext();
2872 if (!context->http->out.offset) {
2873 clientStreamNode *node = context->getClientReplyContext();
2874 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2875 assert (repContext);
2876 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST,
2877 METHOD_NONE, NULL, &peer.sin_addr,
2878 NULL, NULL, NULL);
2879 context->pullData();
2880 }
2881 flags.readMoreRequests = false;
2882 return; // XXX: is that sufficient to generate an error?
2883 }
2884 } else // identity encoding
2885 #endif
2886 {
2887 debugs(33,5, HERE << "handling plain request body for FD " << fd);
2888 putSize = bodyPipe->putMoreData(in.buf, in.notYetUsed);
2889 if (!bodyPipe->mayNeedMoreData()) {
2890 // BodyPipe will clear us automagically when we produced everything
2891 bodyPipe = NULL;
2892 }
2893 }
2894
2895 if (putSize > 0)
2896 connNoteUseOfBuffer(this, putSize);
2897
2898 if (!bodyPipe) {
2899 debugs(33,5, HERE << "produced entire request body for FD " << fd);
2900
2901 if (closing()) {
2902 /* we've finished reading like good clients,
2903 * now do the close that initiateClose initiated.
2904 *
2905 * XXX: do we have to close? why not check keepalive etc.?
2906 *
2907 * XXX: To support chunked requests safely, we need to handle
2908 * the case of an endless request. This if-statement does not,
2909 * because mayNeedMoreData is true if request size is not known.
2910 */
2911 comm_close(fd);
2912 }
2913 }
2914 }
2915
2916 void
2917 ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer )
2918 {
2919 handleRequestBodyData();
2920 }
2921
2922 void
2923 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2924 {
2925 if (!closing())
2926 startClosing("body consumer aborted");
2927 }
2928
2929 /** general lifetime handler for HTTP requests */
2930 void
2931 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2932 {
2933 #if THIS_CONFUSES_PERSISTENT_CONNECTION_AWARE_BROWSERS_AND_USERS
2934 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2935
2936 if (COMMIO_FD_WRITECB(io.fd)->active) {
2937 /* FIXME: If this code is reinstated, check the conn counters,
2938 * not the fd table state
2939 */
2940 /*
2941 * Some data has been sent to the client, just close the FD
2942 */
2943 comm_close(io.fd);
2944 } else if (nrequests) {
2945 /*
2946 * assume its a persistent connection; just close it
2947 */
2948 comm_close(io.fd);
2949 } else {
2950 /*
2951 * Generate an error
2952 */
2953 ClientHttpRequest **H;
2954 clientStreamNode *node;
2955 ClientHttpRequest *http = parseHttpRequestAbort(this, "error:Connection%20lifetime%20expired");
2956 node = http->client_stream.tail->prev->data;
2957 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2958 assert (repContext);
2959 repContext->setReplyToError(ERR_LIFETIME_EXP,
2960 HTTP_REQUEST_TIMEOUT, METHOD_NONE, "N/A", &peer.sin_addr,
2961 NULL, NULL, NULL);
2962 /* No requests can be outstanding */
2963 assert(chr == NULL);
2964 /* add to the client request queue */
2965
2966 for (H = &chr; *H; H = &(*H)->next);
2967 *H = http;
2968
2969 clientStreamRead(http->client_stream.tail->data, http, 0,
2970 HTTP_REQBUF_SZ, context->reqbuf);
2971
2972 /*
2973 * if we don't close() here, we still need a timeout handler!
2974 */
2975 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2976 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
2977 TimeoutDialer(this,&ConnStateData::requestTimeout));
2978 commSetTimeout(io.fd, 30, timeoutCall);
2979
2980 /*
2981 * Aha, but we don't want a read handler!
2982 */
2983 commSetSelect(io.fd, COMM_SELECT_READ, NULL, NULL, 0);
2984 }
2985
2986 #else
2987 /*
2988 * Just close the connection to not confuse browsers
2989 * using persistent connections. Some browsers open
2990 * a connection and then do not use it until much
2991 * later (presumably because the request triggering
2992 * the open has already been completed on another
2993 * connection).
2994 */
2995 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2996
2997 comm_close(io.fd);
2998
2999 #endif
3000 }
3001
3002 static void
3003 clientLifetimeTimeout(int fd, void *data)
3004 {
3005 ClientHttpRequest *http = (ClientHttpRequest *)data;
3006 debugs(33, 1, "WARNING: Closing client " << " connection due to lifetime timeout");
3007 debugs(33, 1, "\t" << http->uri);
3008 http->al.http.timedout = true;
3009 comm_close(fd);
3010 }
3011
3012 ConnStateData *
3013 connStateCreate(const Ip::Address &peer, const Ip::Address &me, int fd, http_port_list *port)
3014 {
3015 ConnStateData *result = new ConnStateData;
3016
3017 result->peer = peer;
3018 result->log_addr = peer;
3019 result->log_addr.ApplyMask(Config.Addrs.client_netmask.GetCIDR());
3020 result->me = me;
3021 result->fd = fd;
3022 result->in.buf = (char *)memAllocBuf(CLIENT_REQ_BUF_SZ, &result->in.allocatedSize);
3023 result->port = cbdataReference(port);
3024
3025 if (port->intercepted || port->spoof_client_ip) {
3026 Ip::Address client, dst;
3027
3028 if (Ip::Interceptor.NatLookup(fd, me, peer, client, dst) == 0) {
3029 result->me = client;
3030 result->peer = dst;
3031 result->transparent(true);
3032 }
3033 }
3034
3035 if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
3036 (result->transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
3037 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
3038 int i = IP_PMTUDISC_DONT;
3039 setsockopt(fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof i);
3040
3041 #else
3042
3043 static int reported = 0;
3044
3045 if (!reported) {
3046 debugs(33, 1, "Notice: httpd_accel_no_pmtu_disc not supported on your platform");
3047 reported = 1;
3048 }
3049
3050 #endif
3051
3052 }
3053
3054 result->flags.readMoreRequests = true;
3055 return result;
3056 }
3057
3058 /** Handle a new connection on HTTP socket. */
3059 void
3060 httpAccept(int sock, int newfd, ConnectionDetail *details,
3061 comm_err_t flag, int xerrno, void *data)
3062 {
3063 http_port_list *s = (http_port_list *)data;
3064 ConnStateData *connState = NULL;
3065
3066 if (flag != COMM_OK) {
3067 debugs(33, 1, "httpAccept: FD " << sock << ": accept failure: " << xstrerr(xerrno));
3068 return;
3069 }
3070
3071 debugs(33, 4, "httpAccept: FD " << newfd << ": accepted");
3072 fd_note(newfd, "client http connect");
3073 connState = connStateCreate(&details->peer, &details->me, newfd, s);
3074
3075 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3076 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::connStateClosed",
3077 Dialer(connState, &ConnStateData::connStateClosed));
3078 comm_add_close_handler(newfd, call);
3079
3080 if (Config.onoff.log_fqdn)
3081 fqdncache_gethostbyaddr(details->peer, FQDN_LOOKUP_IF_MISS);
3082
3083 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
3084 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
3085 TimeoutDialer(connState,&ConnStateData::requestTimeout));
3086 commSetTimeout(newfd, Config.Timeout.read, timeoutCall);
3087
3088 #if USE_IDENT
3089 if (Ident::TheConfig.identLookup) {
3090 ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
3091 identChecklist.src_addr = details->peer;
3092 identChecklist.my_addr = details->me;
3093 if (identChecklist.fastCheck())
3094 Ident::Start(details->me, details->peer, clientIdentDone, connState);
3095 }
3096 #endif
3097
3098 #if USE_SQUID_EUI
3099 if (Eui::TheConfig.euiLookup) {
3100 if (details->peer.IsIPv4()) {
3101 connState->peer_eui48.lookup(details->peer);
3102 } else if (details->peer.IsIPv6()) {
3103 connState->peer_eui64.lookup(details->peer);
3104 }
3105 }
3106 #endif
3107
3108 if (s->tcp_keepalive.enabled) {
3109 commSetTcpKeepalive(newfd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
3110 }
3111
3112 connState->readSomeData();
3113
3114 clientdbEstablished(details->peer, 1);
3115
3116 incoming_sockets_accepted++;
3117 }
3118
3119 #if USE_SSL
3120
3121 /** Create SSL connection structure and update fd_table */
3122 static SSL *
3123 httpsCreate(int newfd, ConnectionDetail *details, SSL_CTX *sslContext)
3124 {
3125 SSL *ssl = SSL_new(sslContext);
3126
3127 if (!ssl) {
3128 const int ssl_error = ERR_get_error();
3129 debugs(83, 1, "httpsAccept: Error allocating handle: " << ERR_error_string(ssl_error, NULL) );
3130 comm_close(newfd);
3131 return NULL;
3132 }
3133
3134 SSL_set_fd(ssl, newfd);
3135 fd_table[newfd].ssl = ssl;
3136 fd_table[newfd].read_method = &ssl_read_method;
3137 fd_table[newfd].write_method = &ssl_write_method;
3138
3139 debugs(33, 5, "httpsCreate: will negotiate SSL on FD " << newfd);
3140 fd_note(newfd, "client https start");
3141
3142 return ssl;
3143 }
3144
3145 /** negotiate an SSL connection */
3146 static void
3147 clientNegotiateSSL(int fd, void *data)
3148 {
3149 ConnStateData *conn = (ConnStateData *)data;
3150 X509 *client_cert;
3151 SSL *ssl = fd_table[fd].ssl;
3152 int ret;
3153
3154 if ((ret = SSL_accept(ssl)) <= 0) {
3155 int ssl_error = SSL_get_error(ssl, ret);
3156
3157 switch (ssl_error) {
3158
3159 case SSL_ERROR_WANT_READ:
3160 commSetSelect(fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
3161 return;
3162
3163 case SSL_ERROR_WANT_WRITE:
3164 commSetSelect(fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
3165 return;
3166
3167 case SSL_ERROR_SYSCALL:
3168
3169 if (ret == 0) {
3170 debugs(83, 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd << ": Aborted by client");
3171 comm_close(fd);
3172 return;
3173 } else {
3174 int hard = 1;
3175
3176 if (errno == ECONNRESET)
3177 hard = 0;
3178
3179 debugs(83, hard ? 1 : 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3180 fd << ": " << strerror(errno) << " (" << errno << ")");
3181
3182 comm_close(fd);
3183
3184 return;
3185 }
3186
3187 case SSL_ERROR_ZERO_RETURN:
3188 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd << ": Closed by client");
3189 comm_close(fd);
3190 return;
3191
3192 default:
3193 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3194 fd << ": " << ERR_error_string(ERR_get_error(), NULL) <<
3195 " (" << ssl_error << "/" << ret << ")");
3196 comm_close(fd);
3197 return;
3198 }
3199
3200 /* NOTREACHED */
3201 }
3202
3203 if (SSL_session_reused(ssl)) {
3204 debugs(83, 2, "clientNegotiateSSL: Session " << SSL_get_session(ssl) <<
3205 " reused on FD " << fd << " (" << fd_table[fd].ipaddr << ":" << (int)fd_table[fd].remote_port << ")");
3206 } else {
3207 if (do_debug(83, 4)) {
3208 /* Write out the SSL session details.. actually the call below, but
3209 * OpenSSL headers do strange typecasts confusing GCC.. */
3210 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
3211 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
3212 PEM_ASN1_write((i2d_of_void *)i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL);
3213
3214 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
3215
3216 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
3217 * This is caused by an unpredictable gcc behaviour on a cast of the first argument
3218 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
3219 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
3220 * Because there are two possible usable casts, if you get an error here, try the other
3221 * commented line. */
3222
3223 PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL);
3224 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL); */
3225
3226 #else
3227
3228 debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only by defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source." );
3229
3230 #endif
3231 /* Note: This does not automatically fflush the log file.. */
3232 }
3233
3234 debugs(83, 2, "clientNegotiateSSL: New session " <<
3235 SSL_get_session(ssl) << " on FD " << fd << " (" <<
3236 fd_table[fd].ipaddr << ":" << (int)fd_table[fd].remote_port <<
3237 ")");
3238 }
3239
3240 debugs(83, 3, "clientNegotiateSSL: FD " << fd << " negotiated cipher " <<
3241 SSL_get_cipher(ssl));
3242
3243 client_cert = SSL_get_peer_certificate(ssl);
3244
3245 if (client_cert != NULL) {
3246 debugs(83, 3, "clientNegotiateSSL: FD " << fd <<
3247 " client certificate: subject: " <<
3248 X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));
3249
3250 debugs(83, 3, "clientNegotiateSSL: FD " << fd <<
3251 " client certificate: issuer: " <<
3252 X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));
3253
3254
3255 X509_free(client_cert);
3256 } else {
3257 debugs(83, 5, "clientNegotiateSSL: FD " << fd <<
3258 " has no certificate.");
3259 }
3260
3261 conn->readSomeData();
3262 }
3263
3264 /** handle a new HTTPS connection */
3265 static void
3266 httpsAccept(int sock, int newfd, ConnectionDetail *details,
3267 comm_err_t flag, int xerrno, void *data)
3268 {
3269 https_port_list *s = (https_port_list *)data;
3270 SSL_CTX *sslContext = s->sslContext;
3271
3272 if (flag != COMM_OK) {
3273 errno = xerrno;
3274 debugs(33, 1, "httpsAccept: FD " << sock << ": accept failure: " << xstrerr(xerrno));
3275 return;
3276 }
3277
3278 SSL *ssl = NULL;
3279 if (!(ssl = httpsCreate(newfd, details, sslContext)))
3280 return;
3281
3282 debugs(33, 5, "httpsAccept: FD " << newfd << " accepted, starting SSL negotiation.");
3283 fd_note(newfd, "client https connect");
3284 ConnStateData *connState = connStateCreate(details->peer, details->me,
3285 newfd, &s->http);
3286 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3287 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::connStateClosed",
3288 Dialer(connState, &ConnStateData::connStateClosed));
3289 comm_add_close_handler(newfd, call);
3290
3291 if (Config.onoff.log_fqdn)
3292 fqdncache_gethostbyaddr(details->peer, FQDN_LOOKUP_IF_MISS);
3293
3294 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
3295 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
3296 TimeoutDialer(connState,&ConnStateData::requestTimeout));
3297 commSetTimeout(newfd, Config.Timeout.request, timeoutCall);
3298
3299 #if USE_IDENT
3300 if (Ident::TheConfig.identLookup) {
3301 ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
3302 identChecklist.src_addr = details->peer;
3303 identChecklist.my_addr = details->me;
3304 if (identChecklist.fastCheck())
3305 Ident::Start(details->me, details->peer, clientIdentDone, connState);
3306 }
3307 #endif
3308
3309 if (s->http.tcp_keepalive.enabled) {
3310 commSetTcpKeepalive(newfd, s->http.tcp_keepalive.idle, s->http.tcp_keepalive.interval, s->http.tcp_keepalive.timeout);
3311 }
3312
3313 commSetSelect(newfd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
3314
3315 clientdbEstablished(details->peer, 1);
3316
3317 incoming_sockets_accepted++;
3318 }
3319
3320 bool
3321 ConnStateData::switchToHttps()
3322 {
3323 assert(!switchedToHttps_);
3324
3325 //HTTPMSGLOCK(currentobject->http->request);
3326 assert(areAllContextsForThisConnection());
3327 freeAllContexts();
3328 //currentobject->connIsFinished();
3329
3330 debugs(33, 5, HERE << "converting FD " << fd << " to SSL");
3331
3332 // fake a ConnectionDetail object; XXX: make ConnState a ConnectionDetail?
3333 ConnectionDetail detail;
3334 detail.me = me;
3335 detail.peer = peer;
3336
3337 SSL_CTX *sslContext = port->sslContext;
3338 SSL *ssl = NULL;
3339 if (!(ssl = httpsCreate(fd, &detail, sslContext)))
3340 return false;
3341
3342 // commSetTimeout() was called for this request before we switched.
3343
3344 // Disable the client read handler until peer selection is complete
3345 commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
3346
3347 commSetSelect(fd, COMM_SELECT_READ, clientNegotiateSSL, this, 0);
3348
3349 switchedToHttps_ = true;
3350 return true;
3351 }
3352
3353 #endif /* USE_SSL */
3354
3355
3356 static void
3357 clientHttpConnectionsOpen(void)
3358 {
3359 http_port_list *s = NULL;
3360 int fd = -1;
3361 #if USE_SSL
3362 int bumpCount = 0; // counts http_ports with sslBump option
3363 #endif
3364
3365 for (s = Config.Sockaddr.http; s; s = s->next) {
3366 if (MAXHTTPPORTS == NHttpSockets) {
3367 debugs(1, 1, "WARNING: You have too many 'http_port' lines.");
3368 debugs(1, 1, " The limit is " << MAXHTTPPORTS);
3369 continue;
3370 }
3371
3372 #if USE_SSL
3373 if (s->sslBump && s->sslContext == NULL) {
3374 debugs(1, 1, "Will not bump SSL at http_port " <<
3375 s->http.s << " due to SSL initialization failure.");
3376 s->sslBump = 0;
3377 }
3378 if (s->sslBump)
3379 ++bumpCount;
3380 #endif
3381
3382 /* AYJ: 2009-12-27: bit bumpy. new ListenStateData(...) should be doing all the Comm:: stuff ... */
3383
3384 enter_suid();
3385
3386 if (s->spoof_client_ip) {
3387 fd = comm_open_listener(SOCK_STREAM, IPPROTO_TCP, s->s, (COMM_NONBLOCKING|COMM_TRANSPARENT), "HTTP Socket");
3388 } else {
3389 fd = comm_open_listener(SOCK_STREAM, IPPROTO_TCP, s->s, COMM_NONBLOCKING, "HTTP Socket");
3390 }
3391
3392 leave_suid();
3393
3394 if (fd < 0)
3395 continue;
3396
3397 AsyncCall::Pointer call = commCbCall(5,5, "SomeCommAcceptHandler(httpAccept)",
3398 CommAcceptCbPtrFun(httpAccept, s));
3399
3400 s->listener = new Comm::ListenStateData(fd, call, true);
3401
3402 debugs(1, 1, "Accepting " <<
3403 (s->intercepted ? " intercepted" : "") <<
3404 (s->spoof_client_ip ? " spoofing" : "") <<
3405 (s->sslBump ? " bumpy" : "") <<
3406 (s->accel ? " accelerated" : "")
3407 << " HTTP connections at " << s->s
3408 << ", FD " << fd << "." );
3409
3410 HttpSockets[NHttpSockets++] = fd;
3411 }
3412
3413 #if USE_SSL
3414 if (bumpCount && !Config.accessList.ssl_bump)
3415 debugs(33, 1, "WARNING: http_port(s) with SslBump found, but no " <<
3416 std::endl << "\tssl_bump ACL configured. No requests will be " <<
3417 "bumped.");
3418 #endif
3419 }
3420
3421 #if USE_SSL
3422 static void
3423 clientHttpsConnectionsOpen(void)
3424 {
3425 https_port_list *s;
3426 int fd;
3427
3428 for (s = Config.Sockaddr.https; s; s = (https_port_list *)s->http.next) {
3429 if (MAXHTTPPORTS == NHttpSockets) {
3430 debugs(1, 1, "Ignoring 'https_port' lines exceeding the limit.");
3431 debugs(1, 1, "The limit is " << MAXHTTPPORTS << " HTTPS ports.");
3432 continue;
3433 }
3434
3435 if (s->sslContext == NULL) {
3436 debugs(1, 1, "Ignoring https_port " << s->http.s <<
3437 " due to SSL initialization failure.");
3438 continue;
3439 }
3440
3441 enter_suid();
3442 fd = comm_open_listener(SOCK_STREAM,
3443 IPPROTO_TCP,
3444 s->http.s,
3445 COMM_NONBLOCKING, "HTTPS Socket");
3446 leave_suid();
3447
3448 if (fd < 0)
3449 continue;
3450
3451 AsyncCall::Pointer call = commCbCall(5,5, "SomeCommAcceptHandler(httpsAccept)",
3452 CommAcceptCbPtrFun(httpsAccept, s));
3453
3454 s->listener = new Comm::ListenStateData(fd, call, true);
3455
3456 debugs(1, 1, "Accepting HTTPS connections at " << s->http.s << ", FD " << fd << ".");
3457
3458 HttpSockets[NHttpSockets++] = fd;
3459 }
3460 }
3461
3462 #endif
3463
3464 void
3465 clientOpenListenSockets(void)
3466 {
3467 clientHttpConnectionsOpen();
3468 #if USE_SSL
3469 clientHttpsConnectionsOpen();
3470 #endif
3471
3472 if (NHttpSockets < 1)
3473 fatal("Cannot open HTTP Port");
3474 }
3475
3476 void
3477 clientHttpConnectionsClose(void)
3478 {
3479 for (http_port_list *s = Config.Sockaddr.http; s; s = s->next) {
3480 if (s->listener) {
3481 debugs(1, 1, "FD " << s->listener->fd << " Closing HTTP connection");
3482 delete s->listener;
3483 s->listener = NULL;
3484 }
3485 }
3486
3487 #if USE_SSL
3488 for (http_port_list *s = Config.Sockaddr.https; s; s = s->next) {
3489 if (s->listener) {
3490 debugs(1, 1, "FD " << s->listener->fd << " Closing HTTPS connection");
3491 delete s->listener;
3492 s->listener = NULL;
3493 }
3494 }
3495 #endif
3496
3497 // TODO see if we can drop HttpSockets array entirely
3498 for (int i = 0; i < NHttpSockets; i++) {
3499 HttpSockets[i] = -1;
3500 }
3501
3502 NHttpSockets = 0;
3503 }
3504
3505 int
3506 varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
3507 {
3508 const char *vary = request->vary_headers;
3509 int has_vary = entry->getReply()->header.has(HDR_VARY);
3510 #if X_ACCELERATOR_VARY
3511
3512 has_vary |=
3513 entry->getReply()->header.has(HDR_X_ACCELERATOR_VARY);
3514 #endif
3515
3516 if (!has_vary || !entry->mem_obj->vary_headers) {
3517 if (vary) {
3518 /* Oops... something odd is going on here.. */
3519 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3520 entry->mem_obj->url << "' '" << vary << "'");
3521 safe_free(request->vary_headers);
3522 return VARY_CANCEL;
3523 }
3524
3525 if (!has_vary) {
3526 /* This is not a varying object */
3527 return VARY_NONE;
3528 }
3529
3530 /* virtual "vary" object found. Calculate the vary key and
3531 * continue the search
3532 */
3533 vary = httpMakeVaryMark(request, entry->getReply());
3534
3535 if (vary) {
3536 request->vary_headers = xstrdup(vary);
3537 return VARY_OTHER;
3538 } else {
3539 /* Ouch.. we cannot handle this kind of variance */
3540 /* XXX This cannot really happen, but just to be complete */
3541 return VARY_CANCEL;
3542 }
3543 } else {
3544 if (!vary) {
3545 vary = httpMakeVaryMark(request, entry->getReply());
3546
3547 if (vary)
3548 request->vary_headers = xstrdup(vary);
3549 }
3550
3551 if (!vary) {
3552 /* Ouch.. we cannot handle this kind of variance */
3553 /* XXX This cannot really happen, but just to be complete */
3554 return VARY_CANCEL;
3555 } else if (strcmp(vary, entry->mem_obj->vary_headers) == 0) {
3556 return VARY_MATCH;
3557 } else {
3558 /* Oops.. we have already been here and still haven't
3559 * found the requested variant. Bail out
3560 */
3561 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3562 entry->mem_obj->url << "' '" << vary << "'");
3563 return VARY_CANCEL;
3564 }
3565 }
3566 }
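/*
 * varyEvaluateMatch() outcomes, summarised:
 *
 *     VARY_NONE   - the stored entry does not vary; it can be used as-is
 *     VARY_OTHER  - a base "vary marker" object was found and the request's
 *                   vary key has been computed; retry the lookup with it
 *     VARY_MATCH  - the stored variant's vary key matches this request
 *     VARY_CANCEL - the vary information is inconsistent or unsupported;
 *                   give up on vary matching and ignore the cached entry
 *
 * A caller would dispatch on the result roughly like this (the helpers
 * restartCacheLookup() and forwardToOrigin() are hypothetical):
 *
 *     switch (varyEvaluateMatch(entry, http->request)) {
 *     case VARY_NONE:
 *     case VARY_MATCH:
 *         break;                  // entry is usable for this request
 *     case VARY_OTHER:
 *         restartCacheLookup();   // redo the lookup with the new vary key
 *         break;
 *     case VARY_CANCEL:
 *         forwardToOrigin();      // bypass the cached object
 *         break;
 *     }
 */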
3567
3568 ACLFilledChecklist *
3569 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3570 {
3571 ConnStateData * conn = http->getConn();
3572 ACLFilledChecklist *ch = new ACLFilledChecklist(acl, http->request,
3573 cbdataReferenceValid(conn) && conn != NULL ? conn->rfc931 : dash_str);
3574
3575 /*
3576 * hack for ident ACL. It needs to get full addresses, and a place to store
3577 * the ident result on persistent connections...
3578 */
3579 /* connection-oriented auth also needs these two lines for its operation. */
3580 /*
3581 * Internal requests do not have a connection reference, because: A) their
3582 * byte count may be transformed before being applied to an outbound
3583 * connection B) they are internal - any limiting on them should be done on
3584 * the server end.
3585 */
3586
3587 if (conn != NULL)
3588 ch->conn(conn); /* unreferenced in FilledCheckList.cc */
3589
3590 return ch;
3591 }
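/*
 * A typical use of clientAclChecklistCreate() is to build a checklist for a
 * per-request ACL and run it asynchronously; exampleAccessList,
 * ExampleAclCallback and exampleData below are hypothetical:
 *
 *     ACLFilledChecklist *ch = clientAclChecklistCreate(exampleAccessList, http);
 *     ch->nonBlockingCheck(ExampleAclCallback, exampleData);
 */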
3592
3593 CBDATA_CLASS_INIT(ConnStateData);
3594
3595 ConnStateData::ConnStateData() :AsyncJob("ConnStateData"), transparent_ (false), reading_ (false), closing_ (false)
3596 {
3597 pinning.fd = -1;
3598 pinning.pinned = false;
3599 pinning.auth = false;
3600 }
3601
3602 bool
3603 ConnStateData::transparent() const
3604 {
3605 return transparent_;
3606 }
3607
3608 void
3609 ConnStateData::transparent(bool const anInt)
3610 {
3611 transparent_ = anInt;
3612 }
3613
3614 bool
3615 ConnStateData::reading() const
3616 {
3617 return reading_;
3618 }
3619
3620 void
3621 ConnStateData::reading(bool const newBool)
3622 {
3623 assert (reading() != newBool);
3624 reading_ = newBool;
3625 }
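/*
 * Note that reading(bool) asserts that the flag actually changes, so callers
 * must alternate strictly between the two states, roughly:
 *
 *     reading(true);    // a comm read has been scheduled
 *     ...
 *     reading(false);   // the read callback fired or the read was abandoned
 */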
3626
3627
3628 BodyPipe::Pointer
3629 ConnStateData::expectRequestBody(int64_t size)
3630 {
3631 bodyPipe = new BodyPipe(this);
3632 bodyPipe->setBodySize(size);
3633 return bodyPipe;
3634 }
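/*
 * expectRequestBody() makes this connection the producer of a new BodyPipe
 * sized to the declared request body; the consumer end is handed to whoever
 * processes the request. A rough sketch, assuming the usual producer-side
 * append call BodyPipe::putMoreData():
 *
 *     BodyPipe::Pointer pipe = conn->expectRequestBody(contentLength);
 *     request->body_pipe = pipe;       // consumer side attaches via the request
 *     ...
 *     // later, as body bytes arrive on the client socket:
 *     pipe->putMoreData(buf, len);
 */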
3635
3636 bool
3637 ConnStateData::closing() const
3638 {
3639 return closing_;
3640 }
3641
3642 /**
3643 * Called by ClientSocketContext to give the connection a chance to read
3644 * the entire body before closing the socket.
3645 */
3646 void
3647 ConnStateData::startClosing(const char *reason)
3648 {
3649 debugs(33, 5, HERE << "startClosing " << this << " for " << reason);
3650 assert(!closing());
3651 closing_ = true;
3652
3653 assert(bodyPipe != NULL);
3654 assert(bodySizeLeft() > 0);
3655
3656 // We do not have to abort the body pipeline because we are going to
3657 // read the entire body anyway.
3658 // Perhaps an ICAP server wants to log the complete request.
3659
3660 // If a consumer abort has caused this closing, we may get stuck
3661 // because nobody is consuming our data. Allow auto-consumption.
3662 bodyPipe->enableAutoConsumption();
3663 }
3664
3665 // initialize dechunking state
3666 void
3667 ConnStateData::startDechunkingRequest(HttpParser *hp)
3668 {
3669 debugs(33, 5, HERE << "start dechunking at " << HttpParserRequestLen(hp));
3670 assert(in.dechunkingState == chunkUnknown);
3671 assert(!in.bodyParser);
3672 in.bodyParser = new ChunkedCodingParser;
3673 in.chunkedSeen = HttpParserRequestLen(hp); // skip headers when dechunking
3674 in.chunked.init(); // TODO: should we have a smaller-than-default limit?
3675 in.dechunked.init();
3676 in.dechunkingState = chunkParsing;
3677 }
3678
3679 // put parsed content into input buffer and clean up
3680 void
3681 ConnStateData::finishDechunkingRequest(HttpParser *hp)
3682 {
3683 debugs(33, 5, HERE << "finish dechunking; content: " << in.dechunked.contentSize());
3684
3685 assert(in.dechunkingState == chunkReady);
3686 assert(in.bodyParser);
3687 delete in.bodyParser;
3688 in.bodyParser = NULL;
3689
3690 const mb_size_t headerSize = HttpParserRequestLen(hp);
3691
3692 // dechunking cannot make data bigger
3693 assert(headerSize + in.dechunked.contentSize() + in.chunked.contentSize()
3694 <= static_cast<mb_size_t>(in.notYetUsed));
3695 assert(in.notYetUsed <= in.allocatedSize);
3696
3697 // copy dechunked content
3698 char *end = in.buf + headerSize;
3699 xmemmove(end, in.dechunked.content(), in.dechunked.contentSize());
3700 end += in.dechunked.contentSize();
3701
3702 // copy any leftovers that followed the last chunk (possibly a pipelined request)
3703 if (in.chunked.contentSize()) {
3704 xmemmove(end, in.chunked.content(), in.chunked.contentSize());
3705 end += in.chunked.contentSize();
3706 }
3707
3708 in.notYetUsed = end - in.buf;
3709
3710 in.chunked.clean();
3711 in.dechunked.clean();
3712 in.dechunkingState = chunkUnknown;
3713 }
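/*
 * In other words, finishDechunkingRequest() rewrites in.buf in place,
 * replacing the chunked encoding with the decoded body:
 *
 *     before:  [ headers | chunk-size CRLF data CRLF ... 0 CRLF CRLF | leftovers ]
 *     after:   [ headers | dechunked body | leftovers ]
 *
 * The assertions above rely on dechunking never producing more bytes than the
 * chunked encoding it replaces, so the rewritten data always fits within the
 * original in.notYetUsed bytes.
 */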
3714
3715 // parse newly read request chunks and buffer them for finishDechunkingRequest()
3716 // returns true iff more data is needed
3717 bool
3718 ConnStateData::parseRequestChunks(HttpParser *)
3719 {
3720 debugs(33,5, HERE << "parsing chunked request body at " <<
3721 in.chunkedSeen << " < " << in.notYetUsed);
3722 assert(in.bodyParser);
3723 assert(in.dechunkingState == chunkParsing);
3724
3725 assert(in.chunkedSeen <= in.notYetUsed);
3726 const mb_size_t fresh = in.notYetUsed - in.chunkedSeen;
3727
3728 // be safe: count some chunked coding metadata towards the total body size
3729 if (fresh + in.dechunked.contentSize() > Config.maxChunkedRequestBodySize) {
3730 debugs(33,3, HERE << "chunked body (" << fresh << " + " <<
3731 in.dechunked.contentSize() << ") may exceed " <<
3732 "chunked_request_body_max_size=" <<
3733 Config.maxChunkedRequestBodySize);
3734 in.dechunkingState = chunkError;
3735 return false;
3736 }
3737
3738 if (fresh > in.chunked.potentialSpaceSize()) {
3739 // should not happen if Config.maxChunkedRequestBodySize is reasonable
3740 debugs(33,1, HERE << "request_body_max_size exceeds chunked buffer " <<
3741 "size: " << fresh << " + " << in.chunked.contentSize() << " > " <<
3742 in.chunked.potentialSpaceSize() << " with " <<
3743 "chunked_request_body_max_size=" <<
3744 Config.maxChunkedRequestBodySize);
3745 in.dechunkingState = chunkError;
3746 return false;
3747 }
3748 in.chunked.append(in.buf + in.chunkedSeen, fresh);
3749 in.chunkedSeen += fresh;
3750
3751 try { // the parser will throw on errors
3752 if (in.bodyParser->parse(&in.chunked, &in.dechunked))
3753 in.dechunkingState = chunkReady; // successfully parsed all chunks
3754 else
3755 return true; // need more, keep the same state
3756 } catch (...) {
3757 debugs(33,3, HERE << "chunk parsing error");
3758 in.dechunkingState = chunkError;
3759 }
3760 return false; // error, unsupported, or done
3761 }
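/*
 * Taken together, the three methods above implement this dechunking state
 * machine:
 *
 *     chunkUnknown --startDechunkingRequest()-------------------> chunkParsing
 *     chunkParsing --parseRequestChunks(): needs more data------> chunkParsing
 *     chunkParsing --parseRequestChunks(): all chunks parsed----> chunkReady
 *     chunkParsing --parseRequestChunks(): error or size limit--> chunkError
 *     chunkReady   --finishDechunkingRequest()------------------> chunkUnknown
 *
 * A caller driving it from a read loop would look roughly like this (the
 * helper handleChunkError() is hypothetical):
 *
 *     if (in.dechunkingState == chunkUnknown)
 *         startDechunkingRequest(hp);
 *     if (parseRequestChunks(hp))
 *         return;                        // need another comm read
 *     if (in.dechunkingState == chunkReady)
 *         finishDechunkingRequest(hp);   // body is now plain data in in.buf
 *     else
 *         handleChunkError();
 */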
3762
3763 char *
3764 ConnStateData::In::addressToReadInto() const
3765 {
3766 return buf + notYetUsed;
3767 }
3768
3769 ConnStateData::In::In() : bodyParser(NULL),
3770 buf (NULL), notYetUsed (0), allocatedSize (0),
3771 dechunkingState(ConnStateData::chunkUnknown)
3772 {}
3773
3774 ConnStateData::In::~In()
3775 {
3776 if (allocatedSize)
3777 memFreeBuf(allocatedSize, buf);
3778 if (bodyParser)
3779 delete bodyParser; // TODO: pool
3780 }
3781
3782 /* This is a comm call normally scheduled by comm_close() */
3783 void
3784 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
3785 {
3786 pinning.fd = -1;
3787 if (pinning.peer) {
3788 cbdataReferenceDone(pinning.peer);
3789 }
3790 safe_free(pinning.host);
3791 /* NOTE: pinning.pinned should be kept. Combined with pinning.fd == -1 at the end of a request,
3792 * it indicates that the pinned host connection has gone away. */
3793 }
3794
3795 void ConnStateData::pinConnection(int pinning_fd, HttpRequest *request, struct peer *aPeer, bool auth)
3796 {
3797 fde *f;
3798 char desc[FD_DESC_SZ];
3799
3800 if (pinning.fd == pinning_fd)
3801 return;
3802 else if (pinning.fd != -1)
3803 comm_close(pinning.fd);
3804
3805 if (pinning.host)
3806 safe_free(pinning.host);
3807
3808 pinning.fd = pinning_fd;
3809 pinning.host = xstrdup(request->GetHost());
3810 pinning.port = request->port;
3811 pinning.pinned = true;
3812 if (pinning.peer)
3813 cbdataReferenceDone(pinning.peer);
3814 if (aPeer)
3815 pinning.peer = cbdataReference(aPeer);
3816 pinning.auth = auth;
3817 f = &fd_table[fd];
3818 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s:%d (%d)",
3819 (auth || !aPeer) ? request->GetHost() : aPeer->name, f->ipaddr, (int) f->remote_port, fd);
3820 fd_note(pinning_fd, desc);
3821
3822 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3823 pinning.closeHandler = asyncCall(33, 5, "ConnStateData::clientPinnedConnectionClosed",
3824 Dialer(this, &ConnStateData::clientPinnedConnectionClosed));
3825 comm_add_close_handler(pinning_fd, pinning.closeHandler);
3826
3827 }
3828
3829 int ConnStateData::validatePinnedConnection(HttpRequest *request, const struct peer *aPeer)
3830 {
3831 bool valid = true;
3832 if (pinning.fd < 0)
3833 return -1;
3834
3835 if (pinning.auth && request && strcasecmp(pinning.host, request->GetHost()) != 0) {
3836 valid = false;
3837 }
3838 if (request && pinning.port != request->port) {
3839 valid = false;
3840 }
3841 if (pinning.peer && !cbdataReferenceValid(pinning.peer)) {
3842 valid = false;
3843 }
3844 if (aPeer != pinning.peer) {
3845 valid = false;
3846 }
3847
3848 if (!valid) {
3849 int pinning_fd = pinning.fd;
3850 /* The pinning info is no longer valid; drop it. */
3851 unpinConnection();
3852
3853 /* Also close the server-side socket; we should not reuse it for
3854 * invalid or unauthenticated requests.
3855 */
3856 comm_close(pinning_fd);
3857 return -1;
3858 }
3859
3860 return pinning.fd;
3861 }
3862
3863 void ConnStateData::unpinConnection()
3864 {
3865 if (pinning.peer)
3866 cbdataReferenceDone(pinning.peer);
3867
3868 if (pinning.closeHandler != NULL) {
3869 comm_remove_close_handler(pinning.fd, pinning.closeHandler);
3870 pinning.closeHandler = NULL;
3871 }
3872 pinning.fd = -1;
3873 safe_free(pinning.host);
3874 }
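/*
 * Pinning lifecycle, as implemented above: a server-side FD is tied to this
 * client connection (e.g. for connection-oriented authentication), later
 * requests check whether the pinned FD is still usable, and the pin is
 * dropped when either side goes away. Roughly, with hypothetical helpers
 * reusePinnedFd() and openNewServerConnection():
 *
 *     // after establishing a server connection worth pinning:
 *     conn->pinConnection(serverFd, request, peer, authRequired);
 *
 *     // on a later request arriving on the same client connection:
 *     int fd = conn->validatePinnedConnection(request, peer);
 *     if (fd >= 0)
 *         reusePinnedFd(fd);
 *     else
 *         openNewServerConnection();  // pin was missing, stale, or mismatched
 *
 *     // explicit teardown; clientPinnedConnectionClosed() handles the case
 *     // where the server side closes first:
 *     conn->unpinConnection();
 */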