1 /*
2 * $Id$
3 *
4 * DEBUG: section 33 Client-side Routines
5 * AUTHOR: Duane Wessels
6 *
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
9 *
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 *
33 */
34
35 /**
36 \defgroup ClientSide Client-Side Logic
37 *
38 \section cserrors Errors and client side
39 *
40 \par Problem the first:
41 * the store entry is no longer authoritative on the
42 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
43 * of client_side_reply.c.
44 * Problem the second: resources are wasted if we delay in cleaning up.
45 * Problem the third: we can't depend on a connection close to clean up.
46 *
47 \par Nice thing the first:
48 * Any step in the stream can call back with data
49 * representing an error.
50 * Nice thing the second: once you stop requesting reads from upstream,
51 * upstream can be stopped too.
52 *
53 \par Solution #1:
54 * Error has a callback mechanism to hand over a membuf
55 * with the error content. The failing node pushes that back as the
56 * reply. Can this be generalised to reduce duplicate efforts?
57 * A: Possibly. For now, only one location uses this.
58 * How to deal with pre-stream errors?
59 * Tell client_side_reply that we *want* an error page before any
60 * stream calls occur. Then we simply read as normal.
61 *
62 *
63 \section pconn_logic Persistent connection logic:
64 *
65 \par
66 * requests (httpClientRequest structs) get added to the connection
67 * list, with the current one being chr
68 *
69 \par
70 * The request is *immediately* kicked off, and data flows through
71 * to clientSocketRecipient.
72 *
73 \par
74 * If the data that arrives at clientSocketRecipient is not for the current
75 * request, clientSocketRecipient simply returns, without requesting more
76 * data, or sending it.
77 *
78 \par
79 * ClientKeepAliveNextRequest will then detect the presence of data in
80 * the next ClientHttpRequest, and will send it, re-establishing the
81 * data flow.
82 */
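/*
 * Illustrative walk-through of the pipelining flow described above (the
 * scenario is hypothetical; the function names are the ones used in this
 * file):
 *
 *  1. Two pipelined requests arrive. Each is parsed into its own
 *     ClientSocketContext and registered with the ConnStateData queue.
 *  2. Reply data for request #2 reaches clientSocketRecipient() while
 *     request #1 is still the current context, so it is parked via
 *     deferRecipientForLater() instead of being written.
 *  3. When request #1 completes, keepaliveNextRequest() finds the deferred
 *     context and ClientSocketContextPushDeferredIfNeeded() replays the
 *     parked node/reply/buffer, re-establishing the data flow.
 */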
83
84 #include "squid.h"
85 #include "client_side.h"
86 #include "clientStream.h"
87 #include "ProtoPort.h"
88 #include "auth/UserRequest.h"
89 #include "Store.h"
90 #include "comm.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpReply.h"
93 #include "HttpRequest.h"
94 #include "ident/Config.h"
95 #include "ident/Ident.h"
96 #include "ip/IpIntercept.h"
97 #include "MemObject.h"
98 #include "fde.h"
99 #include "client_side_request.h"
100 #include "acl/FilledChecklist.h"
101 #include "ConnectionDetail.h"
102 #include "client_side_reply.h"
103 #include "ClientRequestContext.h"
104 #include "MemBuf.h"
105 #include "SquidTime.h"
106 #include "ChunkedCodingParser.h"
107
108 #if LINGERING_CLOSE
109 #define comm_close comm_lingering_close
110 #endif
111
112 /* our socket-related context */
113
114
115 CBDATA_CLASS_INIT(ClientSocketContext);
116
117 void *
118 ClientSocketContext::operator new (size_t byteCount)
119 {
120 /* derived classes with different sizes must implement their own new */
121 assert (byteCount == sizeof (ClientSocketContext));
122 CBDATA_INIT_TYPE(ClientSocketContext);
123 return cbdataAlloc(ClientSocketContext);
124 }
125
126 void
127 ClientSocketContext::operator delete (void *address)
128 {
129 cbdataFree (address);
130 }
131
132 /* Local functions */
133 /* ClientSocketContext */
134 static ClientSocketContext *ClientSocketContextNew(ClientHttpRequest *);
135 /* other */
136 static IOCB clientWriteComplete;
137 static IOCB clientWriteBodyComplete;
138 static bool clientParseRequest(ConnStateData * conn, bool &do_next_read);
139 static PF clientLifetimeTimeout;
140 static ClientSocketContext *parseHttpRequestAbort(ConnStateData * conn,
141 const char *uri);
142 static ClientSocketContext *parseHttpRequest(ConnStateData *, HttpParser *, HttpRequestMethod *, HttpVersion *);
143 #if USE_IDENT
144 static IDCB clientIdentDone;
145 #endif
146 static CSCB clientSocketRecipient;
147 static CSD clientSocketDetach;
148 static void clientSetKeepaliveFlag(ClientHttpRequest *);
149 static int clientIsContentLengthValid(HttpRequest * r);
150 static bool okToAccept();
151 static int clientIsRequestBodyValid(int64_t bodyLength);
152 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
153
154 static void clientUpdateStatHistCounters(log_type logType, int svc_time);
155 static void clientUpdateStatCounters(log_type logType);
156 static void clientUpdateHierCounters(HierarchyLogEntry *);
157 static bool clientPingHasFinished(ping_data const *aPing);
158 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry *);
159 #ifndef PURIFY
160 static int connIsUsable(ConnStateData * conn);
161 #endif
162 static int responseFinishedOrFailed(HttpReply * rep, StoreIOBuffer const &receivedData);
163 static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest, ConnStateData * conn);
164 static void clientUpdateSocketStats(log_type logType, size_t size);
165
166 char *skipLeadingSpace(char *aString);
167 static void connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount);
168 static int connKeepReadingIncompleteRequest(ConnStateData * conn);
169 static void connCancelIncompleteRequests(ConnStateData * conn);
170
171 static ConnStateData *connStateCreate(const IpAddress &peer, const IpAddress &me, int fd, http_port_list *port);
172
173
174 int
175 ClientSocketContext::fd() const
176 {
177 assert (http);
178 assert (http->getConn() != NULL);
179 return http->getConn()->fd;
180 }
181
182 clientStreamNode *
183 ClientSocketContext::getTail() const
184 {
185 if (http->client_stream.tail)
186 return (clientStreamNode *)http->client_stream.tail->data;
187
188 return NULL;
189 }
190
191 clientStreamNode *
192 ClientSocketContext::getClientReplyContext() const
193 {
194 return (clientStreamNode *)http->client_stream.tail->prev->data;
195 }
196
197 /**
198 * This routine should be called to grow the inbuf and then
199 * call comm_read().
200 */
201 void
202 ConnStateData::readSomeData()
203 {
204 if (reading())
205 return;
206
207 reading(true);
208
209 debugs(33, 4, "clientReadSomeData: FD " << fd << ": reading request...");
210
211 makeSpaceAvailable();
212
213 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
214 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::clientReadRequest",
215 Dialer(this, &ConnStateData::clientReadRequest));
216 comm_read(fd, in.addressToReadInto(), getAvailableBufferLength(), call);
217 }
218
219
220 void
221 ClientSocketContext::removeFromConnectionList(ConnStateData * conn)
222 {
223 ClientSocketContext::Pointer *tempContextPointer;
224 assert(conn != NULL && cbdataReferenceValid(conn));
225 assert(conn->getCurrentContext() != NULL);
226 /* Unlink us from the connection request list */
227 tempContextPointer = & conn->currentobject;
228
229 while (tempContextPointer->getRaw()) {
230 if (*tempContextPointer == this)
231 break;
232
233 tempContextPointer = &(*tempContextPointer)->next;
234 }
235
236 assert(tempContextPointer->getRaw() != NULL);
237 *tempContextPointer = next;
238 next = NULL;
239 }
240
241 ClientSocketContext::~ClientSocketContext()
242 {
243 clientStreamNode *node = getTail();
244
245 if (node) {
246 ClientSocketContext *streamContext = dynamic_cast<ClientSocketContext *> (node->data.getRaw());
247
248 if (streamContext) {
249 /* We are *always* the tail - prevent recursive free */
250 assert(this == streamContext);
251 node->data = NULL;
252 }
253 }
254
255 if (connRegistered_)
256 deRegisterWithConn();
257
258 httpRequestFree(http);
259
260 /* clean up connection links to us */
261 assert(this != next.getRaw());
262 }
263
264 void
265 ClientSocketContext::registerWithConn()
266 {
267 assert (!connRegistered_);
268 assert (http);
269 assert (http->getConn() != NULL);
270 connRegistered_ = true;
271 http->getConn()->addContextToQueue(this);
272 }
273
274 void
275 ClientSocketContext::deRegisterWithConn()
276 {
277 assert (connRegistered_);
278 removeFromConnectionList(http->getConn());
279 connRegistered_ = false;
280 }
281
282 void
283 ClientSocketContext::connIsFinished()
284 {
285 assert (http);
286 assert (http->getConn() != NULL);
287 deRegisterWithConn();
288 /* we can't handle any more stream data - detach */
289 clientStreamDetach(getTail(), http);
290 }
291
292 ClientSocketContext::ClientSocketContext() : http(NULL), reply(NULL), next(NULL),
293 writtenToSocket(0),
294 mayUseConnection_ (false),
295 connRegistered_ (false)
296 {
297 memset (reqbuf, '\0', sizeof (reqbuf));
298 flags.deferred = 0;
299 flags.parsed_ok = 0;
300 deferredparams.node = NULL;
301 deferredparams.rep = NULL;
302 }
303
304 ClientSocketContext *
305 ClientSocketContextNew(ClientHttpRequest * http)
306 {
307 ClientSocketContext *newContext;
308 assert(http != NULL);
309 newContext = new ClientSocketContext;
310 newContext->http = http;
311 return newContext;
312 }
313
314 #if USE_IDENT
315 static void
316 clientIdentDone(const char *ident, void *data)
317 {
318 ConnStateData *conn = (ConnStateData *)data;
319 xstrncpy(conn->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
320 }
321 #endif
322
323 void
324 clientUpdateStatCounters(log_type logType)
325 {
326 statCounter.client_http.requests++;
327
328 if (logTypeIsATcpHit(logType))
329 statCounter.client_http.hits++;
330
331 if (logType == LOG_TCP_HIT)
332 statCounter.client_http.disk_hits++;
333 else if (logType == LOG_TCP_MEM_HIT)
334 statCounter.client_http.mem_hits++;
335 }
336
337 void
338 clientUpdateStatHistCounters(log_type logType, int svc_time)
339 {
340 statHistCount(&statCounter.client_http.all_svc_time, svc_time);
341 /**
342 * The idea here is not to be complete, but to get service times
343 * for only well-defined types. For example, we don't include
344 * LOG_TCP_REFRESH_FAIL because it's not really a cache hit
345 * (we *tried* to validate it, but failed).
346 */
347
348 switch (logType) {
349
350 case LOG_TCP_REFRESH_UNMODIFIED:
351 statHistCount(&statCounter.client_http.nh_svc_time, svc_time);
352 break;
353
354 case LOG_TCP_IMS_HIT:
355 statHistCount(&statCounter.client_http.nm_svc_time, svc_time);
356 break;
357
358 case LOG_TCP_HIT:
359
360 case LOG_TCP_MEM_HIT:
361
362 case LOG_TCP_OFFLINE_HIT:
363 statHistCount(&statCounter.client_http.hit_svc_time, svc_time);
364 break;
365
366 case LOG_TCP_MISS:
367
368 case LOG_TCP_CLIENT_REFRESH_MISS:
369 statHistCount(&statCounter.client_http.miss_svc_time, svc_time);
370 break;
371
372 default:
373 /* make compiler warnings go away */
374 break;
375 }
376 }
377
378 bool
379 clientPingHasFinished(ping_data const *aPing)
380 {
381 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
382 return true;
383
384 return false;
385 }
386
387 void
388 clientUpdateHierCounters(HierarchyLogEntry * someEntry)
389 {
390 ping_data *i;
391
392 switch (someEntry->code) {
393 #if USE_CACHE_DIGESTS
394
395 case CD_PARENT_HIT:
396
397 case CD_SIBLING_HIT:
398 statCounter.cd.times_used++;
399 break;
400 #endif
401
402 case SIBLING_HIT:
403
404 case PARENT_HIT:
405
406 case FIRST_PARENT_MISS:
407
408 case CLOSEST_PARENT_MISS:
409 statCounter.icp.times_used++;
410 i = &someEntry->ping;
411
412 if (clientPingHasFinished(i))
413 statHistCount(&statCounter.icp.query_svc_time,
414 tvSubUsec(i->start, i->stop));
415
416 if (i->timeout)
417 statCounter.icp.query_timeouts++;
418
419 break;
420
421 case CLOSEST_PARENT:
422
423 case CLOSEST_DIRECT:
424 statCounter.netdb.times_used++;
425
426 break;
427
428 default:
429 break;
430 }
431 }
432
433 void
434 ClientHttpRequest::updateCounters()
435 {
436 clientUpdateStatCounters(logType);
437
438 if (request->errType != ERR_NONE)
439 statCounter.client_http.errors++;
440
441 clientUpdateStatHistCounters(logType,
442 tvSubMsec(start_time, current_time));
443
444 clientUpdateHierCounters(&request->hier);
445 }
446
447 void
448 prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry * aLogEntry)
449 {
450 assert(request);
451 assert(aLogEntry);
452
453 #if ICAP_CLIENT
454 Adaptation::Icap::History::Pointer ih = request->icapHistory();
455 #endif
456 if (Config.onoff.log_mime_hdrs) {
457 Packer p;
458 MemBuf mb;
459 mb.init();
460 packerToMemInit(&p, &mb);
461 request->header.packInto(&p);
462 aLogEntry->headers.request = xstrdup(mb.buf);
463
464 #if ICAP_CLIENT
465 packerClean(&p);
466 mb.reset();
467 packerToMemInit(&p, &mb);
468
469 if (ih != NULL)
470 ih->lastIcapHeader.packInto(&p);
471 aLogEntry->headers.icap = xstrdup(mb.buf);
472 #endif
473
474 packerClean(&p);
475 mb.clean();
476 }
477
478 #if ICAP_CLIENT
479 if (ih != NULL)
480 aLogEntry->icap.processingTime = ih->processingTime();
481 #endif
482
483 aLogEntry->http.method = request->method;
484 aLogEntry->http.version = request->http_ver;
485 aLogEntry->hier = request->hier;
486 if (request->content_length > 0) // negative when no body or unknown length
487 aLogEntry->cache.requestSize += request->content_length;
488 aLogEntry->cache.extuser = request->extacl_user.termedBuf();
489
490 if (request->auth_user_request) {
491
492 if (request->auth_user_request->username())
493 aLogEntry->cache.authuser =
494 xstrdup(request->auth_user_request->username());
495
496 AUTHUSERREQUESTUNLOCK(request->auth_user_request, "request via clientPrepareLogWithRequestDetails");
497 }
498 }
499
500 void
501 ClientHttpRequest::logRequest()
502 {
503 if (out.size || logType) {
504 al.icp.opcode = ICP_INVALID;
505 al.url = log_uri;
506 debugs(33, 9, "clientLogRequest: al.url='" << al.url << "'");
507
508 if (al.reply) {
509 al.http.code = al.reply->sline.status;
510 al.http.content_type = al.reply->content_type.termedBuf();
511 } else if (loggingEntry() && loggingEntry()->mem_obj) {
512 al.http.code = loggingEntry()->mem_obj->getReply()->sline.status;
513 al.http.content_type = loggingEntry()->mem_obj->getReply()->content_type.termedBuf();
514 }
515
516 debugs(33, 9, "clientLogRequest: http.code='" << al.http.code << "'");
517
518 if (loggingEntry() && loggingEntry()->mem_obj)
519 al.cache.objectSize = loggingEntry()->contentLen();
520
521 al.cache.caddr.SetNoAddr();
522
523 if (getConn() != NULL) al.cache.caddr = getConn()->log_addr;
524
525 al.cache.requestSize = req_sz;
526 al.cache.requestHeadersSize = req_sz;
527
528 al.cache.replySize = out.size;
529 al.cache.replyHeadersSize = out.headers_sz;
530
531 al.cache.highOffset = out.offset;
532
533 al.cache.code = logType;
534
535 al.cache.msec = tvSubMsec(start_time, current_time);
536
537 if (request)
538 prepareLogWithRequestDetails(request, &al);
539
540 if (getConn() != NULL && getConn()->rfc931[0])
541 al.cache.rfc931 = getConn()->rfc931;
542
543 #if USE_SSL && 0
544
545 /* This is broken. Fails if the connection has been closed. Needs
546 * to snarf the ssl details some place earlier..
547 */
548 if (getConn() != NULL)
549 al.cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);
550
551 #endif
552
553 ACLFilledChecklist *checklist = clientAclChecklistCreate(Config.accessList.log, this);
554
555 if (al.reply)
556 checklist->reply = HTTPMSGLOCK(al.reply);
557
558 if (!Config.accessList.log || checklist->fastCheck()) {
559 if (request)
560 al.request = HTTPMSGLOCK(request);
561 accessLogLog(&al, checklist);
562 updateCounters();
563
564 if (getConn() != NULL)
565 clientdbUpdate(getConn()->peer, logType, PROTO_HTTP, out.size);
566 }
567
568 delete checklist;
569
570 accessLogFreeMemory(&al);
571 }
572 }
573
574 void
575 ClientHttpRequest::freeResources()
576 {
577 safe_free(uri);
578 safe_free(log_uri);
579 safe_free(redirect.location);
580 range_iter.boundary.clean();
581 HTTPMSGUNLOCK(request);
582
583 if (client_stream.tail)
584 clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
585 }
586
587 void
588 httpRequestFree(void *data)
589 {
590 ClientHttpRequest *http = (ClientHttpRequest *)data;
591 assert(http != NULL);
592 delete http;
593 }
594
595 bool
596 ConnStateData::areAllContextsForThisConnection() const
597 {
598 assert(this != NULL);
599 ClientSocketContext::Pointer context = getCurrentContext();
600
601 while (context.getRaw()) {
602 if (context->http->getConn() != this)
603 return false;
604
605 context = context->next;
606 }
607
608 return true;
609 }
610
611 void
612 ConnStateData::freeAllContexts()
613 {
614 ClientSocketContext::Pointer context;
615
616 while ((context = getCurrentContext()).getRaw() != NULL) {
617 assert(getCurrentContext() !=
618 getCurrentContext()->next);
619 context->connIsFinished();
620 assert (context != currentobject);
621 }
622 }
623
624 /* This is a handler normally called by comm_close() */
625 void ConnStateData::connStateClosed(const CommCloseCbParams &io)
626 {
627 assert (fd == io.fd);
628 deleteThis("ConnStateData::connStateClosed");
629 }
630
631 // cleans up before destructor is called
632 void
633 ConnStateData::swanSong()
634 {
635 debugs(33, 2, "ConnStateData::swanSong: FD " << fd);
636 fd = -1;
637 flags.readMoreRequests = false;
638 clientdbEstablished(peer, -1); /* decrement */
639 assert(areAllContextsForThisConnection());
640 freeAllContexts();
641
642 if (auth_user_request != NULL) {
643 debugs(33, 4, "ConnStateData::swanSong: freeing auth_user_request '" << auth_user_request << "' (this is '" << this << "')");
644 auth_user_request->onConnectionClose(this);
645 }
646
647 if (pinning.fd >= 0)
648 comm_close(pinning.fd);
649
650 BodyProducer::swanSong();
651 flags.swanSang = true;
652 }
653
654 bool
655 ConnStateData::isOpen() const
656 {
657 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
658 fd >= 0 &&
659 !fd_table[fd].closing();
660 }
661
662 ConnStateData::~ConnStateData()
663 {
664 assert(this != NULL);
665 debugs(33, 3, "ConnStateData::~ConnStateData: FD " << fd);
666
667 if (isOpen())
668 debugs(33, 1, "BUG: ConnStateData did not close FD " << fd);
669
670 if (!flags.swanSang)
671 debugs(33, 1, "BUG: ConnStateData was not destroyed properly; FD " << fd);
672
673 AUTHUSERREQUESTUNLOCK(auth_user_request, "~conn");
674
675 cbdataReferenceDone(port);
676
677 if (bodyPipe != NULL)
678 stopProducingFor(bodyPipe, false);
679 }
680
681 /**
682 * clientSetKeepaliveFlag() sets request->flags.proxy_keepalive.
683 * This is the client-side persistent connection flag. We need
684 * to set this relatively early in the request processing
685 * to handle hacks for broken servers and clients.
686 */
687 static void
688 clientSetKeepaliveFlag(ClientHttpRequest * http)
689 {
690 HttpRequest *request = http->request;
691 const HttpHeader *req_hdr = &request->header;
692
693 debugs(33, 3, "clientSetKeepaliveFlag: http_ver = " <<
694 request->http_ver.major << "." << request->http_ver.minor);
695 debugs(33, 3, "clientSetKeepaliveFlag: method = " <<
696 RequestMethodStr(request->method));
697
698 HttpVersion http_ver(1,0);
699 /* we are HTTP/1.0, no matter what the client requests... */
700
701 if (httpMsgIsPersistent(http_ver, req_hdr))
702 request->flags.proxy_keepalive = 1;
703 }
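/*
 * Illustrative outcome (assuming httpMsgIsPersistent() applies the usual
 * HTTP/1.0 rule, since the version is forced to 1.0 above): a request whose
 * headers carry an explicit "keep-alive" connection directive gets
 * request->flags.proxy_keepalive set; a request without one, or one asking
 * for "Connection: close", is left non-persistent.
 */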
704
705 static int
706 clientIsContentLengthValid(HttpRequest * r)
707 {
708 switch (r->method.id()) {
709
710 case METHOD_PUT:
711
712 case METHOD_POST:
713 /* PUT/POST requires a request entity */
714 return (r->content_length >= 0);
715
716 case METHOD_GET:
717
718 case METHOD_HEAD:
719 /* We do not want to see a request entity on GET/HEAD requests */
720 return (r->content_length <= 0 || Config.onoff.request_entities);
721
722 default:
723 /* For other types of requests we don't care */
724 return 1;
725 }
726
727 /* NOT REACHED */
728 }
729
730 int
731 clientIsRequestBodyValid(int64_t bodyLength)
732 {
733 if (bodyLength >= 0)
734 return 1;
735
736 return 0;
737 }
738
739 int
740 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
741 {
742 if (Config.maxRequestBodySize &&
743 bodyLength > Config.maxRequestBodySize)
744 return 1; /* too large */
745
746 return 0;
747 }
748
749 #ifndef PURIFY
750 int
751 connIsUsable(ConnStateData * conn)
752 {
753 if (conn == NULL || !cbdataReferenceValid(conn) || conn->fd == -1)
754 return 0;
755
756 return 1;
757 }
758
759 #endif
760
761 ClientSocketContext::Pointer
762 ConnStateData::getCurrentContext() const
763 {
764 assert(this);
765 return currentobject;
766 }
767
768 void
769 ClientSocketContext::deferRecipientForLater(clientStreamNode * node, HttpReply * rep, StoreIOBuffer receivedData)
770 {
771 debugs(33, 2, "clientSocketRecipient: Deferring request " << http->uri);
772 assert(flags.deferred == 0);
773 flags.deferred = 1;
774 deferredparams.node = node;
775 deferredparams.rep = rep;
776 deferredparams.queuedBuffer = receivedData;
777 return;
778 }
779
780 int
781 responseFinishedOrFailed(HttpReply * rep, StoreIOBuffer const & receivedData)
782 {
783 if (rep == NULL && receivedData.data == NULL && receivedData.length == 0)
784 return 1;
785
786 return 0;
787 }
788
789 bool
790 ClientSocketContext::startOfOutput() const
791 {
792 return http->out.size == 0;
793 }
794
795 size_t
796 ClientSocketContext::lengthToSend(Range<int64_t> const &available)
797 {
798 /* the size of the available range can always fit in a size_t type */
799 size_t maximum = (size_t)available.size();
800
801 if (!http->request->range)
802 return maximum;
803
804 assert (canPackMoreRanges());
805
806 if (http->range_iter.debt() == -1)
807 return maximum;
808
809 assert (http->range_iter.debt() > 0);
810
811 /* TODO this + the last line could be a range intersection calculation */
812 if (available.start < http->range_iter.currentSpec()->offset)
813 return 0;
814
815 return min(http->range_iter.debt(), (int64_t)maximum);
816 }
817
818 void
819 ClientSocketContext::noteSentBodyBytes(size_t bytes)
820 {
821 http->out.offset += bytes;
822
823 if (!http->request->range)
824 return;
825
826 if (http->range_iter.debt() != -1) {
827 http->range_iter.debt(http->range_iter.debt() - bytes);
828 assert (http->range_iter.debt() >= 0);
829 }
830
831 /* debt() always stops at -1, below that is a bug */
832 assert (http->range_iter.debt() >= -1);
833 }
834
835 bool
836 ClientHttpRequest::multipartRangeRequest() const
837 {
838 return request->multipartRangeRequest();
839 }
840
841 bool
842 ClientSocketContext::multipartRangeRequest() const
843 {
844 return http->multipartRangeRequest();
845 }
846
847 void
848 ClientSocketContext::sendBody(HttpReply * rep, StoreIOBuffer bodyData)
849 {
850 assert(rep == NULL);
851
852 if (!multipartRangeRequest()) {
853 size_t length = lengthToSend(bodyData.range());
854 noteSentBodyBytes (length);
855 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteBodyComplete",
856 CommIoCbPtrFun(clientWriteBodyComplete, this));
857 comm_write(fd(), bodyData.data, length, call );
858 return;
859 }
860
861 MemBuf mb;
862 mb.init();
863 packRange(bodyData, &mb);
864
865 if (mb.contentSize()) {
866 /* write */
867 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteComplete",
868 CommIoCbPtrFun(clientWriteComplete, this));
869 comm_write_mbuf(fd(), &mb, call);
870 } else
871 writeComplete(fd(), NULL, 0, COMM_OK);
872 }
873
874 /** put terminating boundary for multiparts */
875 static void
876 clientPackTermBound(String boundary, MemBuf * mb)
877 {
878 mb->Printf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
879 debugs(33, 6, "clientPackTermBound: buf offset: " << mb->size);
880 }
881
882 /** appends a "part" HTTP header (as in a multi-part/range reply) to the buffer */
883 static void
884 clientPackRangeHdr(const HttpReply * rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
885 {
886 HttpHeader hdr(hoReply);
887 Packer p;
888 assert(rep);
889 assert(spec);
890
891 /* put boundary */
892 debugs(33, 5, "clientPackRangeHdr: appending boundary: " << boundary);
893 * RFC 2046 requires the boundary to be _prepended_ with a <crlf>! */
894 mb->Printf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));
895
896 /* stuff the header with required entries and pack it */
897
898 if (rep->header.has(HDR_CONTENT_TYPE))
899 hdr.putStr(HDR_CONTENT_TYPE, rep->header.getStr(HDR_CONTENT_TYPE));
900
901 httpHeaderAddContRange(&hdr, *spec, rep->content_length);
902
903 packerToMemInit(&p, mb);
904
905 hdr.packInto(&p);
906
907 packerClean(&p);
908
909 hdr.clean();
910
911 /* append <crlf> (we packed a header, not a reply) */
912 mb->Printf("\r\n");
913 }
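/*
 * Illustrative framing for one part of a multipart/byteranges reply, as
 * produced by clientPackRangeHdr() plus the body bytes appended by
 * packRange() (boundary, type and sizes are hypothetical):
 *
 *   \r\n--BOUNDARY\r\n
 *   Content-Type: text/html\r\n
 *   Content-Range: bytes 500-999/8000\r\n
 *   \r\n
 *   ...500 bytes of entity data...
 *
 * clientPackTermBound() later closes the message with "\r\n--BOUNDARY--\r\n".
 */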
914
915 /**
916 * extracts a "range" from *buf and appends it to mb, updating
917 * all offsets and such.
918 */
919 void
920 ClientSocketContext::packRange(StoreIOBuffer const &source, MemBuf * mb)
921 {
922 HttpHdrRangeIter * i = &http->range_iter;
923 Range<int64_t> available (source.range());
924 char const *buf = source.data;
925
926 while (i->currentSpec() && available.size()) {
927 const size_t copy_sz = lengthToSend(available);
928
929 if (copy_sz) {
930 /*
931 * intersection of "have" and "need" ranges must not be empty
932 */
933 assert(http->out.offset < i->currentSpec()->offset + i->currentSpec()->length);
934 assert(http->out.offset + available.size() > i->currentSpec()->offset);
935
936 /*
937 * put boundary and headers at the beginning of a range in a
938 * multi-range
939 */
940
941 if (http->multipartRangeRequest() && i->debt() == i->currentSpec()->length) {
942 assert(http->memObject());
943 clientPackRangeHdr(
944 http->memObject()->getReply(), /* original reply */
945 i->currentSpec(), /* current range */
946 i->boundary, /* boundary, the same for all */
947 mb);
948 }
949
950 /*
951 * append content
952 */
953 debugs(33, 3, "clientPackRange: appending " << copy_sz << " bytes");
954
955 noteSentBodyBytes (copy_sz);
956
957 mb->append(buf, copy_sz);
958
959 /*
960 * update offsets
961 */
962 available.start += copy_sz;
963
964 buf += copy_sz;
965
966 }
967
968 /*
969 * paranoid check
970 */
971 assert((available.size() >= 0 && i->debt() >= 0) || i->debt() == -1);
972
973 if (!canPackMoreRanges()) {
974 debugs(33, 3, "clientPackRange: Returning because !canPackMoreRanges.");
975
976 if (i->debt() == 0)
977 /* put terminating boundary for multiparts */
978 clientPackTermBound(i->boundary, mb);
979
980 return;
981 }
982
983 int64_t next = getNextRangeOffset();
984
985 assert (next >= http->out.offset);
986
987 int64_t skip = next - http->out.offset;
988
989 /* adjust for bytes that will not be transmitted */
990 http->out.offset = next;
991
992 if (available.size() <= skip)
993 return;
994
995 available.start += skip;
996
997 buf += skip;
998
999 if (copy_sz == 0)
1000 return;
1001 }
1002 }
1003
1004 /** returns expected content length for multi-range replies
1005 * note: assumes that httpHdrRangeCanonize has already been called
1006 * warning: assumes that HTTP headers for individual ranges at the
1007 * time of the actual assembly will be exactly the same as
1008 * the headers when clientMRangeCLen() is called */
1009 int
1010 ClientHttpRequest::mRangeCLen()
1011 {
1012 int64_t clen = 0;
1013 MemBuf mb;
1014
1015 assert(memObject());
1016
1017 mb.init();
1018 HttpHdrRange::iterator pos = request->range->begin();
1019
1020 while (pos != request->range->end()) {
1021 /* account for headers for this range */
1022 mb.reset();
1023 clientPackRangeHdr(memObject()->getReply(),
1024 *pos, range_iter.boundary, &mb);
1025 clen += mb.size;
1026
1027 /* account for range content */
1028 clen += (*pos)->length;
1029
1030 debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
1031 ++pos;
1032 }
1033
1034 /* account for the terminating boundary */
1035 mb.reset();
1036
1037 clientPackTermBound(range_iter.boundary, &mb);
1038
1039 clen += mb.size;
1040
1041 mb.clean();
1042
1043 return clen;
1044 }
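/*
 * Worked example of the computation above (all sizes hypothetical): for two
 * ranges of 100 and 200 body bytes whose per-part headers pack to 70 and 72
 * bytes, plus a 14 byte terminating boundary,
 *   clen = (70 + 100) + (72 + 200) + 14 = 456
 */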
1045
1046 /**
1047 * returns true if If-Range specs match reply, false otherwise
1048 */
1049 static int
1050 clientIfRangeMatch(ClientHttpRequest * http, HttpReply * rep)
1051 {
1052 const TimeOrTag spec = http->request->header.getTimeOrTag(HDR_IF_RANGE);
1053 /* check for parsing failure */
1054
1055 if (!spec.valid)
1056 return 0;
1057
1058 /* got an ETag? */
1059 if (spec.tag.str) {
1060 ETag rep_tag = rep->header.getETag(HDR_ETAG);
1061 debugs(33, 3, "clientIfRangeMatch: ETags: " << spec.tag.str << " and " <<
1062 (rep_tag.str ? rep_tag.str : "<none>"));
1063
1064 if (!rep_tag.str)
1065 return 0; /* entity has no etag to compare with! */
1066
1067 if (spec.tag.weak || rep_tag.weak) {
1068 debugs(33, 1, "clientIfRangeMatch: Weak ETags are not allowed in If-Range: " << spec.tag.str << " ? " << rep_tag.str);
1069 return 0; /* must use strong validator for sub-range requests */
1070 }
1071
1072 return etagIsEqual(&rep_tag, &spec.tag);
1073 }
1074
1075 /* got modification time? */
1076 if (spec.time >= 0) {
1077 return http->storeEntry()->lastmod <= spec.time;
1078 }
1079
1080 assert(0); /* should not happen */
1081 return 0;
1082 }
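/*
 * Illustrative examples (header values hypothetical): an
 *   If-Range: "abc123"
 * matches only when the stored reply carries the identical strong ETag;
 * weak ETags on either side cause a mismatch. A date form such as
 *   If-Range: Sat, 29 Oct 1994 19:43:31 GMT
 * matches when the stored entry was last modified at or before that date.
 */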
1083
1084 /**
1085 * generates a "unique" boundary string for multipart responses;
1086 * the caller is responsible for cleaning the string */
1087 String
1088 ClientHttpRequest::rangeBoundaryStr() const
1089 {
1090 assert(this);
1091 const char *key;
1092 String b(APP_FULLNAME);
1093 b.append(":",1);
1094 key = storeEntry()->getMD5Text();
1095 b.append(key, strlen(key));
1096 return b;
1097 }
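/*
 * Illustrative result (exact APP_FULLNAME and MD5 text are hypothetical):
 * "squid/3.1:0123456789abcdef0123456789abcdef" - the application name,
 * a ':', and the MD5 text of the store key.
 */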
1098
1099 /** adds appropriate Range headers if needed */
1100 void
1101 ClientSocketContext::buildRangeHeader(HttpReply * rep)
1102 {
1103 HttpHeader *hdr = rep ? &rep->header : 0;
1104 const char *range_err = NULL;
1105 HttpRequest *request = http->request;
1106 assert(request->range);
1107 /* check if we still want to do ranges */
1108
1109 if (!rep)
1110 range_err = "no [parse-able] reply";
1111 else if ((rep->sline.status != HTTP_OK) && (rep->sline.status != HTTP_PARTIAL_CONTENT))
1112 range_err = "wrong status code";
1113 else if (hdr->has(HDR_CONTENT_RANGE))
1114 range_err = "origin server does ranges";
1115 else if (rep->content_length < 0)
1116 range_err = "unknown length";
1117 else if (rep->content_length != http->memObject()->getReply()->content_length)
1118 range_err = "INCONSISTENT length"; /* a bug? */
1119
1120 /* hits only - upstream peer determines correct behaviour on misses, and client_side_reply determines
1121 * hit candidates
1122 */
1123 else if (logTypeIsATcpHit(http->logType) && http->request->header.has(HDR_IF_RANGE) && !clientIfRangeMatch(http, rep))
1124 range_err = "If-Range match failed";
1125 else if (!http->request->range->canonize(rep))
1126 range_err = "canonization failed";
1127 else if (http->request->range->isComplex())
1128 range_err = "too complex range header";
1129 else if (!logTypeIsATcpHit(http->logType) && http->request->range->offsetLimitExceeded())
1130 range_err = "range outside range_offset_limit";
1131
1132 /* get rid of our range specs on error */
1133 if (range_err) {
1134 /* XXX We do this here because we need canonisation etc. However, this current
1135 * code will lead to incorrect store offset requests - the store will have the
1136 * offset data, but we won't be requesting it.
1137 * So, we can either re-request, or generate an error
1138 */
1139 debugs(33, 3, "clientBuildRangeHeader: will not do ranges: " << range_err << ".");
1140 delete http->request->range;
1141 http->request->range = NULL;
1142 } else {
1143 /* XXX: TODO: Review - this unconditional set may be wrong. */
1144 httpStatusLineSet(&rep->sline, rep->sline.version,
1145 HTTP_PARTIAL_CONTENT, NULL);
1146 // web server responded with a valid, but unexpected range.
1147 // will (try-to) forward as-is.
1148 //TODO: we should cope with multirange request/responses
1149 bool replyMatchRequest = rep->content_range != NULL ?
1150 request->range->contains(rep->content_range->spec) :
1151 true;
1152 const int spec_count = http->request->range->specs.count;
1153 int64_t actual_clen = -1;
1154
1155 debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
1156 spec_count << " virgin clen: " << rep->content_length);
1157 assert(spec_count > 0);
1158 /* ETags should not be returned with Partial Content replies? */
1159 hdr->delById(HDR_ETAG);
1160 /* append appropriate header(s) */
1161
1162 if (spec_count == 1) {
1163 if (!replyMatchRequest) {
1164 hdr->delById(HDR_CONTENT_RANGE);
1165 hdr->putContRange(rep->content_range);
1166 actual_clen = rep->content_length;
1167 //http->range_iter.pos = rep->content_range->spec.begin();
1168 (*http->range_iter.pos)->offset = rep->content_range->spec.offset;
1169 (*http->range_iter.pos)->length = rep->content_range->spec.length;
1170
1171 } else {
1172 HttpHdrRange::iterator pos = http->request->range->begin();
1173 assert(*pos);
1174 /* append Content-Range */
1175
1176 if (!hdr->has(HDR_CONTENT_RANGE)) {
1177 /* No content range, so this was a full object we are
1178 * sending parts of.
1179 */
1180 httpHeaderAddContRange(hdr, **pos, rep->content_length);
1181 }
1182
1183 /* set new Content-Length to the actual number of bytes
1184 * transmitted in the message-body */
1185 actual_clen = (*pos)->length;
1186 }
1187 } else {
1188 /* multipart! */
1189 /* generate boundary string */
1190 http->range_iter.boundary = http->rangeBoundaryStr();
1191 /* delete old Content-Type, add ours */
1192 hdr->delById(HDR_CONTENT_TYPE);
1193 httpHeaderPutStrf(hdr, HDR_CONTENT_TYPE,
1194 "multipart/byteranges; boundary=\"" SQUIDSTRINGPH "\"",
1195 SQUIDSTRINGPRINT(http->range_iter.boundary));
1196 /* Content-Length is not required in multipart responses
1197 * but it is always nice to have one */
1198 actual_clen = http->mRangeCLen();
1199 /* http->out needs to start where we want the data to begin */
1200 http->out.offset = http->range_iter.currentSpec()->offset;
1201 }
1202
1203 /* replace Content-Length header */
1204 assert(actual_clen >= 0);
1205
1206 hdr->delById(HDR_CONTENT_LENGTH);
1207
1208 hdr->putInt64(HDR_CONTENT_LENGTH, actual_clen);
1209
1210 debugs(33, 3, "clientBuildRangeHeader: actual content length: " << actual_clen);
1211
1212 /* And start the range iter off */
1213 http->range_iter.updateSpec();
1214 }
1215 }
1216
1217 void
1218 ClientSocketContext::prepareReply(HttpReply * rep)
1219 {
1220 reply = rep;
1221
1222 if (http->request->range)
1223 buildRangeHeader(rep);
1224 }
1225
1226 void
1227 ClientSocketContext::sendStartOfMessage(HttpReply * rep, StoreIOBuffer bodyData)
1228 {
1229 prepareReply(rep);
1230 assert (rep);
1231 MemBuf *mb = rep->pack();
1232 /* Save length of headers for persistent conn checks */
1233 http->out.headers_sz = mb->contentSize();
1234 #if HEADERS_LOG
1235
1236 headersLog(0, 0, http->request->method, rep);
1237 #endif
1238
1239 if (bodyData.data && bodyData.length) {
1240 if (!multipartRangeRequest()) {
1241 size_t length = lengthToSend(bodyData.range());
1242 noteSentBodyBytes (length);
1243
1244 mb->append(bodyData.data, length);
1245 } else {
1246 packRange(bodyData, mb);
1247 }
1248 }
1249
1250 /* write */
1251 debugs(33,7, HERE << "sendStartOfMessage schedules clientWriteComplete");
1252 AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteComplete",
1253 CommIoCbPtrFun(clientWriteComplete, this));
1254 comm_write_mbuf(fd(), mb, call);
1255
1256 delete mb;
1257 }
1258
1259 /**
1260 * Write a chunk of data to a client socket. If the reply is present,
1261 * send the reply headers down the wire too, and clean them up when
1262 * finished.
1263 * Pre-condition:
1264 * The request is one backed by a connection, not an internal request.
1265 * data context is not NULL
1266 * There are no more entries in the stream chain.
1267 */
1268 static void
1269 clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
1270 HttpReply * rep, StoreIOBuffer receivedData)
1271 {
1272 int fd;
1273 /* Test preconditions */
1274 assert(node != NULL);
1275 PROF_start(clientSocketRecipient);
1276 /* TODO: handle this rather than asserting
1277 * - it should only ever happen if we cause an abort and
1278 * the callback chain loops back to here, so we can simply return.
1279 * However, that itself shouldn't happen, so it stays as an assert for now.
1280 */
1281 assert(cbdataReferenceValid(node));
1282 assert(node->node.next == NULL);
1283 ClientSocketContext::Pointer context = dynamic_cast<ClientSocketContext *>(node->data.getRaw());
1284 assert(context != NULL);
1285 assert(connIsUsable(http->getConn()));
1286 fd = http->getConn()->fd;
1287 /* TODO: check offset is what we asked for */
1288
1289 if (context != http->getConn()->getCurrentContext()) {
1290 context->deferRecipientForLater(node, rep, receivedData);
1291 PROF_stop(clientSocketRecipient);
1292 return;
1293 }
1294
1295 if (responseFinishedOrFailed(rep, receivedData)) {
1296 context->writeComplete(fd, NULL, 0, COMM_OK);
1297 PROF_stop(clientSocketRecipient);
1298 return;
1299 }
1300
1301 if (!context->startOfOutput())
1302 context->sendBody(rep, receivedData);
1303 else {
1304 assert(rep);
1305 http->al.reply = HTTPMSGLOCK(rep);
1306 context->sendStartOfMessage(rep, receivedData);
1307 }
1308
1309 PROF_stop(clientSocketRecipient);
1310 }
1311
1312 /**
1313 * Called when a downstream node is no longer interested in
1314 * our data. As we are a terminal node, this means on aborts
1315 * only
1316 */
1317 void
1318 clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
1319 {
1320 /* Test preconditions */
1321 assert(node != NULL);
1322 /* TODO: handle this rather than asserting
1323 * - it should only ever happen if we cause an abort and
1324 * the callback chain loops back to here, so we can simply return.
1325 * However, that itself shouldn't happen, so it stays as an assert for now.
1326 */
1327 assert(cbdataReferenceValid(node));
1328 /* Set null by ContextFree */
1329 assert(node->node.next == NULL);
1330 /* this is the assert discussed above */
1331 assert(NULL == dynamic_cast<ClientSocketContext *>(node->data.getRaw()));
1332 /* We are only called when the client socket shuts down.
1333 * Tell the prev pipeline member we're finished
1334 */
1335 clientStreamDetach(node, http);
1336 }
1337
1338 static void
1339 clientWriteBodyComplete(int fd, char *buf, size_t size, comm_err_t errflag, int xerrno, void *data)
1340 {
1341 debugs(33,7, HERE << "clientWriteBodyComplete schedules clientWriteComplete");
1342 clientWriteComplete(fd, NULL, size, errflag, xerrno, data);
1343 }
1344
1345 void
1346 ConnStateData::readNextRequest()
1347 {
1348 debugs(33, 5, "ConnStateData::readNextRequest: FD " << fd << " reading next req");
1349
1350 fd_note(fd, "Waiting for next request");
1351 /**
1352 * Set the timeout BEFORE calling clientReadRequest().
1353 */
1354 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
1355 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
1356 TimeoutDialer(this, &ConnStateData::requestTimeout));
1357 commSetTimeout(fd, Config.Timeout.persistent_request, timeoutCall);
1358
1359 readSomeData();
1360 /** Please don't do anything with the FD past here! */
1361 }
1362
1363 static void
1364 ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest, ConnStateData * conn)
1365 {
1366 debugs(33, 2, "ClientSocketContextPushDeferredIfNeeded: FD " << conn->fd << " Sending next");
1367
1368 /** If the client stream is waiting on a socket write to occur, then */
1369
1370 if (deferredRequest->flags.deferred) {
1371 /** NO data is allowed to have been sent. */
1372 assert(deferredRequest->http->out.size == 0);
1373 /** defer now. */
1374 clientSocketRecipient(deferredRequest->deferredparams.node,
1375 deferredRequest->http,
1376 deferredRequest->deferredparams.rep,
1377 deferredRequest->deferredparams.queuedBuffer);
1378 }
1379
1380 /** otherwise, the request is still active in a callback somewhere,
1381 * and we are done
1382 */
1383 }
1384
1385 void
1386 ClientSocketContext::keepaliveNextRequest()
1387 {
1388 ConnStateData * conn = http->getConn();
1389 bool do_next_read = false;
1390
1391 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: FD " << conn->fd);
1392 connIsFinished();
1393
1394 if (conn->pinning.pinned && conn->pinning.fd == -1) {
1395 debugs(33, 2, "clientKeepaliveNextRequest: FD " << conn->fd << " Connection was pinned but server side gone. Terminating client connection");
1396 comm_close(conn->fd);
1397 return;
1398 }
1399
1400 /** \par
1401 * Attempt to parse a request from the request buffer.
1402 * If we've been fed a pipelined request it may already
1403 * be in our read buffer.
1404 *
1405 \par
1406 * This needs to fall through - if we're unlucky and parse the _last_ request
1407 * from our read buffer we may never re-register for another client read.
1408 */
1409
1410 if (clientParseRequest(conn, do_next_read)) {
1411 debugs(33, 3, "clientSocketContext::keepaliveNextRequest: FD " << conn->fd << ": parsed next request from buffer");
1412 }
1413
1414 /** \par
1415 * Either we need to kick-start another read or, if we have
1416 * a half-closed connection, kill it after the last request.
1417 * This saves waiting for half-closed connections to finish being
1418 * half-closed _AND_ then, sometimes, spending "Timeout" time in
1419 * the keepalive "Waiting for next request" state.
1420 */
1421 if (commIsHalfClosed(conn->fd) && (conn->getConcurrentRequestCount() == 0)) {
1422 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing");
1423 comm_close(conn->fd);
1424 return;
1425 }
1426
1427 ClientSocketContext::Pointer deferredRequest;
1428
1429 /** \par
1430 * At this point we either have a parsed request (which we've
1431 * kicked off the processing for) or not. If we have a deferred
1432 * request (parsed but deferred for pipelining processing reasons)
1433 * then look at processing it. If not, simply kickstart
1434 * another read.
1435 */
1436
1437 if ((deferredRequest = conn->getCurrentContext()).getRaw()) {
1438 debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling PushDeferredIfNeeded");
1439 ClientSocketContextPushDeferredIfNeeded(deferredRequest, conn);
1440 } else {
1441 debugs(33, 3, "ClientSocketContext:: FD " << conn->fd << ": calling conn->readNextRequest()");
1442 conn->readNextRequest();
1443 }
1444 }
1445
1446 void
1447 clientUpdateSocketStats(log_type logType, size_t size)
1448 {
1449 if (size == 0)
1450 return;
1451
1452 kb_incr(&statCounter.client_http.kbytes_out, size);
1453
1454 if (logTypeIsATcpHit(logType))
1455 kb_incr(&statCounter.client_http.hit_kbytes_out, size);
1456 }
1457
1458 /**
1459 * increments iterator "i"
1460 * used by clientPackMoreRanges
1461 *
1462 \retval true there is still data available to pack more ranges
1463 \retval false no more ranges remain to be packed
1464 */
1465 bool
1466 ClientSocketContext::canPackMoreRanges() const
1467 {
1468 /** first update iterator "i" if needed */
1469
1470 if (!http->range_iter.debt()) {
1471 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: At end of current range spec for FD " << fd());
1472
1473 if (http->range_iter.pos.incrementable())
1474 ++http->range_iter.pos;
1475
1476 http->range_iter.updateSpec();
1477 }
1478
1479 assert(!http->range_iter.debt() == !http->range_iter.currentSpec());
1480
1481 /* paranoid sync condition */
1482 /* continue condition: need_more_data */
1483 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: returning " << (http->range_iter.currentSpec() ? true : false));
1484 return http->range_iter.currentSpec() ? true : false;
1485 }
1486
1487 int64_t
1488 ClientSocketContext::getNextRangeOffset() const
1489 {
1490 if (http->request->range) {
1491 /* offset in range specs does not count the prefix of an http msg */
1492 debugs (33, 5, "ClientSocketContext::getNextRangeOffset: http offset " << http->out.offset);
1493 /* check: reply was parsed and range iterator was initialized */
1494 assert(http->range_iter.valid);
1495 /* filter out data according to range specs */
1496 assert (canPackMoreRanges());
1497 {
1498 int64_t start; /* offset of still missing data */
1499 assert(http->range_iter.currentSpec());
1500 start = http->range_iter.currentSpec()->offset + http->range_iter.currentSpec()->length - http->range_iter.debt();
1501 debugs(33, 3, "clientPackMoreRanges: in: offset: " << http->out.offset);
1502 debugs(33, 3, "clientPackMoreRanges: out:"
1503 " start: " << start <<
1504 " spec[" << http->range_iter.pos - http->request->range->begin() << "]:" <<
1505 " [" << http->range_iter.currentSpec()->offset <<
1506 ", " << http->range_iter.currentSpec()->offset + http->range_iter.currentSpec()->length << "),"
1507 " len: " << http->range_iter.currentSpec()->length <<
1508 " debt: " << http->range_iter.debt());
1509 if (http->range_iter.currentSpec()->length != -1)
1510 assert(http->out.offset <= start); /* we did not miss it */
1511
1512 return start;
1513 }
1514
1515 } else if (reply && reply->content_range) {
1516 /* request does not have ranges, but reply does */
1517 /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range
1518 * becomes HttpHdrRange rather than HttpHdrRangeSpec.
1519 */
1520 return http->out.offset + reply->content_range->spec.offset;
1521 }
1522
1523 return http->out.offset;
1524 }
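/*
 * Worked example of the range-debt arithmetic above (numbers hypothetical):
 * for a current spec covering bytes [100, 150) (offset 100, length 50) with
 * a debt of 30 bytes still to send, the next byte needed is
 *   start = 100 + 50 - 30 = 120
 */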
1525
1526 void
1527 ClientSocketContext::pullData()
1528 {
1529 debugs(33, 5, "ClientSocketContext::pullData: FD " << fd() <<
1530 " attempting to pull upstream data");
1531
1532 /* More data will be coming from the stream. */
1533 StoreIOBuffer readBuffer;
1534 /* XXX: Next requested byte in the range sequence */
1535 /* XXX: length = get maximum range length */
1536 readBuffer.offset = getNextRangeOffset();
1537 readBuffer.length = HTTP_REQBUF_SZ;
1538 readBuffer.data = reqbuf;
1539 /* we may note we have reached the end of the wanted ranges */
1540 clientStreamRead(getTail(), http, readBuffer);
1541 }
1542
1543 clientStream_status_t
1544 ClientSocketContext::socketState()
1545 {
1546 switch (clientStreamStatus(getTail(), http)) {
1547
1548 case STREAM_NONE:
1549 /* check for range support ending */
1550
1551 if (http->request->range) {
1552 /* check: reply was parsed and range iterator was initialized */
1553 assert(http->range_iter.valid);
1554 /* filter out data according to range specs */
1555
1556 if (!canPackMoreRanges()) {
1557 debugs(33, 5, HERE << "Range request at end of returnable " <<
1558 "range sequence on FD " << fd());
1559
1560 if (http->request->flags.proxy_keepalive)
1561 return STREAM_COMPLETE;
1562 else
1563 return STREAM_UNPLANNED_COMPLETE;
1564 }
1565 } else if (reply && reply->content_range) {
1566 /* reply has content-range, but Squid is not managing ranges */
1567 const int64_t &bytesSent = http->out.offset;
1568 const int64_t &bytesExpected = reply->content_range->spec.length;
1569
1570 debugs(33, 7, HERE << "body bytes sent vs. expected: " <<
1571 bytesSent << " ? " << bytesExpected << " (+" <<
1572 reply->content_range->spec.offset << ")");
1573
1574 // did we get at least what we expected, based on range specs?
1575
1576 if (bytesSent == bytesExpected) { // got everything
1577 if (http->request->flags.proxy_keepalive)
1578 return STREAM_COMPLETE;
1579 else
1580 return STREAM_UNPLANNED_COMPLETE;
1581 }
1582
1583 // The logic below is not clear: If we got more than we
1584 // expected why would persistency matter? Should not this
1585 // always be an error?
1586 if (bytesSent > bytesExpected) { // got extra
1587 if (http->request->flags.proxy_keepalive)
1588 return STREAM_COMPLETE;
1589 else
1590 return STREAM_UNPLANNED_COMPLETE;
1591 }
1592
1593 // did not get enough yet, expecting more
1594 }
1595
1596 return STREAM_NONE;
1597
1598 case STREAM_COMPLETE:
1599 return STREAM_COMPLETE;
1600
1601 case STREAM_UNPLANNED_COMPLETE:
1602 return STREAM_UNPLANNED_COMPLETE;
1603
1604 case STREAM_FAILED:
1605 return STREAM_FAILED;
1606 }
1607
1608 fatal ("unreachable code\n");
1609 return STREAM_NONE;
1610 }
1611
1612 /**
1613 * A write has just completed to the client, or we have just realised there is
1614 * no more data to send.
1615 */
1616 void
1617 clientWriteComplete(int fd, char *bufnotused, size_t size, comm_err_t errflag, int xerrno, void *data)
1618 {
1619 ClientSocketContext *context = (ClientSocketContext *)data;
1620 context->writeComplete (fd, bufnotused, size, errflag);
1621 }
1622
1623 void
1624 ClientSocketContext::doClose()
1625 {
1626 comm_close(fd());
1627 }
1628
1629 /** Called to initiate (and possibly complete) closing of the context.
1630 * The underlying socket may be already closed */
1631 void
1632 ClientSocketContext::initiateClose(const char *reason)
1633 {
1634 debugs(33, 5, HERE << "initiateClose: closing for " << reason);
1635
1636 if (http != NULL) {
1637 ConnStateData * conn = http->getConn();
1638
1639 if (conn != NULL) {
1640 if (const int64_t expecting = conn->bodySizeLeft()) {
1641 debugs(33, 5, HERE << "ClientSocketContext::initiateClose: " <<
1642 "closing, but first " << conn << " needs to read " <<
1643 expecting << " request body bytes with " <<
1644 conn->in.notYetUsed << " notYetUsed");
1645
1646 if (conn->closing()) {
1647 debugs(33, 2, HERE << "avoiding double-closing " << conn);
1648 return;
1649 }
1650
1651 /*
1652 * XXX We assume the reply fits in the TCP transmit
1653 * window. If not the connection may stall while sending
1654 * the reply (before reaching here) if the client does not
1655 * try to read the response while sending the request body.
1656 * As of yet we have not received any complaints indicating
1657 * this may be an issue.
1658 */
1659 conn->startClosing(reason);
1660
1661 return;
1662 }
1663 }
1664 }
1665
1666 doClose();
1667 }
1668
1669 void
1670 ClientSocketContext::writeComplete(int fd, char *bufnotused, size_t size, comm_err_t errflag)
1671 {
1672 StoreEntry *entry = http->storeEntry();
1673 http->out.size += size;
1674 assert(fd > -1);
1675 debugs(33, 5, "clientWriteComplete: FD " << fd << ", sz " << size <<
1676 ", err " << errflag << ", off " << http->out.size << ", len " <<
1677 (entry ? entry->objectLen() : 0));
1678 clientUpdateSocketStats(http->logType, size);
1679 assert (this->fd() == fd);
1680
1681 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
1682
1683 if (errflag == COMM_ERR_CLOSING)
1684 return;
1685
1686 if (errflag || clientHttpRequestStatus(fd, http)) {
1687 initiateClose("failure or true request status");
1688 /* Do we leak here ? */
1689 return;
1690 }
1691
1692 switch (socketState()) {
1693
1694 case STREAM_NONE:
1695 pullData();
1696 break;
1697
1698 case STREAM_COMPLETE:
1699 debugs(33, 5, "clientWriteComplete: FD " << fd << " Keeping Alive");
1700 keepaliveNextRequest();
1701 return;
1702
1703 case STREAM_UNPLANNED_COMPLETE:
1704 initiateClose("STREAM_UNPLANNED_COMPLETE");
1705 return;
1706
1707 case STREAM_FAILED:
1708 initiateClose("STREAM_FAILED");
1709 return;
1710
1711 default:
1712 fatal("Hit unreachable code in clientWriteComplete\n");
1713 }
1714 }
1715
1716 extern "C" CSR clientGetMoreData;
1717 extern "C" CSS clientReplyStatus;
1718 extern "C" CSD clientReplyDetach;
1719
1720 static ClientSocketContext *
1721 parseHttpRequestAbort(ConnStateData * conn, const char *uri)
1722 {
1723 ClientHttpRequest *http;
1724 ClientSocketContext *context;
1725 StoreIOBuffer tempBuffer;
1726 http = new ClientHttpRequest(conn);
1727 http->req_sz = conn->in.notYetUsed;
1728 http->uri = xstrdup(uri);
1729 setLogUri (http, uri);
1730 context = ClientSocketContextNew(http);
1731 tempBuffer.data = context->reqbuf;
1732 tempBuffer.length = HTTP_REQBUF_SZ;
1733 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1734 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1735 clientSocketDetach, context, tempBuffer);
1736 return context;
1737 }
1738
1739 char *
1740 skipLeadingSpace(char *aString)
1741 {
1742 char *result = aString;
1743
1744 while (xisspace(*result))
1745 ++result;
1746
1747 return result;
1748 }
1749
1750 /**
1751 * 'end' defaults to NULL for backwards compatibility;
1752 * remove the default value if we ever get rid of NULL-terminated
1753 * request buffers.
1754 */
1755 const char *
1756 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1757 {
1758 if (NULL == end) {
1759 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1760 assert(end);
1761 }
1762
1763 for (; end > uriAndHTTPVersion; end--) {
1764 if (*end == '\n' || *end == '\r')
1765 continue;
1766
1767 if (xisspace(*end)) {
1768 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1769 return end + 1;
1770 else
1771 break;
1772 }
1773 }
1774
1775 return NULL;
1776 }
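/*
 * Illustrative use (input hypothetical): given "/index.html HTTP/1.1\r\n"
 * the backwards scan skips the CR/LF, finds the space before "HTTP/" and
 * returns a pointer to "HTTP/1.1"; a line with no version token yields NULL.
 */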
1777
1778 void
1779 setLogUri(ClientHttpRequest * http, char const *uri)
1780 {
1781 safe_free(http->log_uri);
1782
1783 if (!stringHasCntl(uri))
1784 http->log_uri = xstrndup(uri, MAX_URL);
1785 else
1786 http->log_uri = xstrndup(rfc1738_escape_unescaped(uri), MAX_URL);
1787 }
1788
1789 static void
1790 prepareAcceleratedURL(ConnStateData * conn, ClientHttpRequest *http, char *url, const char *req_hdr)
1791 {
1792 int vhost = conn->port->vhost;
1793 int vport = conn->port->vport;
1794 char *host;
1795 char ntoabuf[MAX_IPSTRLEN];
1796
1797 http->flags.accel = 1;
1798
1799 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1800
1801 if (strncasecmp(url, "cache_object://", 15) == 0)
1802 return; /* already in good shape */
1803
1804 if (*url != '/') {
1805 if (conn->port->vhost)
1806 return; /* already in good shape */
1807
1808 /* else we need to ignore the host name */
1809 url = strstr(url, "//");
1810
1811 #if SHOULD_REJECT_UNKNOWN_URLS
1812
1813 if (!url)
1814 return parseHttpRequestAbort(conn, "error:invalid-request");
1815
1816 #endif
1817
1818 if (url)
1819 url = strchr(url + 2, '/');
1820
1821 if (!url)
1822 url = (char *) "/";
1823 }
1824
1825 if (internalCheck(url)) {
1826 /* prepend our name & port */
1827 http->uri = xstrdup(internalLocalUri(NULL, url));
1828 return;
1829 }
1830
1831 const bool switchedToHttps = conn->switchedToHttps();
1832 const bool tryHostHeader = vhost || switchedToHttps;
1833 if (tryHostHeader && (host = mime_get_header(req_hdr, "Host")) != NULL) {
1834 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1835 strlen(host);
1836 http->uri = (char *)xcalloc(url_sz, 1);
1837 const char *protocol = switchedToHttps ?
1838 "https" : conn->port->protocol;
1839 snprintf(http->uri, url_sz, "%s://%s%s", protocol, host, url);
1840 debugs(33, 5, "ACCEL VHOST REWRITE: '" << http->uri << "'");
1841 } else if (conn->port->defaultsite) {
1842 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1843 strlen(conn->port->defaultsite);
1844 http->uri = (char *)xcalloc(url_sz, 1);
1845 snprintf(http->uri, url_sz, "%s://%s%s",
1846 conn->port->protocol, conn->port->defaultsite, url);
1847 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http->uri <<"'");
1848 } else if (vport == -1) {
1849 /* Put the local socket IP address as the hostname. */
1850 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1851 http->uri = (char *)xcalloc(url_sz, 1);
1852 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1853 http->getConn()->port->protocol,
1854 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1855 http->getConn()->me.GetPort(), url);
1856 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http->uri << "'");
1857 } else if (vport > 0) {
1858 /* Put the local socket IP address as the hostname, but static port */
1859 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1860 http->uri = (char *)xcalloc(url_sz, 1);
1861 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1862 http->getConn()->port->protocol,
1863 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1864 vport, url);
1865 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http->uri << "'");
1866 }
1867 }
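/*
 * Illustrative rewrites (host names hypothetical): with vhost enabled, a
 * request line "GET /index.html HTTP/1.1" plus "Host: www.example.com" on a
 * plain HTTP accelerator port becomes
 *   http->uri = "http://www.example.com/index.html"
 * while a port configured with defaultsite=www.example.org yields
 *   http->uri = "http://www.example.org/index.html" for the same path.
 */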
1868
1869 static void
1870 prepareTransparentURL(ConnStateData * conn, ClientHttpRequest *http, char *url, const char *req_hdr)
1871 {
1872 char *host;
1873 char ntoabuf[MAX_IPSTRLEN];
1874
1875 if (*url != '/')
1876 return; /* already in good shape */
1877
1878 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1879
1880 if ((host = mime_get_header(req_hdr, "Host")) != NULL) {
1881 int url_sz = strlen(url) + 32 + Config.appendDomainLen +
1882 strlen(host);
1883 http->uri = (char *)xcalloc(url_sz, 1);
1884 snprintf(http->uri, url_sz, "%s://%s%s",
1885 conn->port->protocol, host, url);
1886 debugs(33, 5, "TRANSPARENT HOST REWRITE: '" << http->uri <<"'");
1887 } else {
1888 /* Put the local socket IP address as the hostname. */
1889 int url_sz = strlen(url) + 32 + Config.appendDomainLen;
1890 http->uri = (char *)xcalloc(url_sz, 1);
1891 snprintf(http->uri, url_sz, "%s://%s:%d%s",
1892 http->getConn()->port->protocol,
1893 http->getConn()->me.NtoA(ntoabuf,MAX_IPSTRLEN),
1894 http->getConn()->me.GetPort(), url);
1895 debugs(33, 5, "TRANSPARENT REWRITE: '" << http->uri << "'");
1896 }
1897 }
1898
1899 // Temporary hack helper: determine whether the request is chunked, expensive
1900 static bool
1901 isChunkedRequest(const HttpParser *hp)
1902 {
1903 HttpRequest request;
1904 if (!request.parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp)))
1905 return false;
1906
1907 return request.header.has(HDR_TRANSFER_ENCODING) &&
1908 request.header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',');
1909 }
1910
1911
1912 /**
1913 * parseHttpRequest()
1914 *
1915 * Returns
1916 * NULL on incomplete requests
1917 * a ClientSocketContext structure on success or failure.
1918 * Sets result->flags.parsed_ok to 0 if failed to parse the request.
1919 * Sets result->flags.parsed_ok to 1 if we have a good request.
1920 */
1921 static ClientSocketContext *
1922 parseHttpRequest(ConnStateData *conn, HttpParser *hp, HttpRequestMethod * method_p, HttpVersion *http_ver)
1923 {
1924 char *req_hdr = NULL;
1925 char *end;
1926 size_t req_sz;
1927 ClientHttpRequest *http;
1928 ClientSocketContext *result;
1929 StoreIOBuffer tempBuffer;
1930 int r;
1931
1932 /* pre-set these values to make aborting simpler */
1933 *method_p = METHOD_NONE;
1934
1935 /* NP: don't be tempted to move this down or remove again.
1936 * It's the only DDoS protection old-String has against long URLs */
1937 if ( hp->bufsiz <= 0) {
1938 debugs(33, 5, "Incomplete request, waiting for end of request line");
1939 return NULL;
1940 } else if ( (size_t)hp->bufsiz >= Config.maxRequestHeaderSize && headersEnd(hp->buf, Config.maxRequestHeaderSize) == 0) {
1941 debugs(33, 5, "parseHttpRequest: Too large request");
1942 return parseHttpRequestAbort(conn, "error:request-too-large");
1943 }
1944
1945 /* Attempt to parse the first line; this'll define the method, url, version and header begin */
1946 r = HttpParserParseReqLine(hp);
1947
1948 if (r == 0) {
1949 debugs(33, 5, "Incomplete request, waiting for end of request line");
1950 return NULL;
1951 }
1952
1953 if (r == -1) {
1954 return parseHttpRequestAbort(conn, "error:invalid-request");
1955 }
1956
1957 /* Request line is valid here .. */
1958 *http_ver = HttpVersion(hp->v_maj, hp->v_min);
1959
1960 /* This call scans the entire request, not just the headers */
1961 if (hp->v_maj > 0) {
1962 if ((req_sz = headersEnd(hp->buf, hp->bufsiz)) == 0) {
1963 debugs(33, 5, "Incomplete request, waiting for end of headers");
1964 return NULL;
1965 }
1966 } else {
1967 debugs(33, 3, "parseHttpRequest: Missing HTTP identifier");
1968 req_sz = HttpParserReqSz(hp);
1969 }
1970
1971 /* We know the whole request is in hp->buf now */
1972
1973 assert(req_sz <= (size_t) hp->bufsiz);
1974
1975 /* Will the following be true with HTTP/0.9 requests? probably not .. */
1976 /* So the rest of the code will need to deal with '0'-byte headers (i.e. none, so don't try parsing them) */
1977 assert(req_sz > 0);
1978
1979 hp->hdr_end = req_sz - 1;
1980
1981 hp->hdr_start = hp->req_end + 1;
1982
1983 /* Enforce max_request_size */
1984 if (req_sz >= Config.maxRequestHeaderSize) {
1985 debugs(33, 5, "parseHttpRequest: Too large request");
1986 return parseHttpRequestAbort(conn, "error:request-too-large");
1987 }
1988
1989 /* Set method_p */
1990 *method_p = HttpRequestMethod(&hp->buf[hp->m_start], &hp->buf[hp->m_end]+1);
1991
1992 /* deny CONNECT via accelerated ports */
1993 if (*method_p == METHOD_CONNECT && conn && conn->port && conn->port->accel) {
1994 debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << conn->port->protocol << " Accelerator port " << conn->port->s.GetPort() );
1995 /* XXX need a way to say "this many character length string" */
1996 debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->buf);
1997 /* XXX need some way to set 405 status on the error reply */
1998 return parseHttpRequestAbort(conn, "error:method-not-allowed");
1999 }
2000
2001 if (*method_p == METHOD_NONE) {
2002 /* XXX need a way to say "this many character length string" */
2003 debugs(33, 1, "clientParseRequestMethod: Unsupported method in request '" << hp->buf << "'");
2004
2005 /* XXX where's the method set for this error? */
2006 return parseHttpRequestAbort(conn, "error:unsupported-request-method");
2007 }
2008
2009 /*
2010 * Process headers after request line
2011 * TODO: Use httpRequestParse here.
2012 */
2013 /* XXX this code should be modified to take a const char * later! */
2014 req_hdr = (char *) hp->buf + hp->req_end + 1;
2015
2016 debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr << "}");
2017
2018 end = (char *) hp->buf + hp->hdr_end;
2019
2020 debugs(33, 3, "parseHttpRequest: end = {" << end << "}");
2021
2022 /*
2023 * Check that the headers don't have double-CR.
2024 * NP: strnstr is required so we don't search any possible binary body blobs.
2025 */
2026 if ( squid_strnstr(req_hdr, "\r\r\n", req_sz) ) {
2027 debugs(33, 1, "WARNING: suspicious HTTP request contains double CR");
2028 return parseHttpRequestAbort(conn, "error:double-CR");
2029 }
2030
2031 debugs(33, 3, "parseHttpRequest: prefix_sz = " <<
2032 (int) HttpParserRequestLen(hp) << ", req_line_sz = " <<
2033 HttpParserReqSz(hp));
2034
2035 // Temporary hack: We might receive a chunked body from a broken HTTP/1.1
2036 // client that sends chunked requests to HTTP/1.0 Squid. If the request
2037 // might have a chunked body, parse the headers early to look for the
2038 // "Transfer-Encoding: chunked" header. If we find it, wait until the
2039 // entire body is available so that we can set the content length and
2040 // forward the request without chunks. The primary reason for this is
2041 // to avoid forwarding a chunked request because the server side lacks
2042 // logic to determine when it is valid to do so.
2043 // FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS below will replace this hack.
2044 if (hp->v_min == 1 && hp->v_maj == 1 && // broken client, may send chunks
2045 Config.maxChunkedRequestBodySize > 0 && // configured to dechunk
2046 (*method_p == METHOD_PUT || *method_p == METHOD_POST)) {
2047
2048 // check only once per request because isChunkedRequest is expensive
2049 if (conn->in.dechunkingState == ConnStateData::chunkUnknown) {
2050 if (isChunkedRequest(hp))
2051 conn->startDechunkingRequest(hp);
2052 else
2053 conn->in.dechunkingState = ConnStateData::chunkNone;
2054 }
2055
2056 if (conn->in.dechunkingState == ConnStateData::chunkParsing) {
2057 if (conn->parseRequestChunks(hp)) // parses newly read chunks
2058 return NULL; // wait for more data
2059 debugs(33, 5, HERE << "Got complete chunked request or err.");
2060 assert(conn->in.dechunkingState != ConnStateData::chunkParsing);
2061 }
2062 }
2063
2064 /* Ok, all headers are received */
2065 http = new ClientHttpRequest(conn);
2066
2067 http->req_sz = HttpParserRequestLen(hp);
2068 result = ClientSocketContextNew(http);
2069 tempBuffer.data = result->reqbuf;
2070 tempBuffer.length = HTTP_REQBUF_SZ;
2071
2072 ClientStreamData newServer = new clientReplyContext(http);
2073 ClientStreamData newClient = result;
2074 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
2075 clientReplyStatus, newServer, clientSocketRecipient,
2076 clientSocketDetach, newClient, tempBuffer);
2077
2078 debugs(33, 5, "parseHttpRequest: Request Header is\n" <<(hp->buf) + hp->hdr_start);
2079
2080 /* set url */
2081 /*
2082 * XXX this should eventually not use a malloc'ed buffer; the transformation code
2083 * below needs to be modified to not expect a mutable nul-terminated string.
2084 */
2085 char *url = (char *)xmalloc(hp->u_end - hp->u_start + 16);
2086
2087 memcpy(url, hp->buf + hp->u_start, hp->u_end - hp->u_start + 1);
2088
2089 url[hp->u_end - hp->u_start + 1] = '\0';
2090
2091 #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION
2092
2093 if ((t = strchr(url, '#'))) /* remove HTML anchors */
2094 *t = '\0';
2095
2096 #endif
2097
2098 /* Rewrite the URL in transparent or accelerator mode */
2099 /* NP: there are several cases to traverse here:
2100 * - standard mode (forward proxy)
2101 * - transparent mode (TPROXY)
2102 * - transparent mode with failures
2103 * - intercept mode (NAT)
2104 * - intercept mode with failures
2105 * - accelerator mode (reverse proxy)
2106 * - internal URL
2107 * - mixed combos of the above with internal URL
2108 */
2109 if (conn->transparent()) {
2110 /* intercept or transparent mode, properly working with no failures */
2111 http->flags.intercepted = conn->port->intercepted;
2112 http->flags.spoof_client_ip = conn->port->spoof_client_ip;
2113 prepareTransparentURL(conn, http, url, req_hdr);
2114
2115 } else if (conn->port->intercepted || conn->port->spoof_client_ip) {
2116 /* transparent or intercept mode with failures */
2117 prepareTransparentURL(conn, http, url, req_hdr);
2118
2119 } else if (conn->port->accel || conn->switchedToHttps()) {
2120 /* accelerator mode */
2121 prepareAcceleratedURL(conn, http, url, req_hdr);
2122
2123 } else if (internalCheck(url)) {
2124 /* internal URL mode */
2125 /* prepend our name & port */
2126 http->uri = xstrdup(internalLocalUri(NULL, url));
2127 http->flags.accel = 1;
2128 }
2129
2130 if (!http->uri) {
2131 /* No special rewrites have been applied above, use the
2132 * requested URL. It may be rewritten later, so make extra room */
2133 int url_sz = strlen(url) + Config.appendDomainLen + 5;
2134 http->uri = (char *)xcalloc(url_sz, 1);
2135 strcpy(http->uri, url);
2136 }
2137
2138 setLogUri(http, http->uri);
2139 debugs(33, 5, "parseHttpRequest: Complete request received");
2140 result->flags.parsed_ok = 1;
2141 xfree(url);
2142 return result;
2143 }
2144
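/**
 * Report how many bytes of the connection's read buffer are still free.
 * One byte is always reserved for the terminating NUL appended by
 * handleReadData(). makeSpaceAvailable() below doubles the buffer when
 * fewer than two bytes remain; for example, with allocatedSize=4096 and
 * notYetUsed=4095 the available length is 0, so the buffer grows to 8192.
 */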
2145 int
2146 ConnStateData::getAvailableBufferLength() const
2147 {
2148 int result = in.allocatedSize - in.notYetUsed - 1;
2149 assert (result >= 0);
2150 return result;
2151 }
2152
2153 void
2154 ConnStateData::makeSpaceAvailable()
2155 {
2156 if (getAvailableBufferLength() < 2) {
2157 in.buf = (char *)memReallocBuf(in.buf, in.allocatedSize * 2, &in.allocatedSize);
2158 debugs(33, 2, "growing request buffer: notYetUsed=" << in.notYetUsed << " size=" << in.allocatedSize);
2159 }
2160 }
2161
2162 void
2163 ConnStateData::addContextToQueue(ClientSocketContext * context)
2164 {
2165 ClientSocketContext::Pointer *S;
2166
2167 for (S = (ClientSocketContext::Pointer *) & currentobject; S->getRaw();
2168 S = &(*S)->next);
2169 *S = context;
2170
2171 ++nrequests;
2172 }
2173
2174 int
2175 ConnStateData::getConcurrentRequestCount() const
2176 {
2177 int result = 0;
2178 ClientSocketContext::Pointer *T;
2179
2180 for (T = (ClientSocketContext::Pointer *) &currentobject;
2181 T->getRaw(); T = &(*T)->next, ++result);
2182 return result;
2183 }
2184
2185 int
2186 ConnStateData::connReadWasError(comm_err_t flag, int size, int xerrno)
2187 {
2188 if (flag != COMM_OK) {
2189 debugs(33, 2, "connReadWasError: FD " << fd << ": got flag " << flag);
2190 return 1;
2191 }
2192
2193 if (size < 0) {
2194 if (!ignoreErrno(xerrno)) {
2195 debugs(33, 2, "connReadWasError: FD " << fd << ": " << xstrerr(xerrno));
2196 return 1;
2197 } else if (in.notYetUsed == 0) {
2198 debugs(33, 2, "connReadWasError: FD " << fd << ": no data to process (" << xstrerr(xerrno) << ")");
2199 }
2200 }
2201
2202 return 0;
2203 }
2204
2205 int
2206 ConnStateData::connFinishedWithConn(int size)
2207 {
2208 if (size == 0) {
2209 if (getConcurrentRequestCount() == 0 && in.notYetUsed == 0) {
2210 /* no current or pending requests */
2211 debugs(33, 4, "connFinishedWithConn: FD " << fd << " closed");
2212 return 1;
2213 } else if (!Config.onoff.half_closed_clients) {
2214 /* admin doesn't want to support half-closed client sockets */
2215 debugs(33, 3, "connFinishedWithConn: FD " << fd << " aborted (half_closed_clients disabled)");
2216 return 1;
2217 }
2218 }
2219
2220 return 0;
2221 }
2222
2223 void
2224 connNoteUseOfBuffer(ConnStateData* conn, size_t byteCount)
2225 {
2226 assert(byteCount > 0 && byteCount <= conn->in.notYetUsed);
2227 conn->in.notYetUsed -= byteCount;
2228 debugs(33, 5, HERE << "conn->in.notYetUsed = " << conn->in.notYetUsed);
2229 /*
2230 * If there is still data that will be used,
2231 * move it to the beginning.
2232 */
2233
2234 if (conn->in.notYetUsed > 0)
2235 xmemmove(conn->in.buf, conn->in.buf + byteCount,
2236 conn->in.notYetUsed);
2237 }
2238
2239 int
2240 connKeepReadingIncompleteRequest(ConnStateData * conn)
2241 {
2242 // when we read chunked requests, the entire body is buffered
2243 // XXX: this check ignores header size and its limits.
2244 if (conn->in.dechunkingState == ConnStateData::chunkParsing)
2245 return ((int64_t)conn->in.notYetUsed) < Config.maxChunkedRequestBodySize;
2246
2247 return conn->in.notYetUsed >= Config.maxRequestHeaderSize ? 0 : 1;
2248 }
2249
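/**
 * Give up on a request that exceeded the configured size limits.
 * Builds an "error:request-too-large" context, logs whether the chunked
 * body limit or the header limit was hit, and replies with ERR_TOO_BIG
 * (HTTP 413) through the client stream.
 */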
2250 void
2251 connCancelIncompleteRequests(ConnStateData * conn)
2252 {
2253 ClientSocketContext *context = parseHttpRequestAbort(conn, "error:request-too-large");
2254 clientStreamNode *node = context->getClientReplyContext();
2255 assert(!connKeepReadingIncompleteRequest(conn));
2256 if (conn->in.dechunkingState == ConnStateData::chunkParsing) {
2257 debugs(33, 1, "Chunked request is too large (" << conn->in.notYetUsed << " bytes)");
2258 debugs(33, 1, "Config 'chunked_request_body_max_size'= " << Config.maxChunkedRequestBodySize << " bytes.");
2259 } else {
2260 debugs(33, 1, "Request header is too large (" << conn->in.notYetUsed << " bytes)");
2261 debugs(33, 1, "Config 'request_header_max_size'= " << Config.maxRequestHeaderSize << " bytes.");
2262 }
2263 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2264 assert (repContext);
2265 repContext->setReplyToError(ERR_TOO_BIG,
2266 HTTP_REQUEST_ENTITY_TOO_LARGE, METHOD_NONE, NULL,
2267 conn->peer, NULL, NULL, NULL);
2268 context->registerWithConn();
2269 context->pullData();
2270 }
2271
2272 void
2273 ConnStateData::clientMaybeReadData(int do_next_read)
2274 {
2275 if (do_next_read) {
2276 flags.readMoreRequests = true;
2277 readSomeData();
2278 }
2279 }
2280
2281 void
2282 ConnStateData::clientAfterReadingRequests(int do_next_read)
2283 {
2284 /*
2285 * If (1) we are reading a message body, (2) and the connection
2286 * is half-closed, and (3) we didn't get the entire HTTP request
2287 * yet, then close this connection.
2288 */
2289
2290 if (fd_table[fd].flags.socket_eof) {
2291 if ((int64_t)in.notYetUsed < bodySizeLeft()) {
2292 /* Partial request received. Abort client connection! */
2293 debugs(33, 3, "clientAfterReadingRequests: FD " << fd << " aborted, partial request");
2294 comm_close(fd);
2295 return;
2296 }
2297 }
2298
2299 clientMaybeReadData (do_next_read);
2300 }
2301
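/**
 * clientProcessRequest
 *
 * Take a freshly parsed request and turn it into work: validate the URL,
 * HTTP version, headers, Transfer-Encoding and Content-Length, replying
 * with an appropriate error through the client stream when a check fails;
 * set up the BodyPipe when a request body is expected; and finally create
 * the callout context and start http->doCallouts(). The finish: label
 * consumes the request header bytes from the read buffer and handles the
 * deferred TCP_RESET case.
 */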
2302 static void
2303 clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *context, const HttpRequestMethod& method, HttpVersion http_ver)
2304 {
2305 ClientHttpRequest *http = context->http;
2306 HttpRequest *request = NULL;
2307 bool notedUseOfBuffer = false;
2308 bool tePresent = false;
2309 bool deChunked = false;
2310 bool unsupportedTe = false;
2311
2312 /* We have an initial client stream in place should it be needed */
2313 /* setup our private context */
2314 context->registerWithConn();
2315
2316 if (context->flags.parsed_ok == 0) {
2317 clientStreamNode *node = context->getClientReplyContext();
2318 debugs(33, 1, "clientProcessRequest: Invalid Request");
2319 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2320 assert (repContext);
2321 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, NULL, conn->peer, NULL, conn->in.buf, NULL);
2322 assert(context->http->out.offset == 0);
2323 context->pullData();
2324 conn->flags.readMoreRequests = false;
2325 goto finish;
2326 }
2327
2328 if ((request = HttpRequest::CreateFromUrlAndMethod(http->uri, method)) == NULL) {
2329 clientStreamNode *node = context->getClientReplyContext();
2330 debugs(33, 5, "Invalid URL: " << http->uri);
2331 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2332 assert (repContext);
2333 repContext->setReplyToError(ERR_INVALID_URL, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL);
2334 assert(context->http->out.offset == 0);
2335 context->pullData();
2336 conn->flags.readMoreRequests = false;
2337 goto finish;
2338 }
2339
2340 /* RFC 2616 section 10.5.6 : handle unsupported HTTP versions cleanly. */
2341 /* We currently only accept 0.9, 1.0, 1.1 */
2342 if ( (http_ver.major == 0 && http_ver.minor != 9) ||
2343 (http_ver.major == 1 && http_ver.minor > 1 ) ||
2344 (http_ver.major > 1) ) {
2345
2346 clientStreamNode *node = context->getClientReplyContext();
2347 debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp));
2348 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2349 assert (repContext);
2350 repContext->setReplyToError(ERR_UNSUP_HTTPVERSION, HTTP_HTTP_VERSION_NOT_SUPPORTED, method, http->uri, conn->peer, NULL, HttpParserHdrBuf(hp), NULL);
2351 assert(context->http->out.offset == 0);
2352 context->pullData();
2353 conn->flags.readMoreRequests = false;
2354 goto finish;
2355 }
2356
2357 /* compile headers */
2358 /* we should skip request line! */
2359 /* XXX should actually know the damned buffer size here */
2360 if (http_ver.major >= 1 && !request->parseHeader(HttpParserHdrBuf(hp), HttpParserHdrSz(hp))) {
2361 clientStreamNode *node = context->getClientReplyContext();
2362 debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp));
2363 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2364 assert (repContext);
2365 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST, method, http->uri, conn->peer, NULL, NULL, NULL);
2366 assert(context->http->out.offset == 0);
2367 context->pullData();
2368 conn->flags.readMoreRequests = false;
2369 goto finish;
2370 }
2371
2372 request->flags.accelerated = http->flags.accel;
2373 request->flags.no_direct = request->flags.accelerated ? !conn->port->allow_direct : 0;
2374
2375 /** \par
2376 * If transparent or interception mode is working clone the transparent and interception flags
2377 * from the port settings to the request.
2378 */
2379 if (IpInterceptor.InterceptActive()) {
2380 request->flags.intercepted = http->flags.intercepted;
2381 }
2382 if (IpInterceptor.TransparentActive()) {
2383 request->flags.spoof_client_ip = conn->port->spoof_client_ip;
2384 }
2385
2386 if (internalCheck(request->urlpath.termedBuf())) {
2387 if (internalHostnameIs(request->GetHost()) &&
2388 request->port == getMyPort()) {
2389 http->flags.internal = 1;
2390 } else if (Config.onoff.global_internal_static && internalStaticCheck(request->urlpath.termedBuf())) {
2391 request->SetHost(internalHostname());
2392 request->port = getMyPort();
2393 http->flags.internal = 1;
2394 }
2395 }
2396
2397 if (http->flags.internal) {
2398 request->protocol = PROTO_HTTP;
2399 request->login[0] = '\0';
2400 }
2401
2402 request->flags.internal = http->flags.internal;
2403 setLogUri (http, urlCanonicalClean(request));
2404 request->client_addr = conn->peer;
2405 #if FOLLOW_X_FORWARDED_FOR
2406 request->indirect_client_addr = conn->peer;
2407 #endif /* FOLLOW_X_FORWARDED_FOR */
2408 request->my_addr = conn->me;
2409 request->http_ver = http_ver;
2410
2411 tePresent = request->header.has(HDR_TRANSFER_ENCODING);
2412 deChunked = conn->in.dechunkingState == ConnStateData::chunkReady;
2413 if (deChunked) {
2414 assert(tePresent);
2415 request->setContentLength(conn->in.dechunked.contentSize());
2416 request->header.delById(HDR_TRANSFER_ENCODING);
2417 conn->finishDechunkingRequest(hp);
2418 }
2419
2420 unsupportedTe = tePresent && !deChunked;
2421 if (!urlCheckRequest(request) || unsupportedTe) {
2422 clientStreamNode *node = context->getClientReplyContext();
2423 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2424 assert (repContext);
2425 repContext->setReplyToError(ERR_UNSUP_REQ,
2426 HTTP_NOT_IMPLEMENTED, request->method, NULL,
2427 conn->peer, request, NULL, NULL);
2428 assert(context->http->out.offset == 0);
2429 context->pullData();
2430 conn->flags.readMoreRequests = false;
2431 goto finish;
2432 }
2433
2434
2435 if (!clientIsContentLengthValid(request)) {
2436 clientStreamNode *node = context->getClientReplyContext();
2437 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2438 assert (repContext);
2439 repContext->setReplyToError(ERR_INVALID_REQ,
2440 HTTP_LENGTH_REQUIRED, request->method, NULL,
2441 conn->peer, request, NULL, NULL);
2442 assert(context->http->out.offset == 0);
2443 context->pullData();
2444 conn->flags.readMoreRequests = false;
2445 goto finish;
2446 }
2447
2448 http->request = HTTPMSGLOCK(request);
2449 clientSetKeepaliveFlag(http);
2450
2451 /* If this is a CONNECT, don't schedule a read - ssl.c will handle it */
2452 if (http->request->method == METHOD_CONNECT)
2453 context->mayUseConnection(true);
2454
2455 /* Do we expect a request-body? */
2456 if (!context->mayUseConnection() && request->content_length > 0) {
2457 request->body_pipe = conn->expectRequestBody(request->content_length);
2458
2459 // consume header early so that body pipe gets just the body
2460 connNoteUseOfBuffer(conn, http->req_sz);
2461 notedUseOfBuffer = true;
2462
2463 conn->handleRequestBodyData(); // may comm_close and stop producing
2464
2465 /* Is it too large? */
2466
2467 if (!clientIsRequestBodyValid(request->content_length) ||
2468 clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
2469 clientStreamNode *node = context->getClientReplyContext();
2470 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2471 assert (repContext);
2472 repContext->setReplyToError(ERR_TOO_BIG,
2473 HTTP_REQUEST_ENTITY_TOO_LARGE, METHOD_NONE, NULL,
2474 conn->peer, http->request, NULL, NULL);
2475 assert(context->http->out.offset == 0);
2476 context->pullData();
2477 goto finish;
2478 }
2479
2480 if (!request->body_pipe->productionEnded())
2481 conn->readSomeData();
2482
2483 context->mayUseConnection(!request->body_pipe->productionEnded());
2484 }
2485
2486 http->calloutContext = new ClientRequestContext(http);
2487
2488 http->doCallouts();
2489
2490 finish:
2491 if (!notedUseOfBuffer)
2492 connNoteUseOfBuffer(conn, http->req_sz);
2493
2494 /*
2495 * DPW 2007-05-18
2496 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
2497 * to here because calling comm_reset_close() causes http to
2498 * be freed and the above connNoteUseOfBuffer() would hit an
2499 * assertion, not to mention that we were accessing freed memory.
2500 */
2501 if (http->request->flags.resetTCP() && conn->fd > -1) {
2502 debugs(33, 3, HERE << "Sending TCP RST on FD " << conn->fd);
2503 conn->flags.readMoreRequests = false;
2504 comm_reset_close(conn->fd);
2505 return;
2506 }
2507 }
2508
2509 static void
2510 connStripBufferWhitespace (ConnStateData * conn)
2511 {
2512 while (conn->in.notYetUsed > 0 && xisspace(conn->in.buf[0])) {
2513 xmemmove(conn->in.buf, conn->in.buf + 1, conn->in.notYetUsed - 1);
2514 --conn->in.notYetUsed;
2515 }
2516 }
2517
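/**
 * May another request be parsed on this connection right now?
 * At most one concurrent request is allowed, or two when pipeline_prefetch
 * is enabled; e.g. two pipelined GETs arriving in a single read are both
 * admitted, while a third waits until one of them completes.
 */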
2518 static int
2519 connOkToAddRequest(ConnStateData * conn)
2520 {
2521 int result = conn->getConcurrentRequestCount() < (Config.onoff.pipeline_prefetch ? 2 : 1);
2522
2523 if (!result) {
2524 debugs(33, 3, "connOkToAddRequest: FD " << conn->fd <<
2525 " max concurrent requests reached");
2526 debugs(33, 5, "connOkToAddRequest: FD " << conn->fd <<
2527 " defering new request until one is done");
2528 }
2529
2530 return result;
2531 }
2532
2533 /**
2534 * bodySizeLeft
2535 *
2536 * Report on the number of bytes of body content that we
2537 * know are yet to be read on this connection.
2538 */
2539 int64_t
2540 ConnStateData::bodySizeLeft()
2541 {
2542 // XXX: this logic will not work for chunked requests with unknown sizes
2543
2544 if (bodyPipe != NULL)
2545 return bodyPipe->unproducedSize();
2546
2547 return 0;
2548 }
2549
2550 /**
2551 * Attempt to parse one or more requests from the input buffer.
2552 * If a request is successfully parsed, even if the next request
2553 * is only partially parsed, it will return TRUE.
2554 * do_next_read is updated to indicate whether a read should be
2555 * scheduled.
2556 */
2557 static bool
2558 clientParseRequest(ConnStateData * conn, bool &do_next_read)
2559 {
2560 HttpRequestMethod method;
2561 ClientSocketContext *context;
2562 bool parsed_req = false;
2563 HttpVersion http_ver;
2564 HttpParser hp;
2565
2566 debugs(33, 5, "clientParseRequest: FD " << conn->fd << ": attempting to parse");
2567
2568 while (conn->in.notYetUsed > 0 && conn->bodySizeLeft() == 0) {
2569 connStripBufferWhitespace (conn);
2570
2571 /* Don't try to parse if the buffer is empty */
2572
2573 if (conn->in.notYetUsed == 0)
2574 break;
2575
2576 /* Limit the number of concurrent requests to 2 */
2577
2578 if (!connOkToAddRequest(conn)) {
2579 break;
2580 }
2581
2582 /* Should not be needed anymore */
2583 /* Terminate the string */
2584 conn->in.buf[conn->in.notYetUsed] = '\0';
2585
2586 /* Begin the parsing */
2587 HttpParserInit(&hp, conn->in.buf, conn->in.notYetUsed);
2588
2589 /* Process request */
2590 PROF_start(parseHttpRequest);
2591
2592 context = parseHttpRequest(conn, &hp, &method, &http_ver);
2593
2594 PROF_stop(parseHttpRequest);
2595
2596 /* partial or incomplete request */
2597 if (!context) {
2598
2599 if (!connKeepReadingIncompleteRequest(conn))
2600 connCancelIncompleteRequests(conn);
2601
2602 break;
2603 }
2604
2605 /* status -1 or 1 */
2606 if (context) {
2607 debugs(33, 5, "clientParseRequest: FD " << conn->fd << ": parsed a request");
2608 commSetTimeout(conn->fd, Config.Timeout.lifetime, clientLifetimeTimeout,
2609 context->http);
2610
2611 clientProcessRequest(conn, &hp, context, method, http_ver);
2612
2613 parsed_req = true;
2614
2615 if (context->mayUseConnection()) {
2616 debugs(33, 3, "clientParseRequest: Not reading, as this request may need the connection");
2617 do_next_read = 0;
2618 break;
2619 }
2620
2621 if (!conn->flags.readMoreRequests) {
2622 conn->flags.readMoreRequests = true;
2623 break;
2624 }
2625
2626 continue; /* while offset > 0 && conn->bodySizeLeft() == 0 */
2627 }
2628 } /* while offset > 0 && conn->bodySizeLeft() == 0 */
2629
2630 /* XXX where to 'finish' the parsing pass? */
2631
2632 return parsed_req;
2633 }
2634
2635 void
2636 ConnStateData::clientReadRequest(const CommIoCbParams &io)
2637 {
2638 debugs(33,5,HERE << "clientReadRequest FD " << io.fd << " size " << io.size);
2639 reading(false);
2640 bool do_next_read = 1; /* the default _is_ to read data! - adrian */
2641
2642 assert (io.fd == fd);
2643
2644 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
2645
2646 if (io.flag == COMM_ERR_CLOSING) {
2647 debugs(33,5, HERE << " FD " << fd << " closing; bailing out.");
2648 return;
2649 }
2650
2651 /*
2652 * Don't reset the timeout value here. The timeout value will be
2653 * set to Config.Timeout.request by httpAccept() and
2654 * clientWriteComplete(), and should apply to the request as a
2655 * whole, not individual read() calls. Plus, it breaks our
2656 * lame half-close detection
2657 */
2658 if (connReadWasError(io.flag, io.size, io.xerrno)) {
2659 comm_close(fd);
2660 return;
2661 }
2662
2663 if (io.flag == COMM_OK) {
2664 if (io.size > 0) {
2665 kb_incr(&statCounter.client_http.kbytes_in, io.size);
2666
2667 handleReadData(io.buf, io.size);
2668
2669 /* The above may close the connection under our feet */
2670 if (!isOpen())
2671 return;
2672
2673 } else if (io.size == 0) {
2674 debugs(33, 5, "clientReadRequest: FD " << fd << " closed?");
2675
2676 if (connFinishedWithConn(io.size)) {
2677 comm_close(fd);
2678 return;
2679 }
2680
2681 /* It might be half-closed, we can't tell */
2682 fd_table[fd].flags.socket_eof = 1;
2683
2684 commMarkHalfClosed(fd);
2685
2686 do_next_read = 0;
2687
2688 fd_note(fd, "half-closed");
2689
2690 /* There is one more close check at the end, to detect aborted
2691 * (partial) requests. At this point we can't tell if the request
2692 * is partial.
2693 */
2694 /* Continue to process previously read data */
2695 }
2696 }
2697
2698 /* Process next request */
2699 if (getConcurrentRequestCount() == 0)
2700 fd_note(fd, "Reading next request");
2701
2702 if (! clientParseRequest(this, do_next_read)) {
2703 if (!isOpen())
2704 return;
2705 /*
2706 * If the client here is half closed and we failed
2707 * to parse a request, close the connection.
2708 * The above check with connFinishedWithConn() only
2709 * succeeds _if_ the buffer is empty which it won't
2710 * be if we have an incomplete request.
2711 * XXX: This duplicates ClientSocketContext::keepaliveNextRequest
2712 */
2713 if (getConcurrentRequestCount() == 0 && commIsHalfClosed(fd)) {
2714 debugs(33, 5, "clientReadRequest: FD " << fd << ": half-closed connection, no completed request parsed, connection closing.");
2715 comm_close(fd);
2716 return;
2717 }
2718 }
2719
2720 if (!isOpen())
2721 return;
2722
2723 clientAfterReadingRequests(do_next_read);
2724 }
2725
2726 /**
2727 * called when new request data has been read from the socket
2728 */
2729 void
2730 ConnStateData::handleReadData(char *buf, size_t size)
2731 {
2732 char *current_buf = in.addressToReadInto();
2733
2734 if (buf != current_buf)
2735 xmemmove(current_buf, buf, size);
2736
2737 in.notYetUsed += size;
2738
2739 in.buf[in.notYetUsed] = '\0'; /* Terminate the string */
2740
2741 // if we are reading a body, stuff data into the body pipe
2742 if (bodyPipe != NULL)
2743 handleRequestBodyData();
2744 }
2745
2746 /**
2747 * called when new request body data has been buffered in in.buf
2748 * may close the connection if we were closing and piped everything out
2749 */
2750 void
2751 ConnStateData::handleRequestBodyData()
2752 {
2753 assert(bodyPipe != NULL);
2754
2755 size_t putSize = 0;
2756
2757 #if FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS
2758 // The code below works, in principle, but we cannot do dechunking
2759 // on-the-fly because that would mean sending chunked requests to
2760 // the next hop. Squid lacks logic to determine which servers can
2761 // receive chunk requests. Squid v3.0 code cannot even handle chunked
2762 // responses which we may encourage by sending chunked requests.
2763 // The error generation code probably needs more work.
2764 if (in.bodyParser) { // chunked body
2765 debugs(33,5, HERE << "handling chunked request body for FD " << fd);
2766 bool malformedChunks = false;
2767
2768 MemBuf raw; // ChunkedCodingParser only works with MemBufs
2769 raw.init(in.notYetUsed, in.notYetUsed);
2770 raw.append(in.buf, in.notYetUsed);
2771 try { // the parser will throw on errors
2772 const mb_size_t wasContentSize = raw.contentSize();
2773 BodyPipeCheckout bpc(*bodyPipe);
2774 const bool parsed = in.bodyParser->parse(&raw, &bpc.buf);
2775 bpc.checkIn();
2776 putSize = wasContentSize - raw.contentSize();
2777
2778 if (parsed) {
2779 stopProducingFor(bodyPipe, true); // this makes bodySize known
2780 } else {
2781 // parser needy state must imply body pipe needy state
2782 if (in.bodyParser->needsMoreData() &&
2783 !bodyPipe->mayNeedMoreData())
2784 malformedChunks = true;
2785 // XXX: if bodyParser->needsMoreSpace, how can we guarantee it?
2786 }
2787 } catch (...) { // XXX: be more specific
2788 malformedChunks = true;
2789 }
2790
2791 if (malformedChunks) {
2792 if (bodyPipe != NULL)
2793 stopProducingFor(bodyPipe, false);
2794
2795 ClientSocketContext::Pointer context = getCurrentContext();
2796 if (!context->http->out.offset) {
2797 clientStreamNode *node = context->getClientReplyContext();
2798 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2799 assert (repContext);
2800 repContext->setReplyToError(ERR_INVALID_REQ, HTTP_BAD_REQUEST,
2801 METHOD_NONE, NULL, &peer.sin_addr,
2802 NULL, NULL, NULL);
2803 context->pullData();
2804 }
2805 flags.readMoreRequests = false;
2806 return; // XXX: is that sufficient to generate an error?
2807 }
2808 } else // identity encoding
2809 #endif
2810 {
2811 debugs(33,5, HERE << "handling plain request body for FD " << fd);
2812 putSize = bodyPipe->putMoreData(in.buf, in.notYetUsed);
2813 if (!bodyPipe->mayNeedMoreData()) {
2814 // BodyPipe will clear us automagically when we produced everything
2815 bodyPipe = NULL;
2816 }
2817 }
2818
2819 if (putSize > 0)
2820 connNoteUseOfBuffer(this, putSize);
2821
2822 if (!bodyPipe) {
2823 debugs(33,5, HERE << "produced entire request body for FD " << fd);
2824
2825 if (closing()) {
2826 /* we've finished reading like good clients,
2827 * now do the close that initiateClose initiated.
2828 *
2829 * XXX: do we have to close? why not check keepalive etc.?
2830 *
2831 * XXX: To support chunked requests safely, we need to handle
2832 * the case of an endless request. This if-statement does not,
2833 * because mayNeedMoreData is true if request size is not known.
2834 */
2835 comm_close(fd);
2836 }
2837 }
2838 }
2839
2840 void
2841 ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer )
2842 {
2843 handleRequestBodyData();
2844 }
2845
2846 void
2847 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2848 {
2849 if (!closing())
2850 startClosing("body consumer aborted");
2851 }
2852
2853 /** general lifetime handler for HTTP requests */
2854 void
2855 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2856 {
2857 #if THIS_CONFUSES_PERSISTENT_CONNECTION_AWARE_BROWSERS_AND_USERS
2858 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2859
2860 if (COMMIO_FD_WRITECB(io.fd)->active) {
2861 /* FIXME: If this code is reinstated, check the conn counters,
2862 * not the fd table state
2863 */
2864 /*
2865 * Some data has been sent to the client, just close the FD
2866 */
2867 comm_close(io.fd);
2868 } else if (nrequests) {
2869 /*
2870 * assume it's a persistent connection; just close it
2871 */
2872 comm_close(io.fd);
2873 } else {
2874 /*
2875 * Generate an error
2876 */
2877 ClientHttpRequest **H;
2878 clientStreamNode *node;
2879 ClientHttpRequest *http =
2880 parseHttpRequestAbort(this, "error:Connection%20lifetime%20expired");
2881 node = http->client_stream.tail->prev->data;
2882 clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
2883 assert (repContext);
2884 repContext->setReplyToError(ERR_LIFETIME_EXP,
2885 HTTP_REQUEST_TIMEOUT, METHOD_NONE, "N/A", &peer.sin_addr,
2886 NULL, NULL, NULL);
2887 /* No requests can be outstanding */
2888 assert(chr == NULL);
2889 /* add to the client request queue */
2890
2891 for (H = &chr; *H; H = &(*H)->next);
2892 *H = http;
2893
2894 clientStreamRead(http->client_stream.tail->data, http, 0,
2895 HTTP_REQBUF_SZ, context->reqbuf);
2896
2897 /*
2898 * if we don't close() here, we still need a timeout handler!
2899 */
2900 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2901 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
2902 TimeoutDialer(this,&ConnStateData::requestTimeout));
2903 commSetTimeout(io.fd, 30, timeoutCall);
2904
2905 /*
2906 * Aha, but we don't want a read handler!
2907 */
2908 commSetSelect(io.fd, COMM_SELECT_READ, NULL, NULL, 0);
2909 }
2910
2911 #else
2912 /*
2913 * Just close the connection to not confuse browsers
2914 * using persistent connections. Some browsers open
2915 * a connection and then do not use it until much
2916 * later (presumably because the request triggering
2917 * the open has already been completed on another
2918 * connection)
2919 */
2920 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2921
2922 comm_close(io.fd);
2923
2924 #endif
2925 }
2926
2927
2928
2929 static void
2930 clientLifetimeTimeout(int fd, void *data)
2931 {
2932 ClientHttpRequest *http = (ClientHttpRequest *)data;
2933 debugs(33, 1, "WARNING: Closing client " << http->getConn()->peer << " connection due to lifetime timeout");
2934 debugs(33, 1, "\t" << http->uri);
2935 comm_close(fd);
2936 }
2937
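/**
 * Check whether we still have file descriptors to spare before accepting
 * another client connection. When fewer than RESERVED_FD descriptors remain
 * free, refuse (the caller defers the accept) and warn at most once every
 * 15 seconds.
 */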
2938 static bool
2939 okToAccept()
2940 {
2941 static time_t last_warn = 0;
2942
2943 if (fdNFree() >= RESERVED_FD)
2944 return true;
2945
2946 if (last_warn + 15 < squid_curtime) {
2947 debugs(33, 0, HERE << "WARNING! Your cache is running out of filedescriptors");
2948 last_warn = squid_curtime;
2949 }
2950
2951 return false;
2952 }
2953
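/**
 * Allocate and initialise the per-connection state: addresses (with the
 * client netmask applied to the logged address), the read buffer, and a
 * reference to the http_port the connection arrived on. For intercepted or
 * spoofing ports a NAT lookup recovers the original endpoints, and Path MTU
 * discovery is optionally disabled as configured.
 */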
2954 ConnStateData *
2955 connStateCreate(const IpAddress &peer, const IpAddress &me, int fd, http_port_list *port)
2956 {
2957 ConnStateData *result = new ConnStateData;
2958
2959 result->peer = peer;
2960 result->log_addr = peer;
2961 result->log_addr.ApplyMask(Config.Addrs.client_netmask.GetCIDR());
2962 result->me = me;
2963 result->fd = fd;
2964 result->in.buf = (char *)memAllocBuf(CLIENT_REQ_BUF_SZ, &result->in.allocatedSize);
2965 result->port = cbdataReference(port);
2966
2967 if (port->intercepted || port->spoof_client_ip) {
2968 IpAddress client, dst;
2969
2970 if (IpInterceptor.NatLookup(fd, me, peer, client, dst) == 0) {
2971 result->me = client;
2972 result->peer = dst;
2973 result->transparent(true);
2974 }
2975 }
2976
2977 if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
2978 (result->transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
2979 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
2980 int i = IP_PMTUDISC_DONT;
2981 setsockopt(fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof i);
2982
2983 #else
2984
2985 static int reported = 0;
2986
2987 if (!reported) {
2988 debugs(33, 1, "Notice: httpd_accel_no_pmtu_disc not supported on your platform");
2989 reported = 1;
2990 }
2991
2992 #endif
2993
2994 }
2995
2996 result->flags.readMoreRequests = true;
2997 return result;
2998 }
2999
3000 /** Handle a new connection on HTTP socket. */
3001 void
3002 httpAccept(int sock, int newfd, ConnectionDetail *details,
3003 comm_err_t flag, int xerrno, void *data)
3004 {
3005 http_port_list *s = (http_port_list *)data;
3006 ConnStateData *connState = NULL;
3007
3008 if (flag == COMM_ERR_CLOSING) {
3009 return;
3010 }
3011
3012 if (!okToAccept())
3013 AcceptLimiter::Instance().defer (sock, httpAccept, data);
3014 else
3015 /* kick off another one for later */
3016 comm_accept(sock, httpAccept, data);
3017
3018 if (flag != COMM_OK) {
3019 debugs(33, 1, "httpAccept: FD " << sock << ": accept failure: " << xstrerr(xerrno));
3020 return;
3021 }
3022
3023 debugs(33, 4, "httpAccept: FD " << newfd << ": accepted");
3024 fd_note(newfd, "client http connect");
3025 connState = connStateCreate(&details->peer, &details->me, newfd, s);
3026
3027 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3028 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::connStateClosed",
3029 Dialer(connState, &ConnStateData::connStateClosed));
3030 comm_add_close_handler(newfd, call);
3031
3032 if (Config.onoff.log_fqdn)
3033 fqdncache_gethostbyaddr(details->peer, FQDN_LOOKUP_IF_MISS);
3034
3035 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
3036 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
3037 TimeoutDialer(connState,&ConnStateData::requestTimeout));
3038 commSetTimeout(newfd, Config.Timeout.read, timeoutCall);
3039
3040 #if USE_IDENT
3041 if (Ident::TheConfig.identLookup) {
3042 ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
3043 identChecklist.src_addr = details->peer;
3044 identChecklist.my_addr = details->me;
3045 if (identChecklist.fastCheck())
3046 Ident::Start(details->me, details->peer, clientIdentDone, connState);
3047 }
3048 #endif
3049
3050 if (s->tcp_keepalive.enabled) {
3051 commSetTcpKeepalive(newfd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
3052 }
3053
3054 connState->readSomeData();
3055
3056 clientdbEstablished(details->peer, 1);
3057
3058 incoming_sockets_accepted++;
3059 }
3060
3061 #if USE_SSL
3062
3063 /** Create SSL connection structure and update fd_table */
3064 static SSL *
3065 httpsCreate(int newfd, ConnectionDetail *details, SSL_CTX *sslContext)
3066 {
3067 SSL *ssl = SSL_new(sslContext);
3068
3069 if (!ssl) {
3070 const int ssl_error = ERR_get_error();
3071 debugs(83, 1, "httpsAccept: Error allocating handle: " << ERR_error_string(ssl_error, NULL) );
3072 comm_close(newfd);
3073 return NULL;
3074 }
3075
3076 SSL_set_fd(ssl, newfd);
3077 fd_table[newfd].ssl = ssl;
3078 fd_table[newfd].read_method = &ssl_read_method;
3079 fd_table[newfd].write_method = &ssl_write_method;
3080
3081 debugs(33, 5, "httpsCreate: will negotate SSL on FD " << newfd);
3082 fd_note(newfd, "client https start");
3083
3084 return ssl;
3085 }
3086
3087 /** negotiate an SSL connection */
3088 static void
3089 clientNegotiateSSL(int fd, void *data)
3090 {
3091 ConnStateData *conn = (ConnStateData *)data;
3092 X509 *client_cert;
3093 SSL *ssl = fd_table[fd].ssl;
3094 int ret;
3095
3096 if ((ret = SSL_accept(ssl)) <= 0) {
3097 int ssl_error = SSL_get_error(ssl, ret);
3098
3099 switch (ssl_error) {
3100
3101 case SSL_ERROR_WANT_READ:
3102 commSetSelect(fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
3103 return;
3104
3105 case SSL_ERROR_WANT_WRITE:
3106 commSetSelect(fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
3107 return;
3108
3109 case SSL_ERROR_SYSCALL:
3110
3111 if (ret == 0) {
3112 debugs(83, 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd << ": Aborted by client");
3113 comm_close(fd);
3114 return;
3115 } else {
3116 int hard = 1;
3117
3118 if (errno == ECONNRESET)
3119 hard = 0;
3120
3121 debugs(83, hard ? 1 : 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3122 fd << ": " << strerror(errno) << " (" << errno << ")");
3123
3124 comm_close(fd);
3125
3126 return;
3127 }
3128
3129 case SSL_ERROR_ZERO_RETURN:
3130 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd << ": Closed by client");
3131 comm_close(fd);
3132 return;
3133
3134 default:
3135 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3136 fd << ": " << ERR_error_string(ERR_get_error(), NULL) <<
3137 " (" << ssl_error << "/" << ret << ")");
3138 comm_close(fd);
3139 return;
3140 }
3141
3142 /* NOTREACHED */
3143 }
3144
3145 if (SSL_session_reused(ssl)) {
3146 debugs(83, 2, "clientNegotiateSSL: Session " << SSL_get_session(ssl) <<
3147 " reused on FD " << fd << " (" << fd_table[fd].ipaddr << ":" << (int)fd_table[fd].remote_port << ")");
3148 } else {
3149 if (do_debug(83, 4)) {
3150 /* Write out the SSL session details.. actually the call below, but
3151 * OpenSSL headers do strange typecasts confusing GCC.. */
3152 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
3153 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
3154 PEM_ASN1_write((i2d_of_void *)i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL);
3155
3156 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
3157
3158 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
3159 * This is caused by an unpredictable gcc behaviour on a cast of the first argument
3160 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
3161 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
3162 * Because there are two possible usable casts, if you get an error here, try the other
3163 * commented line. */
3164
3165 PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL);
3166 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL); */
3167
3168 #else
3169
3170 debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source." );
3171
3172 #endif
3173 /* Note: This does not automatically fflush the log file.. */
3174 }
3175
3176 debugs(83, 2, "clientNegotiateSSL: New session " <<
3177 SSL_get_session(ssl) << " on FD " << fd << " (" <<
3178 fd_table[fd].ipaddr << ":" << (int)fd_table[fd].remote_port <<
3179 ")");
3180 }
3181
3182 debugs(83, 3, "clientNegotiateSSL: FD " << fd << " negotiated cipher " <<
3183 SSL_get_cipher(ssl));
3184
3185 client_cert = SSL_get_peer_certificate(ssl);
3186
3187 if (client_cert != NULL) {
3188 debugs(83, 3, "clientNegotiateSSL: FD " << fd <<
3189 " client certificate: subject: " <<
3190 X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));
3191
3192 debugs(83, 3, "clientNegotiateSSL: FD " << fd <<
3193 " client certificate: issuer: " <<
3194 X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));
3195
3196
3197 X509_free(client_cert);
3198 } else {
3199 debugs(83, 5, "clientNegotiateSSL: FD " << fd <<
3200 " has no certificate.");
3201 }
3202
3203 conn->readSomeData();
3204 }
3205
3206 /** handle a new HTTPS connection */
3207 static void
3208 httpsAccept(int sock, int newfd, ConnectionDetail *details,
3209 comm_err_t flag, int xerrno, void *data)
3210 {
3211 https_port_list *s = (https_port_list *)data;
3212 SSL_CTX *sslContext = s->sslContext;
3213
3214 if (flag == COMM_ERR_CLOSING) {
3215 return;
3216 }
3217
3218 if (!okToAccept())
3219 AcceptLimiter::Instance().defer (sock, httpsAccept, data);
3220 else
3221 /* kick off another one for later */
3222 comm_accept(sock, httpsAccept, data);
3223
3224 if (flag != COMM_OK) {
3225 errno = xerrno;
3226 debugs(33, 1, "httpsAccept: FD " << sock << ": accept failure: " << xstrerr(xerrno));
3227 return;
3228 }
3229
3230 SSL *ssl = NULL;
3231 if (!(ssl = httpsCreate(newfd, details, sslContext)))
3232 return;
3233
3234 debugs(33, 5, "httpsAccept: FD " << newfd << " accepted, starting SSL negotiation.");
3235 fd_note(newfd, "client https connect");
3236 ConnStateData *connState = connStateCreate(details->peer, details->me,
3237 newfd, &s->http);
3238 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3239 AsyncCall::Pointer call = asyncCall(33, 5, "ConnStateData::connStateClosed",
3240 Dialer(connState, &ConnStateData::connStateClosed));
3241 comm_add_close_handler(newfd, call);
3242
3243 if (Config.onoff.log_fqdn)
3244 fqdncache_gethostbyaddr(details->peer, FQDN_LOOKUP_IF_MISS);
3245
3246 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
3247 AsyncCall::Pointer timeoutCall = asyncCall(33, 5, "ConnStateData::requestTimeout",
3248 TimeoutDialer(connState,&ConnStateData::requestTimeout));
3249 commSetTimeout(newfd, Config.Timeout.request, timeoutCall);
3250
3251 #if USE_IDENT
3252 if (Ident::TheConfig.identLookup) {
3253 ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
3254 identChecklist.src_addr = details->peer;
3255 identChecklist.my_addr = details->me;
3256 if (identChecklist.fastCheck())
3257 Ident::Start(details->me, details->peer, clientIdentDone, connState);
3258 }
3259
3260 #endif
3261
3262 if (s->http.tcp_keepalive.enabled) {
3263 commSetTcpKeepalive(newfd, s->http.tcp_keepalive.idle, s->http.tcp_keepalive.interval, s->http.tcp_keepalive.timeout);
3264 }
3265
3266 commSetSelect(newfd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
3267
3268 clientdbEstablished(details->peer, 1);
3269
3270 incoming_sockets_accepted++;
3271 }
3272
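/**
 * Convert this already-accepted connection to SSL (used when Squid bumps
 * CONNECT requests, i.e. sslBump). Frees any queued contexts, wraps the
 * existing FD in a new SSL handle via httpsCreate(), and schedules
 * clientNegotiateSSL() as the read handler so the TLS handshake can proceed.
 */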
3273 bool
3274 ConnStateData::switchToHttps()
3275 {
3276 assert(!switchedToHttps_);
3277
3278 //HTTPMSGLOCK(currentobject->http->request);
3279 assert(areAllContextsForThisConnection());
3280 freeAllContexts();
3281 //currentobject->connIsFinished();
3282
3283 debugs(33, 5, HERE << "converting FD " << fd << " to SSL");
3284
3285 // fake a ConnectionDetail object; XXX: make ConnState a ConnectionDetail?
3286 ConnectionDetail detail;
3287 detail.me = me;
3288 detail.peer = peer;
3289
3290 SSL_CTX *sslContext = port->sslContext;
3291 SSL *ssl = NULL;
3292 if (!(ssl = httpsCreate(fd, &detail, sslContext)))
3293 return false;
3294
3295 // commSetTimeout() was called for this request before we switched.
3296
3297 // Disable the client read handler until peer selection is complete
3298 commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
3299
3300 commSetSelect(fd, COMM_SELECT_READ, clientNegotiateSSL, this, 0);
3301
3302 switchedToHttps_ = true;
3303 return true;
3304 }
3305
3306 #endif /* USE_SSL */
3307
3308
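/**
 * Open a listening socket for every configured http_port line, up to
 * MAXHTTPPORTS. Ports that spoof the client IP are opened with
 * COMM_TRANSPARENT; each socket is registered with httpAccept() and
 * remembered in HttpSockets[]. Also warns when sslBump ports exist but no
 * ssl_bump ACL is configured.
 */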
3309 static void
3310 clientHttpConnectionsOpen(void)
3311 {
3312 http_port_list *s = NULL;
3313 int fd = -1;
3314 #if USE_SSL
3315 int bumpCount = 0; // counts http_ports with sslBump option
3316 #endif
3317
3318 for (s = Config.Sockaddr.http; s; s = s->next) {
3319 if (MAXHTTPPORTS == NHttpSockets) {
3320 debugs(1, 1, "WARNING: You have too many 'http_port' lines.");
3321 debugs(1, 1, " The limit is " << MAXHTTPPORTS);
3322 continue;
3323 }
3324
3325 #if USE_SSL
3326 if (s->sslBump && s->sslContext == NULL) {
3327 debugs(1, 1, "Will not bump SSL at http_port " <<
3328 s->http.s << " due to SSL initialization failure.");
3329 s->sslBump = 0;
3330 }
3331 if (s->sslBump)
3332 ++bumpCount;
3333 #endif
3334
3335 enter_suid();
3336
3337 if (s->spoof_client_ip) {
3338 fd = comm_open_listener(SOCK_STREAM, IPPROTO_TCP, s->s, (COMM_NONBLOCKING|COMM_TRANSPARENT), "HTTP Socket");
3339 } else {
3340 fd = comm_open_listener(SOCK_STREAM, IPPROTO_TCP, s->s, COMM_NONBLOCKING, "HTTP Socket");
3341 }
3342
3343 leave_suid();
3344
3345 if (fd < 0)
3346 continue;
3347
3348 comm_listen(fd);
3349
3350 comm_accept(fd, httpAccept, s);
3351
3352 debugs(1, 1, "Accepting " <<
3353 (s->intercepted ? " intercepted" : "") <<
3354 (s->spoof_client_ip ? " spoofing" : "") <<
3355 (s->sslBump ? " bumpy" : "") <<
3356 (s->accel ? " accelerated" : "")
3357 << " HTTP connections at " << s->s
3358 << ", FD " << fd << "." );
3359
3360 HttpSockets[NHttpSockets++] = fd;
3361 }
3362
3363 #if USE_SSL
3364 if (bumpCount && !Config.accessList.ssl_bump)
3365 debugs(33, 1, "WARNING: http_port(s) with SslBump found, but no " <<
3366 std::endl << "\tssl_bump ACL configured. No requests will be " <<
3367 "bumped.");
3368 #endif
3369 }
3370
3371 #if USE_SSL
3372 static void
3373 clientHttpsConnectionsOpen(void)
3374 {
3375 https_port_list *s;
3376 int fd;
3377
3378 for (s = Config.Sockaddr.https; s; s = (https_port_list *)s->http.next) {
3379 if (MAXHTTPPORTS == NHttpSockets) {
3380 debugs(1, 1, "Ignoring 'https_port' lines exceeding the limit.");
3381 debugs(1, 1, "The limit is " << MAXHTTPPORTS << " HTTPS ports.");
3382 continue;
3383 }
3384
3385 if (s->sslContext == NULL) {
3386 debugs(1, 1, "Ignoring https_port " << s->http.s <<
3387 " due to SSL initialization failure.");
3388 continue;
3389 }
3390
3391 enter_suid();
3392 fd = comm_open_listener(SOCK_STREAM,
3393 IPPROTO_TCP,
3394 s->http.s,
3395 COMM_NONBLOCKING, "HTTPS Socket");
3396 leave_suid();
3397
3398 if (fd < 0)
3399 continue;
3400
3401 comm_listen(fd);
3402
3403 comm_accept(fd, httpsAccept, s);
3404
3405 debugs(1, 1, "Accepting HTTPS connections at " << s->http.s << ", FD " << fd << ".");
3406
3407 HttpSockets[NHttpSockets++] = fd;
3408 }
3409 }
3410
3411 #endif
3412
3413 void
3414 clientOpenListenSockets(void)
3415 {
3416 clientHttpConnectionsOpen();
3417 #if USE_SSL
3418
3419 clientHttpsConnectionsOpen();
3420 #endif
3421
3422 if (NHttpSockets < 1)
3423 fatal("Cannot open HTTP Port");
3424 }
3425
3426 void
3427 clientHttpConnectionsClose(void)
3428 {
3429 int i;
3430
3431 for (i = 0; i < NHttpSockets; i++) {
3432 if (HttpSockets[i] >= 0) {
3433 debugs(1, 1, "FD " << HttpSockets[i] <<
3434 " Closing HTTP connection");
3435 comm_close(HttpSockets[i]);
3436 HttpSockets[i] = -1;
3437 }
3438 }
3439
3440 NHttpSockets = 0;
3441 }
3442
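/**
 * varyEvaluateMatch
 *
 * Decide how a cached entry's Vary information relates to this request.
 * Returns VARY_NONE for objects that do not vary, VARY_MATCH when the
 * request's vary mark equals the one stored with the object, VARY_OTHER
 * when a vary key was computed and the search should continue, and
 * VARY_CANCEL when the variance cannot be handled.
 */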
3443 int
3444 varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
3445 {
3446 const char *vary = request->vary_headers;
3447 int has_vary = entry->getReply()->header.has(HDR_VARY);
3448 #if X_ACCELERATOR_VARY
3449
3450 has_vary |=
3451 entry->getReply()->header.has(HDR_X_ACCELERATOR_VARY);
3452 #endif
3453
3454 if (!has_vary || !entry->mem_obj->vary_headers) {
3455 if (vary) {
3456 /* Oops... something odd is going on here.. */
3457 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3458 entry->mem_obj->url << "' '" << vary << "'");
3459 safe_free(request->vary_headers);
3460 return VARY_CANCEL;
3461 }
3462
3463 if (!has_vary) {
3464 /* This is not a varying object */
3465 return VARY_NONE;
3466 }
3467
3468 /* virtual "vary" object found. Calculate the vary key and
3469 * continue the search
3470 */
3471 vary = httpMakeVaryMark(request, entry->getReply());
3472
3473 if (vary) {
3474 request->vary_headers = xstrdup(vary);
3475 return VARY_OTHER;
3476 } else {
3477 /* Ouch.. we cannot handle this kind of variance */
3478 /* XXX This cannot really happen, but just to be complete */
3479 return VARY_CANCEL;
3480 }
3481 } else {
3482 if (!vary) {
3483 vary = httpMakeVaryMark(request, entry->getReply());
3484
3485 if (vary)
3486 request->vary_headers = xstrdup(vary);
3487 }
3488
3489 if (!vary) {
3490 /* Ouch.. we cannot handle this kind of variance */
3491 /* XXX This cannot really happen, but just to be complete */
3492 return VARY_CANCEL;
3493 } else if (strcmp(vary, entry->mem_obj->vary_headers) == 0) {
3494 return VARY_MATCH;
3495 } else {
3496 /* Oops.. we have already been here and still haven't
3497 * found the requested variant. Bail out
3498 */
3499 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3500 entry->mem_obj->url << "' '" << vary << "'");
3501 return VARY_CANCEL;
3502 }
3503 }
3504 }
3505
3506 ACLFilledChecklist *
3507 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3508 {
3509 ConnStateData * conn = http->getConn();
3510 ACLFilledChecklist *ch = new ACLFilledChecklist(acl, http->request,
3511 cbdataReferenceValid(conn) && conn != NULL ? conn->rfc931 : dash_str);
3512
3513 /*
3514 * hack for ident ACL. It needs to get full addresses, and a place to store
3515 * the ident result on persistent connections...
3516 */
3517 /* connection oriented auth also needs these two lines for its operation. */
3518 /*
3519 * Internal requests do not have a connection reference, because: A) their
3520 * byte count may be transformed before being applied to an outbound
3521 * connection B) they are internal - any limiting on them should be done on
3522 * the server end.
3523 */
3524
3525 if (conn != NULL)
3526 ch->conn(conn); /* unreferenced in FilledCheckList.cc */
3527
3528 return ch;
3529 }
3530
3531 CBDATA_CLASS_INIT(ConnStateData);
3532
3533 ConnStateData::ConnStateData() :AsyncJob("ConnStateData"), transparent_ (false), reading_ (false), closing_ (false)
3534 {
3535 pinning.fd = -1;
3536 pinning.pinned = false;
3537 pinning.auth = false;
3538 }
3539
3540 bool
3541 ConnStateData::transparent() const
3542 {
3543 return transparent_;
3544 }
3545
3546 void
3547 ConnStateData::transparent(bool const anInt)
3548 {
3549 transparent_ = anInt;
3550 }
3551
3552 bool
3553 ConnStateData::reading() const
3554 {
3555 return reading_;
3556 }
3557
3558 void
3559 ConnStateData::reading(bool const newBool)
3560 {
3561 assert (reading() != newBool);
3562 reading_ = newBool;
3563 }
3564
3565
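/**
 * Prepare to receive a request body of the given size: create a new BodyPipe
 * with this connection as the producer and announce the expected body size
 * to the consumer side.
 */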
3566 BodyPipe::Pointer
3567 ConnStateData::expectRequestBody(int64_t size)
3568 {
3569 bodyPipe = new BodyPipe(this);
3570 bodyPipe->setBodySize(size);
3571 return bodyPipe;
3572 }
3573
3574 bool
3575 ConnStateData::closing() const
3576 {
3577 return closing_;
3578 }
3579
3580 /**
3581 * Called by ClientSocketContext to give the connection a chance to read
3582 * the entire body before closing the socket.
3583 */
3584 void
3585 ConnStateData::startClosing(const char *reason)
3586 {
3587 debugs(33, 5, HERE << "startClosing " << this << " for " << reason);
3588 assert(!closing());
3589 closing_ = true;
3590
3591 assert(bodyPipe != NULL);
3592 assert(bodySizeLeft() > 0);
3593
3594 // We do not have to abort the body pipeline because we are going to
3595 // read the entire body anyway.
3596 // Perhaps an ICAP server wants to log the complete request.
3597
3598 // If a consumer abort has caused this closing, we may get stuck
3599 // as nobody is consuming our data. Allow auto-consumption.
3600 bodyPipe->enableAutoConsumption();
3601 }
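
// Editorial sketch (not part of Squid): the kind of decision a ClientSocketContext
// caller makes before tearing the connection down, per the comment above
// startClosing(). Names are illustrative only.
#if 0
if (conn->bodySizeLeft() > 0 && !conn->closing())
    conn->startClosing("partially read request body");
else
    comm_close(conn->fd);
#endif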
3602
3603 // initialize dechunking state
3604 void
3605 ConnStateData::startDechunkingRequest(HttpParser *hp)
3606 {
3607 debugs(33, 5, HERE << "start dechunking at " << HttpParserRequestLen(hp));
3608 assert(in.dechunkingState == chunkUnknown);
3609 assert(!in.bodyParser);
3610 in.bodyParser = new ChunkedCodingParser;
3611 in.chunkedSeen = HttpParserRequestLen(hp); // skip headers when dechunking
3612 in.chunked.init(); // TODO: should we have a smaller-than-default limit?
3613 in.dechunked.init();
3614 in.dechunkingState = chunkParsing;
3615 }
3616
3617 // put parsed content into input buffer and clean up
3618 void
3619 ConnStateData::finishDechunkingRequest(HttpParser *hp)
3620 {
3621 debugs(33, 5, HERE << "finish dechunking; content: " << in.dechunked.contentSize());
3622
3623 assert(in.dechunkingState == chunkReady);
3624 assert(in.bodyParser);
3625 delete in.bodyParser;
3626 in.bodyParser = NULL;
3627
3628 const mb_size_t headerSize = HttpParserRequestLen(hp);
3629
3630 // dechunking cannot make data bigger
3631 assert(headerSize + in.dechunked.contentSize() + in.chunked.contentSize()
3632 <= static_cast<mb_size_t>(in.notYetUsed));
3633 assert(in.notYetUsed <= in.allocatedSize);
3634
3635 // copy dechunked content
3636 char *end = in.buf + headerSize;
3637 xmemmove(end, in.dechunked.content(), in.dechunked.contentSize());
3638 end += in.dechunked.contentSize();
3639
3640 // copy any leftovers following the chunked body (e.g., due to request pipelining)
3641 if (in.chunked.contentSize()) {
3642 xmemmove(end, in.chunked.content(), in.chunked.contentSize());
3643 end += in.chunked.contentSize();
3644 }
3645
3646 in.notYetUsed = end - in.buf;
3647
3648 in.chunked.clean();
3649 in.dechunked.clean();
3650 in.dechunkingState = chunkUnknown;
3651 }
3652
3653 // parse newly read request chunks and buffer them for finishDechunkingRequest
3654 // returns true iff more data is needed
3655 bool
3656 ConnStateData::parseRequestChunks(HttpParser *)
3657 {
3658 debugs(33,5, HERE << "parsing chunked request body at " <<
3659 in.chunkedSeen << " < " << in.notYetUsed);
3660 assert(in.bodyParser);
3661 assert(in.dechunkingState == chunkParsing);
3662
3663 assert(in.chunkedSeen <= in.notYetUsed);
3664 const mb_size_t fresh = in.notYetUsed - in.chunkedSeen;
3665
3666 // be safe: count some chunked coding metadata towards the total body size
3667 if (fresh + in.dechunked.contentSize() > Config.maxChunkedRequestBodySize) {
3668 debugs(33,3, HERE << "chunked body (" << fresh << " + " <<
3669 in.dechunked.contentSize() << ") may exceed " <<
3670 "chunked_request_body_max_size=" <<
3671 Config.maxChunkedRequestBodySize);
3672 in.dechunkingState = chunkError;
3673 return false;
3674 }
3675
3676 if (fresh > in.chunked.potentialSpaceSize()) {
3677 // should not happen if Config.maxChunkedRequestBodySize is reasonable
3678 debugs(33,1, HERE << "request_body_max_size exceeds chunked buffer " <<
3679 "size: " << fresh << " + " << in.chunked.contentSize() << " > " <<
3680 in.chunked.potentialSpaceSize() << " with " <<
3681 "chunked_request_body_max_size=" <<
3682 Config.maxChunkedRequestBodySize);
3683 in.dechunkingState = chunkError;
3684 return false;
3685 }
3686 in.chunked.append(in.buf + in.chunkedSeen, fresh);
3687 in.chunkedSeen += fresh;
3688
3689 try { // the parser will throw on errors
3690 if (in.bodyParser->parse(&in.chunked, &in.dechunked))
3691 in.dechunkingState = chunkReady; // successfully parsed all chunks
3692 else
3693 return true; // need more, keep the same state
3694 } catch (...) {
3695 debugs(33,3, HERE << "chunk parsing error");
3696 in.dechunkingState = chunkError;
3697 }
3698 return false; // error, unsupported, or done
3699 }
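
// Editorial sketch (not part of Squid): the intended calling sequence for the
// three dechunking methods above. The driving code lives in the request-reading
// path; this only shows the order of operations.
#if 0
conn->startDechunkingRequest(hp);        // once, after the request headers are parsed
// ... after each network read that appends to in.buf and grows in.notYetUsed:
const bool needsMoreData = conn->parseRequestChunks(hp);
if (!needsMoreData && conn->in.dechunkingState == ConnStateData::chunkReady)
    conn->finishDechunkingRequest(hp);   // splice the dechunked body back into in.buf
#endif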
3700
3701 char *
3702 ConnStateData::In::addressToReadInto() const
3703 {
3704 return buf + notYetUsed;
3705 }
3706
3707 ConnStateData::In::In() : bodyParser(NULL),
3708 buf (NULL), notYetUsed (0), allocatedSize (0),
3709 dechunkingState(ConnStateData::chunkUnknown)
3710 {}
3711
3712 ConnStateData::In::~In()
3713 {
3714 if (allocatedSize)
3715 memFreeBuf(allocatedSize, buf);
3716 delete bodyParser; // deleting a NULL pointer is a no-op; TODO: pool
3718 }
3719
3720 /* This is a comm call normally scheduled by comm_close() */
3721 void
3722 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
3723 {
3724 pinning.fd = -1;
3725 if (pinning.peer) {
3726 cbdataReferenceDone(pinning.peer);
3727 }
3728 safe_free(pinning.host);
3729 /* NOTE: pinning.pinned should be kept. This, combined with fd == -1 at the end of a request, indicates
3730 * that the pinned server connection has gone away */
3731 }
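
// Editorial sketch (not part of Squid): what the NOTE above implies for later code.
// A connection flagged as pinned whose fd has reverted to -1 means the pinned
// server connection has gone away and cannot be reused.
#if 0
if (conn->pinning.pinned && conn->pinning.fd == -1) {
    // the peer closed the pinned connection; fail or re-forward as appropriate
}
#endif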
3732
3733 void ConnStateData::pinConnection(int pinning_fd, HttpRequest *request, struct peer *peer, bool auth)
3734 {
3735 fde *f;
3736 char desc[FD_DESC_SZ];
3737
3738 if (pinning.fd == pinning_fd)
3739 return;
3740 else if (pinning.fd != -1)
3741 comm_close(pinning.fd);
3742
3743 safe_free(pinning.host); /* safe_free() already handles a NULL pointer */
3745
3746 pinning.fd = pinning_fd;
3747 pinning.host = xstrdup(request->GetHost());
3748 pinning.port = request->port;
3749 pinning.pinned = true;
3750 if (pinning.peer)
3751 cbdataReferenceDone(pinning.peer);
3752 if (peer)
3753 pinning.peer = cbdataReference(peer);
3754 pinning.auth = auth;
3755 f = &fd_table[fd];
3756 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s:%d (%d)",
3757 (auth || !peer) ? request->GetHost() : peer->name, f->ipaddr, (int) f->remote_port, fd);
3758 fd_note(pinning_fd, desc);
3759
3760 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3761 pinning.closeHandler = asyncCall(33, 5, "ConnStateData::clientPinnedConnectionClosed",
3762 Dialer(this, &ConnStateData::clientPinnedConnectionClosed));
3763 comm_add_close_handler(pinning_fd, pinning.closeHandler);
3764
3765 }
3766
3767 int ConnStateData::validatePinnedConnection(HttpRequest *request, const struct peer *peer)
3768 {
3769 bool valid = true;
3770 if (pinning.fd < 0)
3771 return -1;
3772
3773 if (pinning.auth && request && strcasecmp(pinning.host, request->GetHost()) != 0) {
3774 valid = false;
3775 }
3776 if (request && pinning.port != request->port) {
3777 valid = false;
3778 }
3779 if (pinning.peer && !cbdataReferenceValid(pinning.peer)) {
3780 valid = false;
3781 }
3782 if (peer != pinning.peer) {
3783 valid = false;
3784 }
3785
3786 if (!valid) {
3787 const int pinning_fd = pinning.fd;
3788 /* The pinning info is not valid; remove any pinning info */
3789 unpinConnection();
3790
3791 /* also close the server-side socket; we should not use it for
3792 invalid/unauthenticated requests...
3793 */
3794 comm_close(pinning_fd);
3795 return -1;
3796 }
3797
3798 return pinning.fd;
3799 }
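
// Editorial sketch (not part of Squid): how forwarding code might consult the
// validation above before reusing a pinned server-side descriptor.
#if 0
const int server_fd = conn->validatePinnedConnection(request, peer);
if (server_fd >= 0) {
    // safe to reuse the pinned server connection for this request
} else {
    // pinning was absent or invalid and has been cleaned up; open a new connection
}
#endif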
3800
3801 void ConnStateData::unpinConnection()
3802 {
3803 if (pinning.peer)
3804 cbdataReferenceDone(pinning.peer);
3805
3806 if (pinning.closeHandler != NULL) {
3807 comm_remove_close_handler(pinning.fd, pinning.closeHandler);
3808 pinning.closeHandler = NULL;
3809 }
3810 pinning.fd = -1;
3811 safe_free(pinning.host);
3812 }