2 * DEBUG: section 33 Client-side Routines
3 * AUTHOR: Duane Wessels
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 \defgroup ClientSide Client-Side Logics
35 \section cserrors Errors and client side
37 \par Problem the first:
38 * the store entry is no longer authoritative on the
39 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
40 * of client_side_reply.c.
41 * Problem the second: resources are wasted if we delay in cleaning up.
42 * Problem the third: we can't depend on a connection close to clean up.
44 \par Nice thing the first:
45 * Any step in the stream can callback with data
46 * representing an error.
47 * Nice thing the second: once you stop requesting reads from upstream,
48 * upstream can be stopped too.
51 * Error has a callback mechanism to hand over a membuf
52 * with the error content. The failing node pushes that back as the
53 * reply. Can this be generalised to reduce duplicate efforts?
54 * A: Possibly. For now, only one location uses this.
55 * How to deal with pre-stream errors?
56 * Tell client_side_reply that we *want* an error page before any
57 * stream calls occur. Then we simply read as normal.
60 \section pconn_logic Persistent connection logic:
63 * requests (httpClientRequest structs) get added to the connection
64 * list, with the current one being chr
67 * The request is *immediately* kicked off, and data flows through
68 * to clientSocketRecipient.
71 * If the data that arrives at clientSocketRecipient is not for the current
72 * request, clientSocketRecipient simply returns, without requesting more
73 * data, or sending it.
76 * ClientKeepAliveNextRequest will then detect the presence of data in
77 * the next ClientHttpRequest, and will send it, re-establishing the
82 #include "acl/FilledChecklist.h"
83 #include "anyp/PortCfg.h"
84 #include "base/Subscription.h"
85 #include "base/TextException.h"
86 #include "CachePeer.h"
87 #include "ChunkedCodingParser.h"
88 #include "client_db.h"
89 #include "client_side.h"
90 #include "client_side_reply.h"
91 #include "client_side_request.h"
92 #include "ClientRequestContext.h"
93 #include "clientStream.h"
95 #include "comm/Connection.h"
96 #include "comm/ConnOpener.h"
97 #include "comm/Loops.h"
98 #include "comm/Read.h"
99 #include "comm/TcpAcceptor.h"
100 #include "comm/Write.h"
101 #include "CommCalls.h"
102 #include "errorpage.h"
105 #include "FtpServer.h"
106 #include "fqdncache.h"
107 #include "FwdState.h"
110 #include "HttpHdrCc.h"
111 #include "HttpHdrContRange.h"
112 #include "HttpHeaderTools.h"
113 #include "HttpReply.h"
114 #include "HttpRequest.h"
115 #include "ident/Config.h"
116 #include "ident/Ident.h"
117 #include "internal.h"
118 #include "ip/tools.h"
119 #include "ipc/FdNotes.h"
120 #include "ipc/StartListening.h"
121 #include "log/access_log.h"
124 #include "MemObject.h"
125 #include "mime_header.h"
126 #include "profiler/Profiler.h"
128 #include "SquidConfig.h"
129 #include "SquidTime.h"
130 #include "StatCounters.h"
131 #include "StatHist.h"
133 #include "TimeOrTag.h"
138 #include "auth/UserRequest.h"
141 #include "ClientInfo.h"
144 #include "ssl/context_storage.h"
145 #include "ssl/gadgets.h"
146 #include "ssl/helper.h"
147 #include "ssl/ProxyCerts.h"
148 #include "ssl/ServerBump.h"
149 #include "ssl/support.h"
152 #include "ssl/certificate_db.h"
153 #include "ssl/crtd_message.h"
162 #define comm_close comm_lingering_close
165 /// dials clientListenerConnectionOpened call
166 class ListeningStartedDialer
: public CallDialer
, public Ipc::StartListeningCb
169 typedef void (*Handler
)(AnyP::PortCfgPointer
&portCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&sub
);
170 ListeningStartedDialer(Handler aHandler
, AnyP::PortCfgPointer
&aPortCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&aSub
):
171 handler(aHandler
), portCfg(aPortCfg
), portTypeNote(note
), sub(aSub
) {}
173 virtual void print(std::ostream
&os
) const {
175 ", " << FdNote(portTypeNote
) << " port=" << (void*)&portCfg
<< ')';
178 virtual bool canDial(AsyncCall
&) const { return true; }
179 virtual void dial(AsyncCall
&) { (handler
)(portCfg
, portTypeNote
, sub
); }
185 AnyP::PortCfgPointer portCfg
; ///< from HttpPortList
186 Ipc::FdNoteId portTypeNote
; ///< Type of IPC socket being opened
187 Subscription::Pointer sub
; ///< The handler to be subscribed for this connetion listener
190 static void clientListenerConnectionOpened(AnyP::PortCfgPointer
&s
, const Ipc::FdNoteId portTypeNote
, const Subscription::Pointer
&sub
);
192 /* our socket-related context */
194 CBDATA_CLASS_INIT(ClientSocketContext
);
196 /* Local functions */
197 static IOCB clientWriteComplete
;
198 static IOCB clientWriteBodyComplete
;
199 static IOACB httpAccept
;
201 static IOACB httpsAccept
;
203 static IOACB ftpAccept
;
204 static CTCB clientLifetimeTimeout
;
205 static ClientSocketContext
*parseHttpRequestAbort(ConnStateData
* conn
, const char *uri
);
206 static ClientSocketContext
*parseHttpRequest(ConnStateData
*, HttpParser
*, HttpRequestMethod
*, Http::ProtocolVersion
*);
208 static IDCB clientIdentDone
;
210 static CSCB clientSocketRecipient
;
211 static CSD clientSocketDetach
;
212 static void clientSetKeepaliveFlag(ClientHttpRequest
*);
213 static int clientIsContentLengthValid(HttpRequest
* r
);
214 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
);
216 static void clientUpdateStatHistCounters(LogTags logType
, int svc_time
);
217 static void clientUpdateStatCounters(LogTags logType
);
218 static void clientUpdateHierCounters(HierarchyLogEntry
*);
219 static bool clientPingHasFinished(ping_data
const *aPing
);
220 void prepareLogWithRequestDetails(HttpRequest
*, AccessLogEntry::Pointer
&);
222 static bool connIsUsable(ConnStateData
* conn
);
224 static int responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const &receivedData
);
225 static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
);
226 static void clientUpdateSocketStats(LogTags logType
, size_t size
);
228 char *skipLeadingSpace(char *aString
);
229 static void connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
);
231 static void FtpChangeState(ConnStateData
*connState
, const ConnStateData::FtpState newState
, const char *reason
);
232 static IOACB FtpAcceptDataConnection
;
233 static void FtpCloseDataConnection(ConnStateData
*conn
);
234 static ClientSocketContext
*FtpParseRequest(ConnStateData
*connState
, HttpRequestMethod
*method_p
, Http::ProtocolVersion
*http_ver
);
235 static bool FtpHandleUserRequest(ConnStateData
*connState
, const String
&cmd
, String
&params
);
236 static CNCB FtpHandleConnectDone
;
238 static void FtpHandleReply(ClientSocketContext
*context
, HttpReply
*reply
, StoreIOBuffer data
);
239 typedef void FtpReplyHandler(ClientSocketContext
*context
, const HttpReply
*reply
, StoreIOBuffer data
);
240 static FtpReplyHandler FtpHandleFeatReply
;
241 static FtpReplyHandler FtpHandlePasvReply
;
242 static FtpReplyHandler FtpHandlePortReply
;
243 static FtpReplyHandler FtpHandleErrorReply
;
244 static FtpReplyHandler FtpHandleDataReply
;
245 static FtpReplyHandler FtpHandleUploadReply
;
246 static FtpReplyHandler FtpHandleEprtReply
;
247 static FtpReplyHandler FtpHandleEpsvReply
;
249 static void FtpWriteEarlyReply(ConnStateData
*conn
, const int code
, const char *msg
);
250 static void FtpWriteReply(ClientSocketContext
*context
, MemBuf
&mb
);
251 static void FtpWriteCustomReply(ClientSocketContext
*context
, const int code
, const char *msg
, const HttpReply
*reply
= NULL
);
252 static void FtpWriteForwardedReply(ClientSocketContext
*context
, const HttpReply
*reply
);
253 static void FtpWriteForwardedReply(ClientSocketContext
*context
, const HttpReply
*reply
, AsyncCall::Pointer call
);
254 static void FtpWriteErrorReply(ClientSocketContext
*context
, const HttpReply
*reply
, const int status
);
256 static void FtpPrintReply(MemBuf
&mb
, const HttpReply
*reply
, const char *const prefix
= "");
257 static IOCB FtpWroteEarlyReply
;
258 static IOCB FtpWroteReply
;
259 static IOCB FtpWroteReplyData
;
261 typedef bool FtpRequestHandler(ClientSocketContext
*context
, String
&cmd
, String
&params
);
262 static FtpRequestHandler FtpHandleRequest
;
263 static FtpRequestHandler FtpHandleFeatRequest
;
264 static FtpRequestHandler FtpHandlePasvRequest
;
265 static FtpRequestHandler FtpHandlePortRequest
;
266 static FtpRequestHandler FtpHandleDataRequest
;
267 static FtpRequestHandler FtpHandleUploadRequest
;
268 static FtpRequestHandler FtpHandleEprtRequest
;
269 static FtpRequestHandler FtpHandleEpsvRequest
;
270 static FtpRequestHandler FtpHandleCwdRequest
;
271 static FtpRequestHandler FtpHandlePassRequest
;
272 static FtpRequestHandler FtpHandleCdupRequest
;
274 static bool FtpCheckDataConnPre(ClientSocketContext
*context
);
275 static bool FtpCheckDataConnPost(ClientSocketContext
*context
);
276 static void FtpSetDataCommand(ClientSocketContext
*context
);
277 static void FtpSetReply(ClientSocketContext
*context
, const int code
, const char *msg
);
278 static bool FtpSupportedCommand(const String
&name
);
282 ClientSocketContext::getTail() const
284 if (http
->client_stream
.tail
)
285 return (clientStreamNode
*)http
->client_stream
.tail
->data
;
291 ClientSocketContext::getClientReplyContext() const
293 return (clientStreamNode
*)http
->client_stream
.tail
->prev
->data
;
297 ClientSocketContext::getConn() const
299 return http
->getConn();
303 * This routine should be called to grow the in.buf and then
307 ConnStateData::readSomeData()
312 debugs(33, 4, HERE
<< clientConnection
<< ": reading request...");
314 if (!in
.maybeMakeSpaceAvailable())
317 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
318 reader
= JobCallback(33, 5, Dialer
, this, ConnStateData::clientReadRequest
);
319 Comm::Read(clientConnection
, reader
);
323 ConnStateData::readSomeFtpData()
325 if (ftp
.reader
!= NULL
)
328 const size_t availSpace
= sizeof(ftp
.uploadBuf
) - ftp
.uploadAvailSize
;
332 debugs(33, 4, HERE
<< ftp
.dataConn
<< ": reading FTP data...");
334 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
335 ftp
.reader
= JobCallback(33, 5, Dialer
, this,
336 ConnStateData::clientReadFtpData
);
337 comm_read(ftp
.dataConn
, ftp
.uploadBuf
+ ftp
.uploadAvailSize
, availSpace
,
342 ClientSocketContext::removeFromConnectionList(ConnStateData
* conn
)
344 ClientSocketContext::Pointer
*tempContextPointer
;
345 assert(conn
!= NULL
&& cbdataReferenceValid(conn
));
346 assert(conn
->getCurrentContext() != NULL
);
347 /* Unlink us from the connection request list */
348 tempContextPointer
= & conn
->currentobject
;
350 while (tempContextPointer
->getRaw()) {
351 if (*tempContextPointer
== this)
354 tempContextPointer
= &(*tempContextPointer
)->next
;
357 assert(tempContextPointer
->getRaw() != NULL
);
358 *tempContextPointer
= next
;
362 ClientSocketContext::~ClientSocketContext()
364 clientStreamNode
*node
= getTail();
367 ClientSocketContext
*streamContext
= dynamic_cast<ClientSocketContext
*> (node
->data
.getRaw());
370 /* We are *always* the tail - prevent recursive free */
371 assert(this == streamContext
);
377 deRegisterWithConn();
379 httpRequestFree(http
);
381 /* clean up connection links to us */
382 assert(this != next
.getRaw());
386 ClientSocketContext::registerWithConn()
388 assert (!connRegistered_
);
390 assert (http
->getConn() != NULL
);
391 connRegistered_
= true;
392 http
->getConn()->addContextToQueue(this);
396 ClientSocketContext::deRegisterWithConn()
398 assert (connRegistered_
);
399 removeFromConnectionList(http
->getConn());
400 connRegistered_
= false;
404 ClientSocketContext::connIsFinished()
407 assert (http
->getConn() != NULL
);
408 deRegisterWithConn();
409 /* we can't handle any more stream data - detach */
410 clientStreamDetach(getTail(), http
);
413 ClientSocketContext::ClientSocketContext(const Comm::ConnectionPointer
&aConn
, ClientHttpRequest
*aReq
) :
414 clientConnection(aConn
),
419 mayUseConnection_ (false),
420 connRegistered_ (false)
422 assert(http
!= NULL
);
423 memset (reqbuf
, '\0', sizeof (reqbuf
));
426 deferredparams
.node
= NULL
;
427 deferredparams
.rep
= NULL
;
431 ClientSocketContext::writeControlMsg(HttpControlMsg
&msg
)
433 const HttpReply::Pointer
rep(msg
.reply
);
436 // remember the callback
437 cbControlMsgSent
= msg
.cbSuccess
;
439 AsyncCall::Pointer call
= commCbCall(33, 5, "ClientSocketContext::wroteControlMsg",
440 CommIoCbPtrFun(&WroteControlMsg
, this));
442 if (getConn()->isFtp
) {
443 FtpWriteForwardedReply(this, rep
.getRaw(), call
);
447 // apply selected clientReplyContext::buildReplyHeader() mods
448 // it is not clear what headers are required for control messages
449 rep
->header
.removeHopByHopEntries();
450 rep
->header
.putStr(HDR_CONNECTION
, "keep-alive");
451 httpHdrMangleList(&rep
->header
, http
->request
, ROR_REPLY
);
453 MemBuf
*mb
= rep
->pack();
455 debugs(11, 2, "HTTP Client " << clientConnection
);
456 debugs(11, 2, "HTTP Client CONTROL MSG:\n---------\n" << mb
->buf
<< "\n----------");
458 Comm::Write(clientConnection
, mb
, call
);
463 /// called when we wrote the 1xx response
465 ClientSocketContext::wroteControlMsg(const Comm::ConnectionPointer
&conn
, char *, size_t, Comm::Flag errflag
, int xerrno
)
467 if (errflag
== Comm::ERR_CLOSING
)
470 if (errflag
== Comm::OK
) {
471 ScheduleCallHere(cbControlMsgSent
);
475 debugs(33, 3, HERE
<< "1xx writing failed: " << xstrerr(xerrno
));
476 // no error notification: see HttpControlMsg.h for rationale and
477 // note that some errors are detected elsewhere (e.g., close handler)
479 // close on 1xx errors to be conservative and to simplify the code
480 // (if we do not close, we must notify the source of a failure!)
484 /// wroteControlMsg() wrapper: ClientSocketContext is not an AsyncJob
486 ClientSocketContext::WroteControlMsg(const Comm::ConnectionPointer
&conn
, char *bufnotused
, size_t size
, Comm::Flag errflag
, int xerrno
, void *data
)
488 ClientSocketContext
*context
= static_cast<ClientSocketContext
*>(data
);
489 context
->wroteControlMsg(conn
, bufnotused
, size
, errflag
, xerrno
);
494 clientIdentDone(const char *ident
, void *data
)
496 ConnStateData
*conn
= (ConnStateData
*)data
;
497 xstrncpy(conn
->clientConnection
->rfc931
, ident
? ident
: dash_str
, USER_IDENT_SZ
);
502 clientUpdateStatCounters(LogTags logType
)
504 ++statCounter
.client_http
.requests
;
506 if (logTypeIsATcpHit(logType
))
507 ++statCounter
.client_http
.hits
;
509 if (logType
== LOG_TCP_HIT
)
510 ++statCounter
.client_http
.disk_hits
;
511 else if (logType
== LOG_TCP_MEM_HIT
)
512 ++statCounter
.client_http
.mem_hits
;
516 clientUpdateStatHistCounters(LogTags logType
, int svc_time
)
518 statCounter
.client_http
.allSvcTime
.count(svc_time
);
520 * The idea here is not to be complete, but to get service times
521 * for only well-defined types. For example, we don't include
522 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
523 * (we *tried* to validate it, but failed).
528 case LOG_TCP_REFRESH_UNMODIFIED
:
529 statCounter
.client_http
.nearHitSvcTime
.count(svc_time
);
532 case LOG_TCP_IMS_HIT
:
533 statCounter
.client_http
.nearMissSvcTime
.count(svc_time
);
538 case LOG_TCP_MEM_HIT
:
540 case LOG_TCP_OFFLINE_HIT
:
541 statCounter
.client_http
.hitSvcTime
.count(svc_time
);
546 case LOG_TCP_CLIENT_REFRESH_MISS
:
547 statCounter
.client_http
.missSvcTime
.count(svc_time
);
551 /* make compiler warnings go away */
557 clientPingHasFinished(ping_data
const *aPing
)
559 if (0 != aPing
->stop
.tv_sec
&& 0 != aPing
->start
.tv_sec
)
566 clientUpdateHierCounters(HierarchyLogEntry
* someEntry
)
570 switch (someEntry
->code
) {
571 #if USE_CACHE_DIGESTS
576 ++ statCounter
.cd
.times_used
;
584 case FIRST_PARENT_MISS
:
586 case CLOSEST_PARENT_MISS
:
587 ++ statCounter
.icp
.times_used
;
588 i
= &someEntry
->ping
;
590 if (clientPingHasFinished(i
))
591 statCounter
.icp
.querySvcTime
.count(tvSubUsec(i
->start
, i
->stop
));
594 ++ statCounter
.icp
.query_timeouts
;
601 ++ statCounter
.netdb
.times_used
;
611 ClientHttpRequest::updateCounters()
613 clientUpdateStatCounters(logType
);
615 if (request
->errType
!= ERR_NONE
)
616 ++ statCounter
.client_http
.errors
;
618 clientUpdateStatHistCounters(logType
,
619 tvSubMsec(al
->cache
.start_time
, current_time
));
621 clientUpdateHierCounters(&request
->hier
);
625 prepareLogWithRequestDetails(HttpRequest
* request
, AccessLogEntry::Pointer
&aLogEntry
)
628 assert(aLogEntry
!= NULL
);
630 if (Config
.onoff
.log_mime_hdrs
) {
634 packerToMemInit(&p
, &mb
);
635 request
->header
.packInto(&p
);
636 //This is the request after adaptation or redirection
637 aLogEntry
->headers
.adapted_request
= xstrdup(mb
.buf
);
639 // the virgin request is saved to aLogEntry->request
640 if (aLogEntry
->request
) {
643 packerToMemInit(&p
, &mb
);
644 aLogEntry
->request
->header
.packInto(&p
);
645 aLogEntry
->headers
.request
= xstrdup(mb
.buf
);
649 const Adaptation::History::Pointer ah
= request
->adaptLogHistory();
653 packerToMemInit(&p
, &mb
);
654 ah
->lastMeta
.packInto(&p
);
655 aLogEntry
->adapt
.last_meta
= xstrdup(mb
.buf
);
664 const Adaptation::Icap::History::Pointer ih
= request
->icapHistory();
666 aLogEntry
->icap
.processingTime
= ih
->processingTime();
669 aLogEntry
->http
.method
= request
->method
;
670 aLogEntry
->http
.version
= request
->http_ver
;
671 aLogEntry
->hier
= request
->hier
;
672 if (request
->content_length
> 0) // negative when no body or unknown length
673 aLogEntry
->http
.clientRequestSz
.payloadData
+= request
->content_length
; // XXX: actually adaptedRequest payload size ??
674 aLogEntry
->cache
.extuser
= request
->extacl_user
.termedBuf();
676 // Adapted request, if any, inherits and then collects all the stats, but
677 // the virgin request gets logged instead; copy the stats to log them.
678 // TODO: avoid losses by keeping these stats in a shared history object?
679 if (aLogEntry
->request
) {
680 aLogEntry
->request
->dnsWait
= request
->dnsWait
;
681 aLogEntry
->request
->errType
= request
->errType
;
682 aLogEntry
->request
->errDetail
= request
->errDetail
;
687 ClientHttpRequest::logRequest()
689 if (!out
.size
&& !logType
)
690 debugs(33, 5, HERE
<< "logging half-baked transaction: " << log_uri
);
692 al
->icp
.opcode
= ICP_INVALID
;
694 debugs(33, 9, "clientLogRequest: al.url='" << al
->url
<< "'");
697 al
->http
.code
= al
->reply
->sline
.status();
698 al
->http
.content_type
= al
->reply
->content_type
.termedBuf();
699 } else if (loggingEntry() && loggingEntry()->mem_obj
) {
700 al
->http
.code
= loggingEntry()->mem_obj
->getReply()->sline
.status();
701 al
->http
.content_type
= loggingEntry()->mem_obj
->getReply()->content_type
.termedBuf();
704 debugs(33, 9, "clientLogRequest: http.code='" << al
->http
.code
<< "'");
706 if (loggingEntry() && loggingEntry()->mem_obj
&& loggingEntry()->objectLen() >= 0)
707 al
->cache
.objectSize
= loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?
709 al
->http
.clientRequestSz
.header
= req_sz
;
710 al
->http
.clientReplySz
.header
= out
.headers_sz
;
711 // XXX: calculate without payload encoding or headers !!
712 al
->http
.clientReplySz
.payloadData
= out
.size
- out
.headers_sz
; // pretend its all un-encoded data for now.
714 al
->cache
.highOffset
= out
.offset
;
716 al
->cache
.code
= logType
;
718 al
->cache
.msec
= tvSubMsec(al
->cache
.start_time
, current_time
);
721 prepareLogWithRequestDetails(request
, al
);
723 if (getConn() != NULL
&& getConn()->clientConnection
!= NULL
&& getConn()->clientConnection
->rfc931
[0])
724 al
->cache
.rfc931
= getConn()->clientConnection
->rfc931
;
728 /* This is broken. Fails if the connection has been closed. Needs
729 * to snarf the ssl details some place earlier..
731 if (getConn() != NULL
)
732 al
->cache
.ssluser
= sslGetUserEmail(fd_table
[getConn()->fd
].ssl
);
737 // The al->notes and request->notes must point to the same object.
738 (void)SyncNotes(*al
, *request
);
739 typedef Notes::iterator ACAMLI
;
740 for (ACAMLI i
= Config
.notes
.begin(); i
!= Config
.notes
.end(); ++i
) {
741 if (const char *value
= (*i
)->match(request
, al
->reply
, NULL
)) {
742 NotePairs
¬es
= SyncNotes(*al
, *request
);
743 notes
.add((*i
)->key
.termedBuf(), value
);
744 debugs(33, 3, HERE
<< (*i
)->key
.termedBuf() << " " << value
);
748 ACLFilledChecklist
checklist(NULL
, request
, NULL
);
750 checklist
.reply
= al
->reply
;
751 HTTPMSGLOCK(checklist
.reply
);
755 al
->adapted_request
= request
;
756 HTTPMSGLOCK(al
->adapted_request
);
758 accessLogLog(al
, &checklist
);
760 bool updatePerformanceCounters
= true;
761 if (Config
.accessList
.stats_collection
) {
762 ACLFilledChecklist
statsCheck(Config
.accessList
.stats_collection
, request
, NULL
);
764 statsCheck
.reply
= al
->reply
;
765 HTTPMSGLOCK(statsCheck
.reply
);
767 updatePerformanceCounters
= (statsCheck
.fastCheck() == ACCESS_ALLOWED
);
770 if (updatePerformanceCounters
) {
774 if (getConn() != NULL
&& getConn()->clientConnection
!= NULL
)
775 clientdbUpdate(getConn()->clientConnection
->remote
, logType
, AnyP::PROTO_HTTP
, out
.size
);
780 ClientHttpRequest::freeResources()
784 safe_free(redirect
.location
);
785 range_iter
.boundary
.clean();
786 HTTPMSGUNLOCK(request
);
788 if (client_stream
.tail
)
789 clientStreamAbort((clientStreamNode
*)client_stream
.tail
->data
, this);
793 httpRequestFree(void *data
)
795 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
796 assert(http
!= NULL
);
801 ConnStateData::areAllContextsForThisConnection() const
803 assert(this != NULL
);
804 ClientSocketContext::Pointer context
= getCurrentContext();
806 while (context
.getRaw()) {
807 if (context
->http
->getConn() != this)
810 context
= context
->next
;
817 ConnStateData::freeAllContexts()
819 ClientSocketContext::Pointer context
;
821 while ((context
= getCurrentContext()).getRaw() != NULL
) {
822 assert(getCurrentContext() !=
823 getCurrentContext()->next
);
824 context
->connIsFinished();
825 assert (context
!= currentobject
);
829 /// propagates abort event to all contexts
831 ConnStateData::notifyAllContexts(int xerrno
)
833 typedef ClientSocketContext::Pointer CSCP
;
834 for (CSCP c
= getCurrentContext(); c
.getRaw(); c
= c
->next
)
835 c
->noteIoError(xerrno
);
838 /* This is a handler normally called by comm_close() */
839 void ConnStateData::connStateClosed(const CommCloseCbParams
&io
)
841 deleteThis("ConnStateData::connStateClosed");
846 ConnStateData::setAuth(const Auth::UserRequest::Pointer
&aur
, const char *by
)
850 debugs(33, 2, "Adding connection-auth to " << clientConnection
<< " from " << by
);
856 // clobered with self-pointer
857 // NP: something nasty is going on in Squid, but harmless.
859 debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection
<< " from " << by
);
864 * Connection-auth relies on a single set of credentials being preserved
865 * for all requests on a connection once they have been setup.
866 * There are several things which need to happen to preserve security
867 * when connection-auth credentials change unexpectedly or are unset.
869 * 1) auth helper released from any active state
871 * They can only be reserved by a handshake process which this
872 * connection can now never complete.
873 * This prevents helpers hanging when their connections close.
875 * 2) pinning is expected to be removed and server conn closed
877 * The upstream link is authenticated with the same credentials.
878 * Expecting the same level of consistency we should have received.
879 * This prevents upstream being faced with multiple or missing
880 * credentials after authentication.
881 * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
882 * we just trigger that cleanup here via comm_reset_close() or
883 * ConnStateData::stopReceiving()
885 * 3) the connection needs to close.
887 * This prevents attackers injecting requests into a connection,
888 * or gateways wrongly multiplexing users into a single connection.
890 * When credentials are missing closure needs to follow an auth
891 * challenge for best recovery by the client.
893 * When credentials change there is nothing we can do but abort as
894 * fast as possible. Sending TCP RST instead of an HTTP response
895 * is the best-case action.
898 // clobbered with nul-pointer
900 debugs(33, 2, "WARNING: Graceful closure on " << clientConnection
<< " due to connection-auth erase from " << by
);
901 auth_
->releaseAuthServer();
903 // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
904 // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
905 // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
906 stopReceiving("connection-auth removed");
910 // clobbered with alternative credentials
912 debugs(33, 2, "ERROR: Closing " << clientConnection
<< " due to change of connection-auth from " << by
);
913 auth_
->releaseAuthServer();
915 // this is a fatal type of problem.
916 // Close the connection immediately with TCP RST to abort all traffic flow
917 comm_reset_close(clientConnection
);
925 // cleans up before destructor is called
927 ConnStateData::swanSong()
929 debugs(33, 2, HERE
<< clientConnection
);
930 flags
.readMore
= false;
931 clientdbEstablished(clientConnection
->remote
, -1); /* decrement */
932 assert(areAllContextsForThisConnection());
935 unpinConnection(true);
937 if (Comm::IsConnOpen(clientConnection
))
938 clientConnection
->close();
941 // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
942 setAuth(NULL
, "ConnStateData::SwanSong cleanup");
945 BodyProducer::swanSong();
946 flags
.swanSang
= true;
950 ConnStateData::isOpen() const
952 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
953 Comm::IsConnOpen(clientConnection
) &&
954 !fd_table
[clientConnection
->fd
].closing();
957 ConnStateData::~ConnStateData()
959 assert(this != NULL
);
960 debugs(33, 3, HERE
<< clientConnection
);
962 FtpCloseDataConnection(this);
965 debugs(33, DBG_IMPORTANT
, "BUG: ConnStateData did not close " << clientConnection
);
968 debugs(33, DBG_IMPORTANT
, "BUG: ConnStateData was not destroyed properly; " << clientConnection
);
970 if (bodyPipe
!= NULL
)
971 stopProducingFor(bodyPipe
, false);
974 delete sslServerBump
;
979 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
980 * This is the client-side persistent connection flag. We need
981 * to set this relatively early in the request processing
982 * to handle hacks for broken servers and clients.
985 clientSetKeepaliveFlag(ClientHttpRequest
* http
)
987 HttpRequest
*request
= http
->request
;
989 debugs(33, 3, "http_ver = " << request
->http_ver
);
990 debugs(33, 3, "method = " << request
->method
);
992 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
993 request
->flags
.proxyKeepalive
= request
->persistent();
997 clientIsContentLengthValid(HttpRequest
* r
)
999 switch (r
->method
.id()) {
1001 case Http::METHOD_GET
:
1003 case Http::METHOD_HEAD
:
1004 /* We do not want to see a request entity on GET/HEAD requests */
1005 return (r
->content_length
<= 0 || Config
.onoff
.request_entities
);
1008 /* For other types of requests we don't care */
1016 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
)
1018 if (Config
.maxRequestBodySize
&&
1019 bodyLength
> Config
.maxRequestBodySize
)
1020 return 1; /* too large */
1027 connIsUsable(ConnStateData
* conn
)
1029 if (conn
== NULL
|| !cbdataReferenceValid(conn
) || !Comm::IsConnOpen(conn
->clientConnection
))
1037 // careful: the "current" context may be gone if we wrote an early response
1038 ClientSocketContext::Pointer
1039 ConnStateData::getCurrentContext() const
1042 return currentobject
;
1046 ClientSocketContext::deferRecipientForLater(clientStreamNode
* node
, HttpReply
* rep
, StoreIOBuffer receivedData
)
1048 debugs(33, 2, "clientSocketRecipient: Deferring request " << http
->uri
);
1049 assert(flags
.deferred
== 0);
1051 deferredparams
.node
= node
;
1052 deferredparams
.rep
= rep
;
1053 deferredparams
.queuedBuffer
= receivedData
;
1058 responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const & receivedData
)
1060 if (rep
== NULL
&& receivedData
.data
== NULL
&& receivedData
.length
== 0)
1067 ClientSocketContext::startOfOutput() const
1069 return http
->out
.size
== 0;
1073 ClientSocketContext::lengthToSend(Range
<int64_t> const &available
)
1075 /*the size of available range can always fit in a size_t type*/
1076 size_t maximum
= (size_t)available
.size();
1078 if (!http
->request
->range
)
1081 assert (canPackMoreRanges());
1083 if (http
->range_iter
.debt() == -1)
1086 assert (http
->range_iter
.debt() > 0);
1088 /* TODO this + the last line could be a range intersection calculation */
1089 if (available
.start
< http
->range_iter
.currentSpec()->offset
)
1092 return min(http
->range_iter
.debt(), (int64_t)maximum
);
1096 ClientSocketContext::noteSentBodyBytes(size_t bytes
)
1098 http
->out
.offset
+= bytes
;
1100 if (!http
->request
->range
)
1103 if (http
->range_iter
.debt() != -1) {
1104 http
->range_iter
.debt(http
->range_iter
.debt() - bytes
);
1105 assert (http
->range_iter
.debt() >= 0);
1108 /* debt() always stops at -1, below that is a bug */
1109 assert (http
->range_iter
.debt() >= -1);
1113 ClientHttpRequest::multipartRangeRequest() const
1115 return request
->multipartRangeRequest();
1119 ClientSocketContext::multipartRangeRequest() const
1121 return http
->multipartRangeRequest();
1125 ClientSocketContext::sendBody(HttpReply
* rep
, StoreIOBuffer bodyData
)
1127 assert(rep
== NULL
);
1129 if (!multipartRangeRequest() && !http
->request
->flags
.chunkedReply
) {
1130 size_t length
= lengthToSend(bodyData
.range());
1131 noteSentBodyBytes (length
);
1132 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteBodyComplete",
1133 CommIoCbPtrFun(clientWriteBodyComplete
, this));
1134 Comm::Write(clientConnection
, bodyData
.data
, length
, call
, NULL
);
1140 if (multipartRangeRequest())
1141 packRange(bodyData
, &mb
);
1143 packChunk(bodyData
, mb
);
1145 if (mb
.contentSize()) {
1147 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
1148 CommIoCbPtrFun(clientWriteComplete
, this));
1149 Comm::Write(clientConnection
, &mb
, call
);
1151 writeComplete(clientConnection
, NULL
, 0, Comm::OK
);
1155 * Packs bodyData into mb using chunked encoding. Packs the last-chunk
1156 * if bodyData is empty.
1159 ClientSocketContext::packChunk(const StoreIOBuffer
&bodyData
, MemBuf
&mb
)
1161 const uint64_t length
=
1162 static_cast<uint64_t>(lengthToSend(bodyData
.range()));
1163 noteSentBodyBytes(length
);
1165 mb
.Printf("%" PRIX64
"\r\n", length
);
1166 mb
.append(bodyData
.data
, length
);
1170 /** put terminating boundary for multiparts */
1172 clientPackTermBound(String boundary
, MemBuf
* mb
)
1174 mb
->Printf("\r\n--" SQUIDSTRINGPH
"--\r\n", SQUIDSTRINGPRINT(boundary
));
1175 debugs(33, 6, "clientPackTermBound: buf offset: " << mb
->size
);
1178 /** appends a "part" HTTP header (as in a multi-part/range reply) to the buffer */
1180 clientPackRangeHdr(const HttpReply
* rep
, const HttpHdrRangeSpec
* spec
, String boundary
, MemBuf
* mb
)
1182 HttpHeader
hdr(hoReply
);
1188 debugs(33, 5, "clientPackRangeHdr: appending boundary: " << boundary
);
1189 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
1190 mb
->Printf("\r\n--" SQUIDSTRINGPH
"\r\n", SQUIDSTRINGPRINT(boundary
));
1192 /* stuff the header with required entries and pack it */
1194 if (rep
->header
.has(HDR_CONTENT_TYPE
))
1195 hdr
.putStr(HDR_CONTENT_TYPE
, rep
->header
.getStr(HDR_CONTENT_TYPE
));
1197 httpHeaderAddContRange(&hdr
, *spec
, rep
->content_length
);
1199 packerToMemInit(&p
, mb
);
1207 /* append <crlf> (we packed a header, not a reply) */
1212 * extracts a "range" from *buf and appends them to mb, updating
1213 * all offsets and such.
1216 ClientSocketContext::packRange(StoreIOBuffer
const &source
, MemBuf
* mb
)
1218 HttpHdrRangeIter
* i
= &http
->range_iter
;
1219 Range
<int64_t> available (source
.range());
1220 char const *buf
= source
.data
;
1222 while (i
->currentSpec() && available
.size()) {
1223 const size_t copy_sz
= lengthToSend(available
);
1227 * intersection of "have" and "need" ranges must not be empty
1229 assert(http
->out
.offset
< i
->currentSpec()->offset
+ i
->currentSpec()->length
);
1230 assert(http
->out
.offset
+ (int64_t)available
.size() > i
->currentSpec()->offset
);
1233 * put boundary and headers at the beginning of a range in a
1237 if (http
->multipartRangeRequest() && i
->debt() == i
->currentSpec()->length
) {
1238 assert(http
->memObject());
1240 http
->memObject()->getReply(), /* original reply */
1241 i
->currentSpec(), /* current range */
1242 i
->boundary
, /* boundary, the same for all */
1249 debugs(33, 3, "clientPackRange: appending " << copy_sz
<< " bytes");
1251 noteSentBodyBytes (copy_sz
);
1253 mb
->append(buf
, copy_sz
);
1258 available
.start
+= copy_sz
;
1264 if (!canPackMoreRanges()) {
1265 debugs(33, 3, "clientPackRange: Returning because !canPackMoreRanges.");
1268 /* put terminating boundary for multiparts */
1269 clientPackTermBound(i
->boundary
, mb
);
1274 int64_t nextOffset
= getNextRangeOffset();
1276 assert (nextOffset
>= http
->out
.offset
);
1278 int64_t skip
= nextOffset
- http
->out
.offset
;
1280 /* adjust for not to be transmitted bytes */
1281 http
->out
.offset
= nextOffset
;
1283 if (available
.size() <= (uint64_t)skip
)
1286 available
.start
+= skip
;
1295 /** returns expected content length for multi-range replies
1296 * note: assumes that httpHdrRangeCanonize has already been called
1297 * warning: assumes that HTTP headers for individual ranges at the
1298 * time of the actuall assembly will be exactly the same as
1299 * the headers when clientMRangeCLen() is called */
1301 ClientHttpRequest::mRangeCLen()
1306 assert(memObject());
1309 HttpHdrRange::iterator pos
= request
->range
->begin();
1311 while (pos
!= request
->range
->end()) {
1312 /* account for headers for this range */
1314 clientPackRangeHdr(memObject()->getReply(),
1315 *pos
, range_iter
.boundary
, &mb
);
1318 /* account for range content */
1319 clen
+= (*pos
)->length
;
1321 debugs(33, 6, "clientMRangeCLen: (clen += " << mb
.size
<< " + " << (*pos
)->length
<< ") == " << clen
);
1325 /* account for the terminating boundary */
1328 clientPackTermBound(range_iter
.boundary
, &mb
);
1338 * returns true if If-Range specs match reply, false otherwise
1341 clientIfRangeMatch(ClientHttpRequest
* http
, HttpReply
* rep
)
1343 const TimeOrTag spec
= http
->request
->header
.getTimeOrTag(HDR_IF_RANGE
);
1344 /* check for parsing falure */
1351 ETag rep_tag
= rep
->header
.getETag(HDR_ETAG
);
1352 debugs(33, 3, "clientIfRangeMatch: ETags: " << spec
.tag
.str
<< " and " <<
1353 (rep_tag
.str
? rep_tag
.str
: "<none>"));
1356 return 0; /* entity has no etag to compare with! */
1358 if (spec
.tag
.weak
|| rep_tag
.weak
) {
1359 debugs(33, DBG_IMPORTANT
, "clientIfRangeMatch: Weak ETags are not allowed in If-Range: " << spec
.tag
.str
<< " ? " << rep_tag
.str
);
1360 return 0; /* must use strong validator for sub-range requests */
1363 return etagIsStrongEqual(rep_tag
, spec
.tag
);
1366 /* got modification time? */
1367 if (spec
.time
>= 0) {
1368 return http
->storeEntry()->lastmod
<= spec
.time
;
1371 assert(0); /* should not happen */
1376 * generates a "unique" boundary string for multipart responses
1377 * the caller is responsible for cleaning the string */
1379 ClientHttpRequest::rangeBoundaryStr() const
1383 String
b(APP_FULLNAME
);
1385 key
= storeEntry()->getMD5Text();
1386 b
.append(key
, strlen(key
));
1390 /** adds appropriate Range headers if needed */
1392 ClientSocketContext::buildRangeHeader(HttpReply
* rep
)
1394 HttpHeader
*hdr
= rep
? &rep
->header
: 0;
1395 const char *range_err
= NULL
;
1396 HttpRequest
*request
= http
->request
;
1397 assert(request
->range
);
1398 /* check if we still want to do ranges */
1400 int64_t roffLimit
= request
->getRangeOffsetLimit();
1403 range_err
= "no [parse-able] reply";
1404 else if ((rep
->sline
.status() != Http::scOkay
) && (rep
->sline
.status() != Http::scPartialContent
))
1405 range_err
= "wrong status code";
1406 else if (hdr
->has(HDR_CONTENT_RANGE
))
1407 range_err
= "origin server does ranges";
1408 else if (rep
->content_length
< 0)
1409 range_err
= "unknown length";
1410 else if (rep
->content_length
!= http
->memObject()->getReply()->content_length
)
1411 range_err
= "INCONSISTENT length"; /* a bug? */
1413 /* hits only - upstream CachePeer determines correct behaviour on misses, and client_side_reply determines
1416 else if (logTypeIsATcpHit(http
->logType
) && http
->request
->header
.has(HDR_IF_RANGE
) && !clientIfRangeMatch(http
, rep
))
1417 range_err
= "If-Range match failed";
1418 else if (!http
->request
->range
->canonize(rep
))
1419 range_err
= "canonization failed";
1420 else if (http
->request
->range
->isComplex())
1421 range_err
= "too complex range header";
1422 else if (!logTypeIsATcpHit(http
->logType
) && http
->request
->range
->offsetLimitExceeded(roffLimit
))
1423 range_err
= "range outside range_offset_limit";
1425 /* get rid of our range specs on error */
1427 /* XXX We do this here because we need canonisation etc. However, this current
1428 * code will lead to incorrect store offset requests - the store will have the
1429 * offset data, but we won't be requesting it.
1430 * So, we can either re-request, or generate an error
1432 http
->request
->ignoreRange(range_err
);
1434 /* XXX: TODO: Review, this unconditional set may be wrong. */
1435 rep
->sline
.set(rep
->sline
.version
, Http::scPartialContent
);
1436 // web server responded with a valid, but unexpected range.
1437 // will (try-to) forward as-is.
1438 //TODO: we should cope with multirange request/responses
1439 bool replyMatchRequest
= rep
->content_range
!= NULL
?
1440 request
->range
->contains(rep
->content_range
->spec
) :
1442 const int spec_count
= http
->request
->range
->specs
.size();
1443 int64_t actual_clen
= -1;
1445 debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
1446 spec_count
<< " virgin clen: " << rep
->content_length
);
1447 assert(spec_count
> 0);
1448 /* append appropriate header(s) */
1450 if (spec_count
== 1) {
1451 if (!replyMatchRequest
) {
1452 hdr
->delById(HDR_CONTENT_RANGE
);
1453 hdr
->putContRange(rep
->content_range
);
1454 actual_clen
= rep
->content_length
;
1455 //http->range_iter.pos = rep->content_range->spec.begin();
1456 (*http
->range_iter
.pos
)->offset
= rep
->content_range
->spec
.offset
;
1457 (*http
->range_iter
.pos
)->length
= rep
->content_range
->spec
.length
;
1460 HttpHdrRange::iterator pos
= http
->request
->range
->begin();
1462 /* append Content-Range */
1464 if (!hdr
->has(HDR_CONTENT_RANGE
)) {
1465 /* No content range, so this was a full object we are
1468 httpHeaderAddContRange(hdr
, **pos
, rep
->content_length
);
1471 /* set new Content-Length to the actual number of bytes
1472 * transmitted in the message-body */
1473 actual_clen
= (*pos
)->length
;
1477 /* generate boundary string */
1478 http
->range_iter
.boundary
= http
->rangeBoundaryStr();
1479 /* delete old Content-Type, add ours */
1480 hdr
->delById(HDR_CONTENT_TYPE
);
1481 httpHeaderPutStrf(hdr
, HDR_CONTENT_TYPE
,
1482 "multipart/byteranges; boundary=\"" SQUIDSTRINGPH
"\"",
1483 SQUIDSTRINGPRINT(http
->range_iter
.boundary
));
1484 /* Content-Length is not required in multipart responses
1485 * but it is always nice to have one */
1486 actual_clen
= http
->mRangeCLen();
1487 /* http->out needs to start where we want data at */
1488 http
->out
.offset
= http
->range_iter
.currentSpec()->offset
;
1491 /* replace Content-Length header */
1492 assert(actual_clen
>= 0);
1494 hdr
->delById(HDR_CONTENT_LENGTH
);
1496 hdr
->putInt64(HDR_CONTENT_LENGTH
, actual_clen
);
1498 debugs(33, 3, "clientBuildRangeHeader: actual content length: " << actual_clen
);
1500 /* And start the range iter off */
1501 http
->range_iter
.updateSpec();
1506 ClientSocketContext::prepareReply(HttpReply
* rep
)
1510 if (http
->request
->range
)
1511 buildRangeHeader(rep
);
1515 ClientSocketContext::sendStartOfMessage(HttpReply
* rep
, StoreIOBuffer bodyData
)
1519 MemBuf
*mb
= rep
->pack();
1521 // dump now, so we dont output any body.
1522 debugs(11, 2, "HTTP Client " << clientConnection
);
1523 debugs(11, 2, "HTTP Client REPLY:\n---------\n" << mb
->buf
<< "\n----------");
1525 /* Save length of headers for persistent conn checks */
1526 http
->out
.headers_sz
= mb
->contentSize();
1529 headersLog(0, 0, http
->request
->method
, rep
);
1532 if (bodyData
.data
&& bodyData
.length
) {
1533 if (multipartRangeRequest())
1534 packRange(bodyData
, mb
);
1535 else if (http
->request
->flags
.chunkedReply
) {
1536 packChunk(bodyData
, *mb
);
1538 size_t length
= lengthToSend(bodyData
.range());
1539 noteSentBodyBytes (length
);
1541 mb
->append(bodyData
.data
, length
);
1546 debugs(33,7, HERE
<< "sendStartOfMessage schedules clientWriteComplete");
1547 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
1548 CommIoCbPtrFun(clientWriteComplete
, this));
1549 Comm::Write(clientConnection
, mb
, call
);
1554 * Write a chunk of data to a client socket. If the reply is present,
1555 * send the reply headers down the wire too, and clean them up when
1558 * The request is one backed by a connection, not an internal request.
1559 * data context is not NULL
1560 * There are no more entries in the stream chain.
1563 clientSocketRecipient(clientStreamNode
* node
, ClientHttpRequest
* http
,
1564 HttpReply
* rep
, StoreIOBuffer receivedData
)
1566 debugs(33,7, HERE
<< "rep->content_length=" << (rep
? rep
->content_length
: -2) << " receivedData.length=" << receivedData
.length
);
1567 /* Test preconditions */
1568 assert(node
!= NULL
);
1569 PROF_start(clientSocketRecipient
);
1570 /* TODO: handle this rather than asserting
1571 * - it should only ever happen if we cause an abort and
1572 * the callback chain loops back to here, so we can simply return.
1573 * However, that itself shouldn't happen, so it stays as an assert for now.
1575 assert(cbdataReferenceValid(node
));
1576 assert(node
->node
.next
== NULL
);
1577 ClientSocketContext::Pointer context
= dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw());
1578 assert(context
!= NULL
);
1579 assert(connIsUsable(http
->getConn()));
1581 /* TODO: check offset is what we asked for */
1583 if (context
!= http
->getConn()->getCurrentContext()) {
1584 context
->deferRecipientForLater(node
, rep
, receivedData
);
1585 PROF_stop(clientSocketRecipient
);
1589 if (http
->getConn()->isFtp
) {
1590 assert(context
->http
== http
);
1591 FtpHandleReply(context
.getRaw(), rep
, receivedData
);
1592 PROF_stop(clientSocketRecipient
);
1596 // After sending Transfer-Encoding: chunked (at least), always send
1597 // the last-chunk if there was no error, ignoring responseFinishedOrFailed.
1598 const bool mustSendLastChunk
= http
->request
->flags
.chunkedReply
&&
1599 !http
->request
->flags
.streamError
&& !context
->startOfOutput();
1600 if (responseFinishedOrFailed(rep
, receivedData
) && !mustSendLastChunk
) {
1601 context
->writeComplete(context
->clientConnection
, NULL
, 0, Comm::OK
);
1602 PROF_stop(clientSocketRecipient
);
1606 if (!context
->startOfOutput())
1607 context
->sendBody(rep
, receivedData
);
1610 http
->al
->reply
= rep
;
1611 HTTPMSGLOCK(http
->al
->reply
);
1612 context
->sendStartOfMessage(rep
, receivedData
);
1615 PROF_stop(clientSocketRecipient
);
1619 * Called when a downstream node is no longer interested in
1620 * our data. As we are a terminal node, this means on aborts
1624 clientSocketDetach(clientStreamNode
* node
, ClientHttpRequest
* http
)
1626 /* Test preconditions */
1627 assert(node
!= NULL
);
1628 /* TODO: handle this rather than asserting
1629 * - it should only ever happen if we cause an abort and
1630 * the callback chain loops back to here, so we can simply return.
1631 * However, that itself shouldn't happen, so it stays as an assert for now.
1633 assert(cbdataReferenceValid(node
));
1634 /* Set null by ContextFree */
1635 assert(node
->node
.next
== NULL
);
1636 /* this is the assert discussed above */
1637 assert(NULL
== dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw()));
1638 /* We are only called when the client socket shutsdown.
1639 * Tell the prev pipeline member we're finished
1641 clientStreamDetach(node
, http
);
1645 clientWriteBodyComplete(const Comm::ConnectionPointer
&conn
, char *buf
, size_t size
, Comm::Flag errflag
, int xerrno
, void *data
)
1647 debugs(33,7, HERE
<< "clientWriteBodyComplete schedules clientWriteComplete");
1648 clientWriteComplete(conn
, NULL
, size
, errflag
, xerrno
, data
);
1652 ConnStateData::readNextRequest()
1654 debugs(33, 5, HERE
<< clientConnection
<< " reading next req");
1656 fd_note(clientConnection
->fd
, "Idle client: Waiting for next request");
1658 * Set the timeout BEFORE calling readSomeData().
1660 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1661 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5,
1662 TimeoutDialer
, this, ConnStateData::requestTimeout
);
1663 const int timeout
= isFtp
? Config
.Timeout
.ftpClientIdle
:
1664 Config
.Timeout
.clientIdlePconn
;
1665 commSetConnTimeout(clientConnection
, timeout
, timeoutCall
);
1668 /** Please don't do anything with the FD past here! */
1672 ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
)
1674 debugs(33, 2, HERE
<< conn
->clientConnection
<< " Sending next");
1676 /** If the client stream is waiting on a socket write to occur, then */
1678 if (deferredRequest
->flags
.deferred
) {
1679 /** NO data is allowed to have been sent. */
1680 assert(deferredRequest
->http
->out
.size
== 0);
1682 clientSocketRecipient(deferredRequest
->deferredparams
.node
,
1683 deferredRequest
->http
,
1684 deferredRequest
->deferredparams
.rep
,
1685 deferredRequest
->deferredparams
.queuedBuffer
);
1688 /** otherwise, the request is still active in a callbacksomewhere,
1693 /// called when we have successfully finished writing the response
1695 ClientSocketContext::keepaliveNextRequest()
1697 ConnStateData
* conn
= http
->getConn();
1699 debugs(33, 3, HERE
<< "ConnnStateData(" << conn
->clientConnection
<< "), Context(" << clientConnection
<< ")");
1702 if (conn
->pinning
.pinned
&& !Comm::IsConnOpen(conn
->pinning
.serverConnection
)) {
1703 debugs(33, 2, HERE
<< conn
->clientConnection
<< " Connection was pinned but server side gone. Terminating client connection");
1704 conn
->clientConnection
->close();
1709 * We are done with the response, and we are either still receiving request
1710 * body (early response!) or have already stopped receiving anything.
1712 * If we are still receiving, then clientParseRequest() below will fail.
1713 * (XXX: but then we will call readNextRequest() which may succeed and
1714 * execute a smuggled request as we are not done with the current request).
1716 * If we stopped because we got everything, then try the next request.
1718 * If we stopped receiving because of an error, then close now to avoid
1719 * getting stuck and to prevent accidental request smuggling.
1722 if (const char *reason
= conn
->stoppedReceiving()) {
1723 debugs(33, 3, HERE
<< "closing for earlier request error: " << reason
);
1724 conn
->clientConnection
->close();
1729 * Attempt to parse a request from the request buffer.
1730 * If we've been fed a pipelined request it may already
1731 * be in our read buffer.
1734 * This needs to fall through - if we're unlucky and parse the _last_ request
1735 * from our read buffer we may never re-register for another client read.
1738 if (conn
->clientParseRequests()) {
1739 debugs(33, 3, HERE
<< conn
->clientConnection
<< ": parsed next request from buffer");
1743 * Either we need to kick-start another read or, if we have
1744 * a half-closed connection, kill it after the last request.
1745 * This saves waiting for half-closed connections to finished being
1746 * half-closed _AND_ then, sometimes, spending "Timeout" time in
1747 * the keepalive "Waiting for next request" state.
1749 if (commIsHalfClosed(conn
->clientConnection
->fd
) && (conn
->getConcurrentRequestCount() == 0)) {
1750 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing");
1751 conn
->clientConnection
->close();
1755 ClientSocketContext::Pointer deferredRequest
;
1758 * At this point we either have a parsed request (which we've
1759 * kicked off the processing for) or not. If we have a deferred
1760 * request (parsed but deferred for pipeling processing reasons)
1761 * then look at processing it. If not, simply kickstart
1765 if ((deferredRequest
= conn
->getCurrentContext()).getRaw()) {
1766 debugs(33, 3, HERE
<< conn
->clientConnection
<< ": calling PushDeferredIfNeeded");
1767 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, conn
);
1768 } else if (conn
->flags
.readMore
) {
1769 debugs(33, 3, HERE
<< conn
->clientConnection
<< ": calling conn->readNextRequest()");
1770 conn
->readNextRequest();
1772 // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
1773 debugs(33, DBG_IMPORTANT
, HERE
<< "abandoning " << conn
->clientConnection
);
1778 clientUpdateSocketStats(LogTags logType
, size_t size
)
1783 kb_incr(&statCounter
.client_http
.kbytes_out
, size
);
1785 if (logTypeIsATcpHit(logType
))
1786 kb_incr(&statCounter
.client_http
.hit_kbytes_out
, size
);
1790 * increments iterator "i"
1791 * used by clientPackMoreRanges
1793 \retval true there is still data available to pack more ranges
1797 ClientSocketContext::canPackMoreRanges() const
1799 /** first update iterator "i" if needed */
1801 if (!http
->range_iter
.debt()) {
1802 debugs(33, 5, HERE
<< "At end of current range spec for " << clientConnection
);
1804 if (http
->range_iter
.pos
!= http
->range_iter
.end
)
1805 ++http
->range_iter
.pos
;
1807 http
->range_iter
.updateSpec();
1810 assert(!http
->range_iter
.debt() == !http
->range_iter
.currentSpec());
1812 /* paranoid sync condition */
1813 /* continue condition: need_more_data */
1814 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: returning " << (http
->range_iter
.currentSpec() ? true : false));
1815 return http
->range_iter
.currentSpec() ? true : false;
1819 ClientSocketContext::getNextRangeOffset() const
1821 debugs (33, 5, "range: " << http
->request
->range
<<
1822 "; http offset " << http
->out
.offset
<<
1823 "; reply " << reply
);
1825 // XXX: This method is called from many places, including pullData() which
1826 // may be called before prepareReply() [on some Squid-generated errors].
1827 // Hence, we may not even know yet whether we should honor/do ranges.
1829 if (http
->request
->range
) {
1830 /* offset in range specs does not count the prefix of an http msg */
1831 /* check: reply was parsed and range iterator was initialized */
1832 assert(http
->range_iter
.valid
);
1833 /* filter out data according to range specs */
1834 assert (canPackMoreRanges());
1836 int64_t start
; /* offset of still missing data */
1837 assert(http
->range_iter
.currentSpec());
1838 start
= http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
- http
->range_iter
.debt();
1839 debugs(33, 3, "clientPackMoreRanges: in: offset: " << http
->out
.offset
);
1840 debugs(33, 3, "clientPackMoreRanges: out:"
1841 " start: " << start
<<
1842 " spec[" << http
->range_iter
.pos
- http
->request
->range
->begin() << "]:" <<
1843 " [" << http
->range_iter
.currentSpec()->offset
<<
1844 ", " << http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
<< "),"
1845 " len: " << http
->range_iter
.currentSpec()->length
<<
1846 " debt: " << http
->range_iter
.debt());
1847 if (http
->range_iter
.currentSpec()->length
!= -1)
1848 assert(http
->out
.offset
<= start
); /* we did not miss it */
1853 } else if (reply
&& reply
->content_range
) {
1854 /* request does not have ranges, but reply does */
1855 /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range
1856 * becomes HttpHdrRange rather than HttpHdrRangeSpec.
1858 return http
->out
.offset
+ reply
->content_range
->spec
.offset
;
1861 return http
->out
.offset
;
1865 ClientSocketContext::pullData()
1867 debugs(33, 5, reply
<< " written " << http
->out
.size
<< " into " << clientConnection
);
1869 /* More data will be coming from the stream. */
1870 StoreIOBuffer readBuffer
;
1871 /* XXX: Next requested byte in the range sequence */
1872 /* XXX: length = getmaximumrangelenfgth */
1873 readBuffer
.offset
= getNextRangeOffset();
1874 readBuffer
.length
= HTTP_REQBUF_SZ
;
1875 readBuffer
.data
= reqbuf
;
1876 /* we may note we have reached the end of the wanted ranges */
1877 clientStreamRead(getTail(), http
, readBuffer
);
1880 /** Adapt stream status to account for Range cases
1883 clientStream_status_t
1884 ClientSocketContext::socketState()
1886 switch (clientStreamStatus(getTail(), http
)) {
1889 /* check for range support ending */
1891 if (http
->request
->range
) {
1892 /* check: reply was parsed and range iterator was initialized */
1893 assert(http
->range_iter
.valid
);
1894 /* filter out data according to range specs */
1896 if (!canPackMoreRanges()) {
1897 debugs(33, 5, HERE
<< "Range request at end of returnable " <<
1898 "range sequence on " << clientConnection
);
1899 // we got everything we wanted from the store
1900 return STREAM_COMPLETE
;
1902 } else if (reply
&& reply
->content_range
) {
1903 /* reply has content-range, but Squid is not managing ranges */
1904 const int64_t &bytesSent
= http
->out
.offset
;
1905 const int64_t &bytesExpected
= reply
->content_range
->spec
.length
;
1907 debugs(33, 7, HERE
<< "body bytes sent vs. expected: " <<
1908 bytesSent
<< " ? " << bytesExpected
<< " (+" <<
1909 reply
->content_range
->spec
.offset
<< ")");
1911 // did we get at least what we expected, based on range specs?
1913 if (bytesSent
== bytesExpected
) // got everything
1914 return STREAM_COMPLETE
;
1916 if (bytesSent
> bytesExpected
) // Error: Sent more than expected
1917 return STREAM_UNPLANNED_COMPLETE
;
1922 case STREAM_COMPLETE
:
1923 return STREAM_COMPLETE
;
1925 case STREAM_UNPLANNED_COMPLETE
:
1926 return STREAM_UNPLANNED_COMPLETE
;
1929 return STREAM_FAILED
;
1932 fatal ("unreachable code\n");
1937 * A write has just completed to the client, or we have just realised there is
1938 * no more data to send.
1941 clientWriteComplete(const Comm::ConnectionPointer
&conn
, char *bufnotused
, size_t size
, Comm::Flag errflag
, int xerrno
, void *data
)
1943 ClientSocketContext
*context
= (ClientSocketContext
*)data
;
1944 context
->writeComplete(conn
, bufnotused
, size
, errflag
);
1947 /// remembers the abnormal connection termination for logging purposes
1949 ClientSocketContext::noteIoError(const int xerrno
)
1952 if (xerrno
== ETIMEDOUT
)
1953 http
->al
->http
.timedout
= true;
1954 else // even if xerrno is zero (which means read abort/eof)
1955 http
->al
->http
.aborted
= true;
1960 ClientSocketContext::doClose()
1962 clientConnection
->close();
1965 /// called when we encounter a response-related error
1967 ClientSocketContext::initiateClose(const char *reason
)
1969 http
->getConn()->stopSending(reason
); // closes ASAP
1973 ConnStateData::stopSending(const char *error
)
1975 debugs(33, 4, HERE
<< "sending error (" << clientConnection
<< "): " << error
<<
1976 "; old receiving error: " <<
1977 (stoppedReceiving() ? stoppedReceiving_
: "none"));
1979 if (const char *oldError
= stoppedSending()) {
1980 debugs(33, 3, HERE
<< "already stopped sending: " << oldError
);
1981 return; // nothing has changed as far as this connection is concerned
1983 stoppedSending_
= error
;
1985 if (!stoppedReceiving()) {
1986 if (const int64_t expecting
= mayNeedToReadMoreBody()) {
1987 debugs(33, 5, HERE
<< "must still read " << expecting
<<
1988 " request body bytes with " << in
.buf
.length() << " unused");
1989 return; // wait for the request receiver to finish reading
1993 clientConnection
->close();
1997 ClientSocketContext::writeComplete(const Comm::ConnectionPointer
&conn
, char *bufnotused
, size_t size
, Comm::Flag errflag
)
1999 const StoreEntry
*entry
= http
->storeEntry();
2000 http
->out
.size
+= size
;
2001 debugs(33, 5, HERE
<< conn
<< ", sz " << size
<<
2002 ", err " << errflag
<< ", off " << http
->out
.size
<< ", len " <<
2003 (entry
? entry
->objectLen() : 0));
2004 clientUpdateSocketStats(http
->logType
, size
);
2006 /* Bail out quickly on Comm::ERR_CLOSING - close handlers will tidy up */
2008 if (errflag
== Comm::ERR_CLOSING
|| !Comm::IsConnOpen(conn
))
2011 if (errflag
|| clientHttpRequestStatus(conn
->fd
, http
)) {
2012 initiateClose("failure or true request status");
2013 /* Do we leak here ? */
2017 switch (socketState()) {
2023 case STREAM_COMPLETE
:
2024 debugs(33, 5, conn
<< "Stream complete, keepalive is " << http
->request
->flags
.proxyKeepalive
);
2025 if (http
->request
->flags
.proxyKeepalive
)
2026 keepaliveNextRequest();
2028 initiateClose("STREAM_COMPLETE NOKEEPALIVE");
2031 case STREAM_UNPLANNED_COMPLETE
:
2032 initiateClose("STREAM_UNPLANNED_COMPLETE");
2036 initiateClose("STREAM_FAILED");
2040 fatal("Hit unreachable code in clientWriteComplete\n");
2044 SQUIDCEXTERN CSR clientGetMoreData
;
2045 SQUIDCEXTERN CSS clientReplyStatus
;
2046 SQUIDCEXTERN CSD clientReplyDetach
;
2048 static ClientSocketContext
*
2049 parseHttpRequestAbort(ConnStateData
* csd
, const char *uri
)
2051 ClientHttpRequest
*http
;
2052 ClientSocketContext
*context
;
2053 StoreIOBuffer tempBuffer
;
2054 http
= new ClientHttpRequest(csd
);
2055 http
->req_sz
= csd
->in
.buf
.length();
2056 http
->uri
= xstrdup(uri
);
2057 setLogUri (http
, uri
);
2058 context
= new ClientSocketContext(csd
->clientConnection
, http
);
2059 tempBuffer
.data
= context
->reqbuf
;
2060 tempBuffer
.length
= HTTP_REQBUF_SZ
;
2061 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
2062 clientReplyStatus
, new clientReplyContext(http
), clientSocketRecipient
,
2063 clientSocketDetach
, context
, tempBuffer
);
2068 skipLeadingSpace(char *aString
)
2070 char *result
= aString
;
2072 while (xisspace(*aString
))
2079 * 'end' defaults to NULL for backwards compatibility
2080 * remove default value if we ever get rid of NULL-terminated
2084 findTrailingHTTPVersion(const char *uriAndHTTPVersion
, const char *end
)
2087 end
= uriAndHTTPVersion
+ strcspn(uriAndHTTPVersion
, "\r\n");
2091 for (; end
> uriAndHTTPVersion
; --end
) {
2092 if (*end
== '\n' || *end
== '\r')
2095 if (xisspace(*end
)) {
2096 if (strncasecmp(end
+ 1, "HTTP/", 5) == 0)
2107 setLogUri(ClientHttpRequest
* http
, char const *uri
, bool cleanUrl
)
2109 safe_free(http
->log_uri
);
2112 // The uri is already clean just dump it.
2113 http
->log_uri
= xstrndup(uri
, MAX_URL
);
2116 switch (Config
.uri_whitespace
) {
2117 case URI_WHITESPACE_ALLOW
:
2118 flags
|= RFC1738_ESCAPE_NOSPACE
;
2120 case URI_WHITESPACE_ENCODE
:
2121 flags
|= RFC1738_ESCAPE_UNESCAPED
;
2122 http
->log_uri
= xstrndup(rfc1738_do_escape(uri
, flags
), MAX_URL
);
2125 case URI_WHITESPACE_CHOP
: {
2126 flags
|= RFC1738_ESCAPE_NOSPACE
;
2127 flags
|= RFC1738_ESCAPE_UNESCAPED
;
2128 http
->log_uri
= xstrndup(rfc1738_do_escape(uri
, flags
), MAX_URL
);
2129 int pos
= strcspn(http
->log_uri
, w_space
);
2130 http
->log_uri
[pos
] = '\0';
2134 case URI_WHITESPACE_DENY
:
2135 case URI_WHITESPACE_STRIP
:
2138 char *tmp_uri
= static_cast<char*>(xmalloc(strlen(uri
) + 1));
2142 if (!xisspace(*t
)) {
2149 http
->log_uri
= xstrndup(rfc1738_escape_unescaped(tmp_uri
), MAX_URL
);
2158 prepareAcceleratedURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
2160 int vhost
= conn
->port
->vhost
;
2161 int vport
= conn
->port
->vport
;
2163 char ipbuf
[MAX_IPSTRLEN
];
2165 http
->flags
.accel
= true;
2167 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
2169 if (strncasecmp(url
, "cache_object://", 15) == 0)
2170 return; /* already in good shape */
2173 if (conn
->port
->vhost
)
2174 return; /* already in good shape */
2176 /* else we need to ignore the host name */
2177 url
= strstr(url
, "//");
2179 #if SHOULD_REJECT_UNKNOWN_URLS
2182 hp
->request_parse_status
= Http::scBadRequest
;
2183 return parseHttpRequestAbort(conn
, "error:invalid-request");
2188 url
= strchr(url
+ 2, '/');
2195 vport
= http
->getConn()->clientConnection
->local
.port();
2197 const bool switchedToHttps
= conn
->switchedToHttps();
2198 const bool tryHostHeader
= vhost
|| switchedToHttps
;
2199 if (tryHostHeader
&& (host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
2200 debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host
<< " + vport=" << vport
);
2205 if (host
[strlen(host
)] != ']' && (t
= strrchr(host
,':')) != NULL
) {
2206 strncpy(thost
, host
, (t
-host
));
2207 snprintf(thost
+(t
-host
), sizeof(thost
)-(t
-host
), ":%d", vport
);
2210 snprintf(thost
, sizeof(thost
), "%s:%d",host
, vport
);
2213 } // else nothing to alter port-wise.
2214 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
2216 http
->uri
= (char *)xcalloc(url_sz
, 1);
2217 const char *protocol
= switchedToHttps
?
2218 "https" : AnyP::UriScheme(conn
->port
->transport
.protocol
).c_str();
2219 snprintf(http
->uri
, url_sz
, "%s://%s%s", protocol
, host
, url
);
2220 debugs(33, 5, "ACCEL VHOST REWRITE: '" << http
->uri
<< "'");
2221 } else if (conn
->port
->defaultsite
/* && !vhost */) {
2222 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn
->port
->defaultsite
<< " + vport=" << vport
);
2223 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
2224 strlen(conn
->port
->defaultsite
);
2225 http
->uri
= (char *)xcalloc(url_sz
, 1);
2229 snprintf(vportStr
, sizeof(vportStr
),":%d",vport
);
2231 snprintf(http
->uri
, url_sz
, "%s://%s%s%s",
2232 AnyP::UriScheme(conn
->port
->transport
.protocol
).c_str(), conn
->port
->defaultsite
, vportStr
, url
);
2233 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http
->uri
<<"'");
2234 } else if (vport
> 0 /* && (!vhost || no Host:) */) {
2235 debugs(33, 5, "ACCEL VPORT REWRITE: http_port IP + vport=" << vport
);
2236 /* Put the local socket IP address as the hostname, with whatever vport we found */
2237 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
2238 http
->uri
= (char *)xcalloc(url_sz
, 1);
2239 http
->getConn()->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
2240 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
2241 AnyP::UriScheme(conn
->port
->transport
.protocol
).c_str(),
2243 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http
->uri
<< "'");
2248 prepareTransparentURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
2251 char ipbuf
[MAX_IPSTRLEN
];
2254 return; /* already in good shape */
2256 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
2258 if ((host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
2259 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
2261 http
->uri
= (char *)xcalloc(url_sz
, 1);
2262 snprintf(http
->uri
, url_sz
, "%s://%s%s", AnyP::UriScheme(conn
->port
->transport
.protocol
).c_str(), host
, url
);
2263 debugs(33, 5, "TRANSPARENT HOST REWRITE: '" << http
->uri
<<"'");
2265 /* Put the local socket IP address as the hostname. */
2266 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
2267 http
->uri
= (char *)xcalloc(url_sz
, 1);
2268 http
->getConn()->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
2269 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
2270 AnyP::UriScheme(http
->getConn()->port
->transport
.protocol
).c_str(),
2271 ipbuf
, http
->getConn()->clientConnection
->local
.port(), url
);
2272 debugs(33, 5, "TRANSPARENT REWRITE: '" << http
->uri
<< "'");
2276 /** Parse an HTTP request
2278 * \note Sets result->flags.parsed_ok to 0 if failed to parse the request,
2279 * to 1 if the request was correctly parsed.
2280 * \param[in] csd a ConnStateData. The caller must make sure it is not null
2281 * \param[in] hp an HttpParser
 2282 * \param[out] method_p will be set as a side-effect of the parsing.
2283 * Pointed-to value will be set to Http::METHOD_NONE in case of
2285 * \param[out] http_ver will be set as a side-effect of the parsing
2286 * \return NULL on incomplete requests,
2287 * a ClientSocketContext structure on success or failure.
2289 static ClientSocketContext
*
2290 parseHttpRequest(ConnStateData
*csd
, HttpParser
*hp
, HttpRequestMethod
* method_p
, Http::ProtocolVersion
*http_ver
)
2292 char *req_hdr
= NULL
;
2295 ClientHttpRequest
*http
;
2296 ClientSocketContext
*result
;
2297 StoreIOBuffer tempBuffer
;
2300 /* pre-set these values to make aborting simpler */
2301 *method_p
= Http::METHOD_NONE
;
2303 /* NP: don't be tempted to move this down or remove again.
2304 * It's the only DDoS protection old-String has against long URL */
2305 if ( hp
->bufsiz
<= 0) {
2306 debugs(33, 5, "Incomplete request, waiting for end of request line");
2308 } else if ( (size_t)hp
->bufsiz
>= Config
.maxRequestHeaderSize
&& headersEnd(hp
->buf
, Config
.maxRequestHeaderSize
) == 0) {
2309 debugs(33, 5, "parseHttpRequest: Too large request");
2310 hp
->request_parse_status
= Http::scHeaderTooLarge
;
2311 return parseHttpRequestAbort(csd
, "error:request-too-large");
2314 /* Attempt to parse the first line; this'll define the method, url, version and header begin */
2315 r
= HttpParserParseReqLine(hp
);
2318 debugs(33, 5, "Incomplete request, waiting for end of request line");
2323 return parseHttpRequestAbort(csd
, "error:invalid-request");
2326 /* Request line is valid here .. */
2327 *http_ver
= Http::ProtocolVersion(hp
->req
.v_maj
, hp
->req
.v_min
);
2329 /* This call scans the entire request, not just the headers */
2330 if (hp
->req
.v_maj
> 0) {
2331 if ((req_sz
= headersEnd(hp
->buf
, hp
->bufsiz
)) == 0) {
2332 debugs(33, 5, "Incomplete request, waiting for end of headers");
2336 debugs(33, 3, "parseHttpRequest: Missing HTTP identifier");
2337 req_sz
= HttpParserReqSz(hp
);
2340 /* We know the whole request is in hp->buf now */
2342 assert(req_sz
<= (size_t) hp
->bufsiz
);
2344 /* Will the following be true with HTTP/0.9 requests? probably not .. */
2345 /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */
2348 hp
->hdr_end
= req_sz
- 1;
2350 hp
->hdr_start
= hp
->req
.end
+ 1;
2352 /* Enforce max_request_size */
2353 if (req_sz
>= Config
.maxRequestHeaderSize
) {
2354 debugs(33, 5, "parseHttpRequest: Too large request");
2355 hp
->request_parse_status
= Http::scHeaderTooLarge
;
2356 return parseHttpRequestAbort(csd
, "error:request-too-large");
2360 *method_p
= HttpRequestMethod(&hp
->buf
[hp
->req
.m_start
], &hp
->buf
[hp
->req
.m_end
]+1);
2362 /* deny CONNECT via accelerated ports */
2363 if (*method_p
== Http::METHOD_CONNECT
&& csd
->port
!= NULL
&& csd
->port
->flags
.accelSurrogate
) {
2364 debugs(33, DBG_IMPORTANT
, "WARNING: CONNECT method received on " << csd
->port
->transport
.protocol
<< " Accelerator port " << csd
->port
->s
.port());
2365 /* XXX need a way to say "this many character length string" */
2366 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->buf
);
2367 hp
->request_parse_status
= Http::scMethodNotAllowed
;
2368 return parseHttpRequestAbort(csd
, "error:method-not-allowed");
2371 if (*method_p
== Http::METHOD_NONE
) {
2372 /* XXX need a way to say "this many character length string" */
2373 debugs(33, DBG_IMPORTANT
, "clientParseRequestMethod: Unsupported method in request '" << hp
->buf
<< "'");
2374 hp
->request_parse_status
= Http::scMethodNotAllowed
;
2375 return parseHttpRequestAbort(csd
, "error:unsupported-request-method");
2379 * Process headers after request line
2380 * TODO: Use httpRequestParse here.
2382 /* XXX this code should be modified to take a const char * later! */
2383 req_hdr
= (char *) hp
->buf
+ hp
->req
.end
+ 1;
2385 debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr
<< "}");
2387 end
= (char *) hp
->buf
+ hp
->hdr_end
;
2389 debugs(33, 3, "parseHttpRequest: end = {" << end
<< "}");
2391 debugs(33, 3, "parseHttpRequest: prefix_sz = " <<
2392 (int) HttpParserRequestLen(hp
) << ", req_line_sz = " <<
2393 HttpParserReqSz(hp
));
2395 /* Ok, all headers are received */
2396 http
= new ClientHttpRequest(csd
);
2398 http
->req_sz
= HttpParserRequestLen(hp
);
2399 result
= new ClientSocketContext(csd
->clientConnection
, http
);
2400 tempBuffer
.data
= result
->reqbuf
;
2401 tempBuffer
.length
= HTTP_REQBUF_SZ
;
2403 ClientStreamData newServer
= new clientReplyContext(http
);
2404 ClientStreamData newClient
= result
;
2405 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
2406 clientReplyStatus
, newServer
, clientSocketRecipient
,
2407 clientSocketDetach
, newClient
, tempBuffer
);
2409 debugs(33, 5, "parseHttpRequest: Request Header is\n" <<(hp
->buf
) + hp
->hdr_start
);
2413 * XXX this should eventually not use a malloc'ed buffer; the transformation code
2414 * below needs to be modified to not expect a mutable nul-terminated string.
2416 char *url
= (char *)xmalloc(hp
->req
.u_end
- hp
->req
.u_start
+ 16);
2418 memcpy(url
, hp
->buf
+ hp
->req
.u_start
, hp
->req
.u_end
- hp
->req
.u_start
+ 1);
2420 url
[hp
->req
.u_end
- hp
->req
.u_start
+ 1] = '\0';
2422 #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION
2424 if ((t
= strchr(url
, '#'))) /* remove HTML anchors */
2429 debugs(33,5, HERE
<< "repare absolute URL from " <<
2430 (csd
->transparent()?"intercept":(csd
->port
->flags
.accelSurrogate
? "accel":"")));
2431 /* Rewrite the URL in transparent or accelerator mode */
2432 /* NP: there are several cases to traverse here:
2433 * - standard mode (forward proxy)
2434 * - transparent mode (TPROXY)
2435 * - transparent mode with failures
2436 * - intercept mode (NAT)
2437 * - intercept mode with failures
2438 * - accelerator mode (reverse proxy)
2440 * - mixed combos of the above with internal URL
2442 if (csd
->transparent()) {
2443 /* intercept or transparent mode, properly working with no failures */
2444 prepareTransparentURL(csd
, http
, url
, req_hdr
);
2446 } else if (internalCheck(url
)) {
2447 /* internal URL mode */
2448 /* prepend our name & port */
2449 http
->uri
= xstrdup(internalLocalUri(NULL
, url
));
2450 // We just re-wrote the URL. Must replace the Host: header.
2451 // But have not parsed there yet!! flag for local-only handling.
2452 http
->flags
.internal
= true;
2454 } else if (csd
->port
->flags
.accelSurrogate
|| csd
->switchedToHttps()) {
2455 /* accelerator mode */
2456 prepareAcceleratedURL(csd
, http
, url
, req_hdr
);
2460 /* No special rewrites have been applied above, use the
2461 * requested url. may be rewritten later, so make extra room */
2462 int url_sz
= strlen(url
) + Config
.appendDomainLen
+ 5;
2463 http
->uri
= (char *)xcalloc(url_sz
, 1);
2464 strcpy(http
->uri
, url
);
2467 debugs(33, 5, "parseHttpRequest: Complete request received");
2469 // XXX: crop this dump at the end of headers. No need for extras
2470 debugs(11, 2, "HTTP Client " << csd
->clientConnection
);
2471 debugs(11, 2, "HTTP Client REQUEST:\n---------\n" << (hp
->buf
) + hp
->req
.m_start
<< "\n----------");
2473 result
->flags
.parsed_ok
= 1;
2479 ConnStateData::In::maybeMakeSpaceAvailable()
2481 if (buf
.spaceSize() < 2) {
2482 const SBuf::size_type haveCapacity
= buf
.length() + buf
.spaceSize();
2483 if (haveCapacity
>= Config
.maxRequestBufferSize
) {
2484 debugs(33, 4, "request buffer full: client_request_buffer_max_size=" << Config
.maxRequestBufferSize
);
2487 if (haveCapacity
== 0) {
2488 // haveCapacity is based on the SBuf visible window of the MemBlob buffer, which may fill up.
2489 // at which point bump the buffer back to default. This allocates a new MemBlob with any un-parsed bytes.
2490 buf
.reserveCapacity(CLIENT_REQ_BUF_SZ
);
2492 const SBuf::size_type wantCapacity
= min(static_cast<SBuf::size_type
>(Config
.maxRequestBufferSize
), haveCapacity
*2);
2493 buf
.reserveCapacity(wantCapacity
);
2495 debugs(33, 2, "growing request buffer: available=" << buf
.spaceSize() << " used=" << buf
.length());
2497 return (buf
.spaceSize() >= 2);
2501 ConnStateData::addContextToQueue(ClientSocketContext
* context
)
2503 ClientSocketContext::Pointer
*S
;
2505 for (S
= (ClientSocketContext::Pointer
*) & currentobject
; S
->getRaw();
2513 ConnStateData::getConcurrentRequestCount() const
2516 ClientSocketContext::Pointer
*T
;
2518 for (T
= (ClientSocketContext::Pointer
*) ¤tobject
;
2519 T
->getRaw(); T
= &(*T
)->next
, ++result
);
2524 ConnStateData::connFinishedWithConn(int size
)
2527 if (getConcurrentRequestCount() == 0 && in
.buf
.isEmpty()) {
2528 /* no current or pending requests */
2529 debugs(33, 4, HERE
<< clientConnection
<< " closed");
2531 } else if (!Config
.onoff
.half_closed_clients
) {
2532 /* admin doesn't want to support half-closed client sockets */
2533 debugs(33, 3, HERE
<< clientConnection
<< " aborted (half_closed_clients disabled)");
2534 notifyAllContexts(0); // no specific error implies abort
2543 connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
)
2545 assert(byteCount
> 0 && byteCount
<= conn
->in
.buf
.length());
2546 conn
->in
.buf
.consume(byteCount
);
2547 debugs(33, 5, "conn->in.buf has " << conn
->in
.buf
.length() << " bytes unused.");
2550 /// respond with ERR_TOO_BIG if request header exceeds request_header_max_size
2552 ConnStateData::checkHeaderLimits()
2554 if (in
.buf
.length() < Config
.maxRequestHeaderSize
)
2555 return; // can accumulte more header data
2557 debugs(33, 3, "Request header is too large (" << in
.buf
.length() << " > " <<
2558 Config
.maxRequestHeaderSize
<< " bytes)");
2560 ClientSocketContext
*context
= parseHttpRequestAbort(this, "error:request-too-large");
2561 clientStreamNode
*node
= context
->getClientReplyContext();
2562 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2563 assert (repContext
);
2564 repContext
->setReplyToError(ERR_TOO_BIG
,
2565 Http::scBadRequest
, Http::METHOD_NONE
, NULL
,
2566 clientConnection
->remote
, NULL
, NULL
, NULL
);
2567 context
->registerWithConn();
2568 context
->pullData();
2572 ConnStateData::clientAfterReadingRequests()
2574 // Were we expecting to read more request body from half-closed connection?
2575 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection
->fd
)) {
2576 debugs(33, 3, HERE
<< "truncated body: closing half-closed " << clientConnection
);
2577 clientConnection
->close();
2586 ConnStateData::quitAfterError(HttpRequest
*request
)
2588 // From HTTP p.o.v., we do not have to close after every error detected
2589 // at the client-side, but many such errors do require closure and the
2590 // client-side code is bad at handling errors so we play it safe.
2592 request
->flags
.proxyKeepalive
= false;
2593 flags
.readMore
= false;
2594 debugs(33,4, HERE
<< "Will close after error: " << clientConnection
);
#if USE_OPENSSL
/// Serve an error saved during SslBump CONNECT processing, or detect a
/// certificate domain mismatch discovered only after bumping.
/// \return true when an error response was (or is being) sent to the client.
/// NOTE(review): reconstructed from a line-mangled extraction with dropped
/// lines (early returns, braces, #if); diff against upstream before merging.
bool ConnStateData::serveDelayedError(ClientSocketContext *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        debugs(33, 5, "Responding with delated error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // save the original request for logging purposes
        if (!context->http->al->request) {
            context->http->al->request = http->request;
            HTTPMSGLOCK(context->http->al->request);
        }

        // Get error details from the fake certificate-peeking request.
        http->request->detailError(sslServerBump->request->errType, sslServerBump->request->errDetail);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (X509 *srvCert = sslServerBump->serverCert.get()) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert, request->GetHost())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->GetHost());

            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, request, dash_str);
                check.sslErrors = new Ssl::CertErrors(Ssl::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                allowDomainMismatch = (check.fastCheck() == ACCESS_ALLOWED);
                delete check.sslErrors;
                check.sslErrors = NULL;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                // Fill the server IP and hostname for error page generation.
                HttpRequest::Pointer const & peekerRequest = sslServerBump->request;
                request->hier.note(peekerRequest->hier.tcpServer, request->GetHost());

                // Create an error object and fill it
                ErrorState *err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request);
                err->src_addr = clientConnection->remote;
                Ssl::ErrorDetail *errDetail = new Ssl::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, NULL);
                err->detail = errDetail;
                // Save the original request for logging purposes.
                if (!context->http->al->request) {
                    context->http->al->request = request;
                    HTTPMSGLOCK(context->http->al->request);
                }
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
2684 clientProcessRequest(ConnStateData
*conn
, HttpParser
*hp
, ClientSocketContext
*context
, const HttpRequestMethod
& method
, Http::ProtocolVersion http_ver
)
2686 ClientHttpRequest
*http
= context
->http
;
2687 HttpRequest::Pointer request
;
2688 bool notedUseOfBuffer
= false;
2689 bool chunked
= false;
2690 bool mustReplyToOptions
= false;
2691 bool unsupportedTe
= false;
2692 bool expectBody
= false;
2694 /* We have an initial client stream in place should it be needed */
2695 /* setup our private context */
2697 context
->registerWithConn();
2699 if (context
->flags
.parsed_ok
== 0) {
2700 clientStreamNode
*node
= context
->getClientReplyContext();
2701 debugs(33, 2, "clientProcessRequest: Invalid Request");
2702 conn
->quitAfterError(NULL
);
2703 // setLogUri should called before repContext->setReplyToError
2704 setLogUri(http
, http
->uri
, true);
2705 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2706 assert (repContext
);
2707 switch (hp
->request_parse_status
) {
2708 case Http::scHeaderTooLarge
:
2709 repContext
->setReplyToError(ERR_TOO_BIG
, Http::scBadRequest
, method
, http
->uri
, conn
->clientConnection
->remote
, NULL
, conn
->in
.buf
.c_str(), NULL
);
2711 case Http::scMethodNotAllowed
:
2712 repContext
->setReplyToError(ERR_UNSUP_REQ
, Http::scMethodNotAllowed
, method
, http
->uri
,
2713 conn
->clientConnection
->remote
, NULL
, conn
->in
.buf
.c_str(), NULL
);
2716 repContext
->setReplyToError(ERR_INVALID_REQ
, hp
->request_parse_status
, method
, http
->uri
,
2717 conn
->clientConnection
->remote
, NULL
, conn
->in
.buf
.c_str(), NULL
);
2719 assert(context
->http
->out
.offset
== 0);
2720 context
->pullData();
2725 assert(http
->request
);
2726 request
= http
->request
;
2727 notedUseOfBuffer
= true;
2729 if ((request
= HttpRequest::CreateFromUrlAndMethod(http
->uri
, method
)) == NULL
) {
2730 clientStreamNode
*node
= context
->getClientReplyContext();
2731 debugs(33, 5, "Invalid URL: " << http
->uri
);
2732 conn
->quitAfterError(request
.getRaw());
2733 // setLogUri should called before repContext->setReplyToError
2734 setLogUri(http
, http
->uri
, true);
2735 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2736 assert (repContext
);
2737 repContext
->setReplyToError(ERR_INVALID_URL
, Http::scBadRequest
, method
, http
->uri
, conn
->clientConnection
->remote
, NULL
, NULL
, NULL
);
2738 assert(context
->http
->out
.offset
== 0);
2739 context
->pullData();
2743 /* RFC 2616 section 10.5.6 : handle unsupported HTTP major versions cleanly. */
2744 /* We currently only support 0.9, 1.0, 1.1 properly */
2745 if ( (http_ver
.major
== 0 && http_ver
.minor
!= 9) ||
2746 (http_ver
.major
> 1) ) {
2748 clientStreamNode
*node
= context
->getClientReplyContext();
2749 debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp
));
2750 conn
->quitAfterError(request
.getRaw());
2751 // setLogUri should called before repContext->setReplyToError
2752 setLogUri(http
, http
->uri
, true);
2753 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2754 assert (repContext
);
2755 repContext
->setReplyToError(ERR_UNSUP_HTTPVERSION
, Http::scHttpVersionNotSupported
, method
, http
->uri
,
2756 conn
->clientConnection
->remote
, NULL
, HttpParserHdrBuf(hp
), NULL
);
2757 assert(context
->http
->out
.offset
== 0);
2758 context
->pullData();
2762 /* compile headers */
2763 /* we should skip request line! */
2764 /* XXX should actually know the damned buffer size here */
2765 if (!conn
->isFtp
&& http_ver
.major
>= 1 &&
2766 !request
->parseHeader(HttpParserHdrBuf(hp
), HttpParserHdrSz(hp
))) {
2767 clientStreamNode
*node
= context
->getClientReplyContext();
2768 debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp
));
2769 conn
->quitAfterError(request
.getRaw());
2770 // setLogUri should called before repContext->setReplyToError
2771 setLogUri(http
, http
->uri
, true);
2772 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2773 assert (repContext
);
2774 repContext
->setReplyToError(ERR_INVALID_REQ
, Http::scBadRequest
, method
, http
->uri
, conn
->clientConnection
->remote
, NULL
, NULL
, NULL
);
2775 assert(context
->http
->out
.offset
== 0);
2776 context
->pullData();
2780 request
->clientConnectionManager
= conn
;
2782 request
->flags
.accelerated
= http
->flags
.accel
;
2783 request
->flags
.sslBumped
=conn
->switchedToHttps();
2784 request
->flags
.ignoreCc
= conn
->port
->ignore_cc
;
2785 // TODO: decouple http->flags.accel from request->flags.sslBumped
2786 request
->flags
.noDirect
= (request
->flags
.accelerated
&& !request
->flags
.sslBumped
) ?
2787 !conn
->port
->allow_direct
: 0;
2789 if (request
->flags
.sslBumped
) {
2790 if (conn
->getAuth() != NULL
)
2791 request
->auth_user_request
= conn
->getAuth();
2796 * If transparent or interception mode is working clone the transparent and interception flags
2797 * from the port settings to the request.
2799 if (http
->clientConnection
!= NULL
) {
2800 request
->flags
.intercepted
= ((http
->clientConnection
->flags
& COMM_INTERCEPTION
) != 0);
2801 request
->flags
.interceptTproxy
= ((http
->clientConnection
->flags
& COMM_TRANSPARENT
) != 0 ) ;
2802 if (request
->flags
.interceptTproxy
) {
2803 if (Config
.accessList
.spoof_client_ip
) {
2804 ACLFilledChecklist
*checklist
= clientAclChecklistCreate(Config
.accessList
.spoof_client_ip
, http
);
2805 request
->flags
.spoofClientIp
= (checklist
->fastCheck() == ACCESS_ALLOWED
);
2808 request
->flags
.spoofClientIp
= true;
2810 request
->flags
.spoofClientIp
= false;
2813 if (internalCheck(request
->urlpath
.termedBuf())) {
2814 if (internalHostnameIs(request
->GetHost()) && request
->port
== getMyPort()) {
2815 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->GetHost() <<
2816 ':' << request
->port
);
2817 http
->flags
.internal
= true;
2818 } else if (Config
.onoff
.global_internal_static
&& internalStaticCheck(request
->urlpath
.termedBuf())) {
2819 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->GetHost() <<
2820 ':' << request
->port
<< " (global_internal_static on)");
2821 request
->SetHost(internalHostname());
2822 request
->port
= getMyPort();
2823 http
->flags
.internal
= true;
2825 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->GetHost() <<
2826 ':' << request
->port
<< " (not this proxy)");
2829 if (http
->flags
.internal
)
2830 request
->login
[0] = '\0';
2832 request
->flags
.internal
= http
->flags
.internal
;
2833 setLogUri (http
, urlCanonicalClean(request
.getRaw()));
2834 request
->client_addr
= conn
->clientConnection
->remote
; // XXX: remove reuest->client_addr member.
2835 #if FOLLOW_X_FORWARDED_FOR
2836 // indirect client gets stored here because it is an HTTP header result (from X-Forwarded-For:)
2837 // not a details about teh TCP connection itself
2838 request
->indirect_client_addr
= conn
->clientConnection
->remote
;
2839 #endif /* FOLLOW_X_FORWARDED_FOR */
2840 request
->my_addr
= conn
->clientConnection
->local
;
2841 request
->myportname
= conn
->port
->name
;
2842 request
->http_ver
= http_ver
;
2844 // Link this HttpRequest to ConnStateData relatively early so the following complex handling can use it
2845 // TODO: this effectively obsoletes a lot of conn->FOO copying. That needs cleaning up later.
2846 request
->clientConnectionManager
= conn
;
2848 if (request
->header
.chunked()) {
2850 } else if (request
->header
.has(HDR_TRANSFER_ENCODING
)) {
2851 const String te
= request
->header
.getList(HDR_TRANSFER_ENCODING
);
2852 // HTTP/1.1 requires chunking to be the last encoding if there is one
2853 unsupportedTe
= te
.size() && te
!= "identity";
2854 } // else implied identity coding
2856 mustReplyToOptions
= (method
== Http::METHOD_OPTIONS
) &&
2857 (request
->header
.getInt64(HDR_MAX_FORWARDS
) == 0);
2858 if (!urlCheckRequest(request
.getRaw()) || mustReplyToOptions
|| unsupportedTe
) {
2859 clientStreamNode
*node
= context
->getClientReplyContext();
2860 conn
->quitAfterError(request
.getRaw());
2861 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2862 assert (repContext
);
2863 repContext
->setReplyToError(ERR_UNSUP_REQ
, Http::scNotImplemented
, request
->method
, NULL
,
2864 conn
->clientConnection
->remote
, request
.getRaw(), NULL
, NULL
);
2865 assert(context
->http
->out
.offset
== 0);
2866 context
->pullData();
2870 if (!chunked
&& !clientIsContentLengthValid(request
.getRaw())) {
2871 clientStreamNode
*node
= context
->getClientReplyContext();
2872 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2873 assert (repContext
);
2874 conn
->quitAfterError(request
.getRaw());
2875 repContext
->setReplyToError(ERR_INVALID_REQ
,
2876 Http::scLengthRequired
, request
->method
, NULL
,
2877 conn
->clientConnection
->remote
, request
.getRaw(), NULL
, NULL
);
2878 assert(context
->http
->out
.offset
== 0);
2879 context
->pullData();
2883 if (request
->header
.has(HDR_EXPECT
)) {
2884 const String expect
= request
->header
.getList(HDR_EXPECT
);
2885 const bool supportedExpect
= (expect
.caseCmp("100-continue") == 0);
2886 if (!supportedExpect
) {
2887 clientStreamNode
*node
= context
->getClientReplyContext();
2888 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2889 assert (repContext
);
2890 conn
->quitAfterError(request
.getRaw());
2891 repContext
->setReplyToError(ERR_INVALID_REQ
, Http::scExpectationFailed
, request
->method
, http
->uri
,
2892 conn
->clientConnection
->remote
, request
.getRaw(), NULL
, NULL
);
2893 assert(context
->http
->out
.offset
== 0);
2894 context
->pullData();
2900 http
->request
= request
.getRaw();
2901 HTTPMSGLOCK(http
->request
);
2903 clientSetKeepaliveFlag(http
);
2905 // Let tunneling code be fully responsible for CONNECT requests
2906 if (http
->request
->method
== Http::METHOD_CONNECT
) {
2907 context
->mayUseConnection(true);
2908 conn
->flags
.readMore
= false;
2910 // consume header early so that tunnel gets just the body
2911 connNoteUseOfBuffer(conn
, http
->req_sz
);
2912 notedUseOfBuffer
= true;
2916 if (conn
->switchedToHttps() && conn
->serveDelayedError(context
))
2920 /* Do we expect a request-body? */
2921 expectBody
= chunked
|| request
->content_length
> 0;
2922 if (!context
->mayUseConnection() && expectBody
) {
2923 request
->body_pipe
= conn
->expectRequestBody(
2924 chunked
? -1 : request
->content_length
);
2926 if (!notedUseOfBuffer
) {
2927 // consume header early so that body pipe gets just the body
2928 connNoteUseOfBuffer(conn
, http
->req_sz
);
2929 notedUseOfBuffer
= true;
2932 /* Is it too large? */
2933 if (!chunked
&& // if chunked, we will check as we accumulate
2934 clientIsRequestBodyTooLargeForPolicy(request
->content_length
)) {
2935 clientStreamNode
*node
= context
->getClientReplyContext();
2936 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2937 assert (repContext
);
2938 conn
->quitAfterError(request
.getRaw());
2939 repContext
->setReplyToError(ERR_TOO_BIG
,
2940 Http::scPayloadTooLarge
, Http::METHOD_NONE
, NULL
,
2941 conn
->clientConnection
->remote
, http
->request
, NULL
, NULL
);
2942 assert(context
->http
->out
.offset
== 0);
2943 context
->pullData();
2948 // We may stop producing, comm_close, and/or call setReplyToError()
2949 // below, so quit on errors to avoid http->doCallouts()
2950 if (!conn
->handleRequestBodyData())
2953 if (!request
->body_pipe
->productionEnded()) {
2954 debugs(33, 5, HERE
<< "need more request body");
2955 context
->mayUseConnection(true);
2956 assert(conn
->flags
.readMore
);
2961 http
->calloutContext
= new ClientRequestContext(http
);
2966 if (!notedUseOfBuffer
)
2967 connNoteUseOfBuffer(conn
, http
->req_sz
);
2971 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
2972 * to here because calling comm_reset_close() causes http to
2973 * be freed and the above connNoteUseOfBuffer() would hit an
2974 * assertion, not to mention that we were accessing freed memory.
2976 if (request
!= NULL
&& request
->flags
.resetTcp
&& Comm::IsConnOpen(conn
->clientConnection
)) {
2977 debugs(33, 3, HERE
<< "Sending TCP RST on " << conn
->clientConnection
);
2978 conn
->flags
.readMore
= false;
2979 comm_reset_close(conn
->clientConnection
);
2984 ConnStateData::processFtpRequest(ClientSocketContext
*const context
)
2986 ClientHttpRequest
*const http
= context
->http
;
2987 assert(http
!= NULL
);
2988 HttpRequest
*const request
= http
->request
;
2989 assert(request
!= NULL
);
2990 debugs(33, 9, request
);
2992 HttpHeader
&header
= request
->header
;
2993 assert(header
.has(HDR_FTP_COMMAND
));
2994 String
&cmd
= header
.findEntry(HDR_FTP_COMMAND
)->value
;
2995 assert(header
.has(HDR_FTP_ARGUMENTS
));
2996 String
¶ms
= header
.findEntry(HDR_FTP_ARGUMENTS
)->value
;
2998 const bool fwd
= !http
->storeEntry() &&
2999 FtpHandleRequest(context
, cmd
, params
);
3001 if (http
->storeEntry() != NULL
) {
3002 debugs(33, 4, "got an immediate response");
3003 assert(http
->storeEntry() != NULL
);
3004 clientSetKeepaliveFlag(http
);
3005 context
->pullData();
3007 debugs(33, 4, "forwarding request to server side");
3008 assert(http
->storeEntry() == NULL
);
3009 clientProcessRequest(this, &parser_
, context
, request
->method
,
3012 debugs(33, 4, "will resume processing later");
3017 ConnStateData::resumeFtpRequest(ClientSocketContext
*const context
)
3019 debugs(33, 4, "resuming");
3020 processFtpRequest(context
);
3024 connStripBufferWhitespace (ConnStateData
* conn
)
3026 // XXX: kill this whole function.
3027 while (!conn
->in
.buf
.isEmpty() && xisspace(conn
->in
.buf
.at(0))) {
3028 conn
->in
.buf
.consume(1);
3033 * Limit the number of concurrent requests.
3034 * \return true when there are available position(s) in the pipeline queue for another request.
3035 * \return false when the pipeline queue is full or disabled.
3038 ConnStateData::concurrentRequestQueueFilled() const
3040 const int existingRequestCount
= getConcurrentRequestCount();
3042 // default to the configured pipeline size.
3043 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
3044 const int concurrentRequestLimit
= (isFtp
? 0 : Config
.pipeline_max_prefetch
) + 1;
3046 // when queue filled already we cant add more.
3047 if (existingRequestCount
>= concurrentRequestLimit
) {
3048 debugs(33, 3, clientConnection
<< " max concurrent requests reached (" << concurrentRequestLimit
<< ")");
3049 debugs(33, 5, clientConnection
<< " deferring new request until one is done");
3057 * Attempt to parse one or more requests from the input buffer.
3058 * If a request is successfully parsed, even if the next request
3059 * is only partially parsed, it will return TRUE.
3062 ConnStateData::clientParseRequests()
3064 HttpRequestMethod method
;
3065 bool parsed_req
= false;
3067 debugs(33, 5, HERE
<< clientConnection
<< ": attempting to parse");
3069 // Loop while we have read bytes that are not needed for producing the body
3070 // On errors, bodyPipe may become nil, but readMore will be cleared
3071 while (!in
.buf
.isEmpty() && !bodyPipe
&& flags
.readMore
) {
3072 connStripBufferWhitespace(this);
3074 /* Don't try to parse if the buffer is empty */
3075 if (in
.buf
.isEmpty())
3078 /* Limit the number of concurrent requests */
3079 if (concurrentRequestQueueFilled())
3082 ClientSocketContext
*context
= NULL
;
3083 Http::ProtocolVersion http_ver
;
3085 /* Begin the parsing */
3086 PROF_start(parseHttpRequest
);
3087 HttpParserInit(&parser_
, in
.buf
.c_str(), in
.buf
.length());
3089 /* Process request */
3090 context
= parseHttpRequest(this, &parser_
, &method
, &http_ver
);
3091 PROF_stop(parseHttpRequest
);
3093 context
= FtpParseRequest(this, &method
, &http_ver
);
3095 /* partial or incomplete request */
3097 // TODO: why parseHttpRequest can just return parseHttpRequestAbort
3098 // (which becomes context) but checkHeaderLimits cannot?
3099 checkHeaderLimits();
3103 /* status -1 or 1 */
3105 debugs(33, 5, HERE
<< clientConnection
<< ": parsed a request");
3106 AsyncCall::Pointer timeoutCall
= commCbCall(5, 4, "clientLifetimeTimeout",
3107 CommTimeoutCbPtrFun(clientLifetimeTimeout
, context
->http
));
3108 commSetConnTimeout(clientConnection
, Config
.Timeout
.lifetime
, timeoutCall
);
3111 clientProcessRequest(this, &parser_
, context
, method
, http_ver
);
3113 // Process FTP request asynchronously to make sure FTP
3114 // data connection accept callback is fired first.
3115 CallJobHere1(33, 4, CbcPointer
<ConnStateData
>(this),
3116 ConnStateData
, ConnStateData::processFtpRequest
, context
);
3119 parsed_req
= true; // XXX: do we really need to parse everything right NOW ?
3121 if (context
->mayUseConnection()) {
3122 debugs(33, 3, HERE
<< "Not parsing new requests, as this request may need the connection");
3128 /* XXX where to 'finish' the parsing pass? */
3133 ConnStateData::clientReadRequest(const CommIoCbParams
&io
)
3135 debugs(33,5, io
.conn
);
3139 /* Bail out quickly on Comm::ERR_CLOSING - close handlers will tidy up */
3140 if (io
.flag
== Comm::ERR_CLOSING
) {
3141 debugs(33,5, io
.conn
<< " closing Bailout.");
3145 assert(Comm::IsConnOpen(clientConnection
));
3146 assert(io
.conn
->fd
== clientConnection
->fd
);
3149 * Don't reset the timeout value here. The value should be
3150 * counting Config.Timeout.request and applies to the request
3151 * as a whole, not individual read() calls.
3152 * Plus, it breaks our lame *HalfClosed() detection
3155 CommIoCbParams
rd(this); // will be expanded with ReadNow results
3157 switch (Comm::ReadNow(rd
, in
.buf
)) {
3158 case Comm::INPROGRESS
:
3159 if (in
.buf
.isEmpty())
3160 debugs(33, 2, io
.conn
<< ": no data to process, " << xstrerr(rd
.xerrno
));
3165 kb_incr(&(statCounter
.client_http
.kbytes_in
), rd
.size
);
3166 // may comm_close or setReplyToError
3167 if (!handleReadData())
3170 /* Continue to process previously read data */
3173 case Comm::ENDFILE
: // close detected by 0-byte read
3174 debugs(33, 5, io
.conn
<< " closed?");
3176 if (connFinishedWithConn(rd
.size
)) {
3177 clientConnection
->close();
3181 /* It might be half-closed, we can't tell */
3182 fd_table
[io
.conn
->fd
].flags
.socket_eof
= true;
3183 commMarkHalfClosed(io
.conn
->fd
);
3184 fd_note(io
.conn
->fd
, "half-closed");
3186 /* There is one more close check at the end, to detect aborted
3187 * (partial) requests. At this point we can't tell if the request
3191 /* Continue to process previously read data */
3194 // case Comm::COMM_ERROR:
3195 default: // no other flags should ever occur
3196 debugs(33, 2, io
.conn
<< ": got flag " << rd
.flag
<< "; " << xstrerr(rd
.xerrno
));
3197 notifyAllContexts(rd
.xerrno
);
3202 /* Process next request */
3203 if (getConcurrentRequestCount() == 0)
3204 fd_note(io
.fd
, "Reading next request");
3206 if (!clientParseRequests()) {
3210 * If the client here is half closed and we failed
3211 * to parse a request, close the connection.
3212 * The above check with connFinishedWithConn() only
3213 * succeeds _if_ the buffer is empty which it won't
3214 * be if we have an incomplete request.
3215 * XXX: This duplicates ClientSocketContext::keepaliveNextRequest
3217 if (getConcurrentRequestCount() == 0 && commIsHalfClosed(io
.fd
)) {
3218 debugs(33, 5, HERE
<< io
.conn
<< ": half-closed connection, no completed request parsed, connection closing.");
3219 clientConnection
->close();
3227 clientAfterReadingRequests();
3231 ConnStateData::clientReadFtpData(const CommIoCbParams
&io
)
3233 debugs(33,5,HERE
<< io
.conn
<< " size " << io
.size
);
3234 Must(ftp
.reader
!= NULL
);
3237 assert(Comm::IsConnOpen(ftp
.dataConn
));
3238 assert(io
.conn
->fd
== ftp
.dataConn
->fd
);
3240 if (io
.flag
== Comm::OK
&& bodyPipe
!= NULL
) {
3242 kb_incr(&(statCounter
.client_http
.kbytes_in
), io
.size
);
3244 char *const current_buf
= ftp
.uploadBuf
+ ftp
.uploadAvailSize
;
3245 if (io
.buf
!= current_buf
)
3246 memmove(current_buf
, io
.buf
, io
.size
);
3247 ftp
.uploadAvailSize
+= io
.size
;
3248 handleFtpRequestData();
3249 } else if (io
.size
== 0) {
3250 debugs(33, 5, HERE
<< io
.conn
<< " closed");
3251 FtpCloseDataConnection(this);
3252 if (ftp
.uploadAvailSize
<= 0)
3253 finishDechunkingRequest(true);
3255 } else { // not Comm::Flags::OK or unexpected read
3256 debugs(33, 5, HERE
<< io
.conn
<< " closed");
3257 FtpCloseDataConnection(this);
3258 finishDechunkingRequest(false);
3264 ConnStateData::handleFtpRequestData()
3266 assert(bodyPipe
!= NULL
);
3268 debugs(33,5, HERE
<< "handling FTP request data for " << clientConnection
);
3269 const size_t putSize
= bodyPipe
->putMoreData(ftp
.uploadBuf
,
3270 ftp
.uploadAvailSize
);
3272 ftp
.uploadAvailSize
-= putSize
;
3273 if (ftp
.uploadAvailSize
> 0)
3274 memmove(ftp
.uploadBuf
, ftp
.uploadBuf
+ putSize
, ftp
.uploadAvailSize
);
3277 if (Comm::IsConnOpen(ftp
.dataConn
))
3279 else if (ftp
.uploadAvailSize
<= 0)
3280 finishDechunkingRequest(true);
3284 * called when new request data has been read from the socket
3286 * \retval false called comm_close or setReplyToError (the caller should bail)
3287 * \retval true we did not call comm_close or setReplyToError
3290 ConnStateData::handleReadData()
3292 // if we are reading a body, stuff data into the body pipe
3293 if (bodyPipe
!= NULL
)
3294 return handleRequestBodyData();
3299 * called when new request body data has been buffered in in.buf
3300 * may close the connection if we were closing and piped everything out
3302 * \retval false called comm_close or setReplyToError (the caller should bail)
3303 * \retval true we did not call comm_close or setReplyToError
3306 ConnStateData::handleRequestBodyData()
3308 assert(bodyPipe
!= NULL
);
3312 if (in
.bodyParser
) { // chunked encoding
3313 if (const err_type error
= handleChunkedRequestBody(putSize
)) {
3314 abortChunkedRequestBody(error
);
3317 } else { // identity encoding
3318 debugs(33,5, HERE
<< "handling plain request body for " << clientConnection
);
3319 putSize
= bodyPipe
->putMoreData(in
.buf
.c_str(), in
.buf
.length());
3320 if (!bodyPipe
->mayNeedMoreData()) {
3321 // BodyPipe will clear us automagically when we produced everything
3327 connNoteUseOfBuffer(this, putSize
);
3330 debugs(33,5, HERE
<< "produced entire request body for " << clientConnection
);
3332 if (const char *reason
= stoppedSending()) {
3333 /* we've finished reading like good clients,
3334 * now do the close that initiateClose initiated.
3336 debugs(33, 3, HERE
<< "closing for earlier sending error: " << reason
);
3337 clientConnection
->close();
3345 /// parses available chunked encoded body bytes, checks size, returns errors
3347 ConnStateData::handleChunkedRequestBody(size_t &putSize
)
3349 debugs(33, 7, "chunked from " << clientConnection
<< ": " << in
.buf
.length());
3351 try { // the parser will throw on errors
3353 if (in
.buf
.isEmpty()) // nothing to do
3356 MemBuf raw
; // ChunkedCodingParser only works with MemBufs
3357 // add one because MemBuf will assert if it cannot 0-terminate
3358 raw
.init(in
.buf
.length(), in
.buf
.length()+1);
3359 raw
.append(in
.buf
.c_str(), in
.buf
.length());
3361 const mb_size_t wasContentSize
= raw
.contentSize();
3362 BodyPipeCheckout
bpc(*bodyPipe
);
3363 const bool parsed
= in
.bodyParser
->parse(&raw
, &bpc
.buf
);
3365 putSize
= wasContentSize
- raw
.contentSize();
3367 // dechunk then check: the size limit applies to _dechunked_ content
3368 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe
->producedSize()))
3372 finishDechunkingRequest(true);
3374 return ERR_NONE
; // nil bodyPipe implies body end for the caller
3377 // if chunk parser needs data, then the body pipe must need it too
3378 Must(!in
.bodyParser
->needsMoreData() || bodyPipe
->mayNeedMoreData());
3380 // if parser needs more space and we can consume nothing, we will stall
3381 Must(!in
.bodyParser
->needsMoreSpace() || bodyPipe
->buf().hasContent());
3382 } catch (...) { // TODO: be more specific
3383 debugs(33, 3, HERE
<< "malformed chunks" << bodyPipe
->status());
3384 return ERR_INVALID_REQ
;
3387 debugs(33, 7, HERE
<< "need more chunked data" << *bodyPipe
->status());
3391 /// quit on errors related to chunked request body handling
3393 ConnStateData::abortChunkedRequestBody(const err_type error
)
3395 finishDechunkingRequest(false);
3397 // XXX: The code below works if we fail during initial request parsing,
3398 // but if we fail when the server-side works already, the server may send
3399 // us its response too, causing various assertions. How to prevent that?
3400 #if WE_KNOW_HOW_TO_SEND_ERRORS
3401 ClientSocketContext::Pointer context
= getCurrentContext();
3402 if (context
!= NULL
&& !context
->http
->out
.offset
) { // output nothing yet
3403 clientStreamNode
*node
= context
->getClientReplyContext();
3404 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
3406 const Http::StatusCode scode
= (error
== ERR_TOO_BIG
) ?
3407 Http::scPayloadTooLarge
: HTTP_BAD_REQUEST
;
3408 repContext
->setReplyToError(error
, scode
,
3409 repContext
->http
->request
->method
,
3410 repContext
->http
->uri
,
3412 repContext
->http
->request
,
3414 context
->pullData();
3416 // close or otherwise we may get stuck as nobody will notice the error?
3417 comm_reset_close(clientConnection
);
3420 debugs(33, 3, HERE
<< "aborting chunked request without error " << error
);
3421 comm_reset_close(clientConnection
);
3423 flags
.readMore
= false;
3427 ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer
)
3430 handleFtpRequestData();
3434 if (!handleRequestBodyData())
3437 // too late to read more body
3438 if (!isOpen() || stoppedReceiving())
3445 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer
)
3447 // request reader may get stuck waiting for space if nobody consumes body
3448 if (bodyPipe
!= NULL
)
3449 bodyPipe
->enableAutoConsumption();
3452 FtpCloseDataConnection(this);
3456 stopReceiving("virgin request body consumer aborted"); // closes ASAP
3459 /** general lifetime handler for HTTP requests */
3461 ConnStateData::requestTimeout(const CommTimeoutCbParams
&io
)
3464 * Just close the connection to not confuse browsers
3465 * using persistent connections. Some browsers open
3466 * a connection and then do not use it until much
3467 * later (presumeably because the request triggering
3468 * the open has already been completed on another
3471 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
3476 clientLifetimeTimeout(const CommTimeoutCbParams
&io
)
3478 ClientHttpRequest
*http
= static_cast<ClientHttpRequest
*>(io
.data
);
3479 debugs(33, DBG_IMPORTANT
, "WARNING: Closing client connection due to lifetime timeout");
3480 debugs(33, DBG_IMPORTANT
, "\t" << http
->uri
);
3481 http
->al
->http
.timedout
= true;
3482 if (Comm::IsConnOpen(io
.conn
))
3486 ConnStateData::ConnStateData(const MasterXaction::Pointer
&xact
):
3487 AsyncJob("ConnStateData"),
3488 isFtp(xact
->squidPort
->transport
.protocol
== AnyP::PROTO_FTP
), // TODO: convert into a method?
3490 sslBumpMode(Ssl::bumpEnd
),
3491 switchedToHttps_(false),
3492 sslServerBump(NULL
),
3494 stoppedSending_(NULL
),
3495 stoppedReceiving_(NULL
)
3497 pinning
.host
= NULL
;
3499 pinning
.pinned
= false;
3500 pinning
.auth
= false;
3501 pinning
.zeroReply
= false;
3502 pinning
.peer
= NULL
;
3504 // store the details required for creating more MasterXaction objects as new requests come in
3505 clientConnection
= xact
->tcpClient
;
3506 port
= xact
->squidPort
;
3507 log_addr
= xact
->tcpClient
->remote
;
3508 log_addr
.applyMask(Config
.Addrs
.client_netmask
);
3510 // ensure a buffer is present for this connection
3511 in
.maybeMakeSpaceAvailable();
3513 if (port
->disable_pmtu_discovery
!= DISABLE_PMTU_OFF
&&
3514 (transparent() || port
->disable_pmtu_discovery
== DISABLE_PMTU_ALWAYS
)) {
3515 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
3516 int i
= IP_PMTUDISC_DONT
;
3517 if (setsockopt(clientConnection
->fd
, SOL_IP
, IP_MTU_DISCOVER
, &i
, sizeof(i
)) < 0)
3518 debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection
<< " : " << xstrerror());
3520 static bool reported
= false;
3523 debugs(33, DBG_IMPORTANT
, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
3529 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3530 AsyncCall::Pointer call
= JobCallback(33, 5, Dialer
, this, ConnStateData::connStateClosed
);
3531 comm_add_close_handler(clientConnection
->fd
, call
);
3533 if (Config
.onoff
.log_fqdn
)
3534 fqdncache_gethostbyaddr(clientConnection
->remote
, FQDN_LOOKUP_IF_MISS
);
3537 if (Ident::TheConfig
.identLookup
) {
3538 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
3539 identChecklist
.src_addr
= xact
->tcpClient
->remote
;
3540 identChecklist
.my_addr
= xact
->tcpClient
->local
;
3541 if (identChecklist
.fastCheck() == ACCESS_ALLOWED
)
3542 Ident::Start(xact
->tcpClient
, clientIdentDone
, this);
3546 clientdbEstablished(clientConnection
->remote
, 1);
3548 flags
.readMore
= !isFtp
;
3551 ftp
.gotEpsvAll
= false;
3552 ftp
.readGreeting
= false;
3553 ftp
.state
= FTP_BEGIN
;
3554 ftp
.uploadAvailSize
= 0;
3558 /** Handle a new connection on HTTP socket. */
3560 httpAccept(const CommAcceptCbParams
¶ms
)
3562 MasterXaction::Pointer xact
= params
.xaction
;
3563 AnyP::PortCfgPointer s
= xact
->squidPort
;
3565 // NP: it is possible the port was reconfigured when the call or accept() was queued.
3567 if (params
.flag
!= Comm::OK
) {
3568 // Its possible the call was still queued when the client disconnected
3569 debugs(33, 2, "httpAccept: " << s
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
3573 debugs(33, 4, HERE
<< params
.conn
<< ": accepted");
3574 fd_note(params
.conn
->fd
, "client http connect");
3576 if (s
->tcp_keepalive
.enabled
) {
3577 commSetTcpKeepalive(params
.conn
->fd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
3580 ++ incoming_sockets_accepted
;
3582 // Socket is ready, setup the connection manager to start using it
3583 ConnStateData
*connState
= new ConnStateData(xact
);
3585 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3586 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5,
3587 TimeoutDialer
, connState
, ConnStateData::requestTimeout
);
3588 commSetConnTimeout(params
.conn
, Config
.Timeout
.request
, timeoutCall
);
3590 connState
->readSomeData();
3593 fd_table
[params
.conn
->fd
].clientInfo
= NULL
;
3595 if (Config
.onoff
.client_db
) {
3596 /* it was said several times that client write limiter does not work if client_db is disabled */
3598 ClientDelayPools
& pools(Config
.ClientDelay
.pools
);
3599 ACLFilledChecklist
ch(NULL
, NULL
, NULL
);
3601 // TODO: we check early to limit error response bandwith but we
3602 // should recheck when we can honor delay_pool_uses_indirect
3603 // TODO: we should also pass the port details for myportname here.
3604 ch
.src_addr
= params
.conn
->remote
;
3605 ch
.my_addr
= params
.conn
->local
;
3607 for (unsigned int pool
= 0; pool
< pools
.size(); ++pool
) {
3609 /* pools require explicit 'allow' to assign a client into them */
3610 if (pools
[pool
].access
) {
3611 ch
.accessList
= pools
[pool
].access
;
3612 allow_t answer
= ch
.fastCheck();
3613 if (answer
== ACCESS_ALLOWED
) {
3615 /* request client information from db after we did all checks
3616 this will save hash lookup if client failed checks */
3617 ClientInfo
* cli
= clientdbGetInfo(params
.conn
->remote
);
3620 /* put client info in FDE */
3621 fd_table
[params
.conn
->fd
].clientInfo
= cli
;
3623 /* setup write limiter for this request */
3624 const double burst
= floor(0.5 +
3625 (pools
[pool
].highwatermark
* Config
.ClientDelay
.initial
)/100.0);
3626 cli
->setWriteLimiter(pools
[pool
].rate
, burst
, pools
[pool
].highwatermark
);
3629 debugs(83, 4, HERE
<< "Delay pool " << pool
<< " skipped because ACL " << answer
);
3639 /** Create SSL connection structure and update fd_table */
3641 httpsCreate(const Comm::ConnectionPointer
&conn
, SSL_CTX
*sslContext
)
3643 SSL
*ssl
= SSL_new(sslContext
);
3646 const int ssl_error
= ERR_get_error();
3647 debugs(83, DBG_IMPORTANT
, "ERROR: httpsAccept: Error allocating handle: " << ERR_error_string(ssl_error
, NULL
) );
3652 SSL_set_fd(ssl
, conn
->fd
);
3653 fd_table
[conn
->fd
].ssl
= ssl
;
3654 fd_table
[conn
->fd
].read_method
= &ssl_read_method
;
3655 fd_table
[conn
->fd
].write_method
= &ssl_write_method
;
3657 debugs(33, 5, "httpsCreate: will negotate SSL on " << conn
);
3658 fd_note(conn
->fd
, "client https start");
3663 /** negotiate an SSL connection */
3665 clientNegotiateSSL(int fd
, void *data
)
3667 ConnStateData
*conn
= (ConnStateData
*)data
;
3669 SSL
*ssl
= fd_table
[fd
].ssl
;
3672 if ((ret
= SSL_accept(ssl
)) <= 0) {
3673 int ssl_error
= SSL_get_error(ssl
, ret
);
3675 switch (ssl_error
) {
3677 case SSL_ERROR_WANT_READ
:
3678 Comm::SetSelect(fd
, COMM_SELECT_READ
, clientNegotiateSSL
, conn
, 0);
3681 case SSL_ERROR_WANT_WRITE
:
3682 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, clientNegotiateSSL
, conn
, 0);
3685 case SSL_ERROR_SYSCALL
:
3688 debugs(83, 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Aborted by client");
3694 if (errno
== ECONNRESET
)
3697 debugs(83, hard
? 1 : 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3698 fd
<< ": " << strerror(errno
) << " (" << errno
<< ")");
3705 case SSL_ERROR_ZERO_RETURN
:
3706 debugs(83, DBG_IMPORTANT
, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Closed by client");
3711 debugs(83, DBG_IMPORTANT
, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3712 fd
<< ": " << ERR_error_string(ERR_get_error(), NULL
) <<
3713 " (" << ssl_error
<< "/" << ret
<< ")");
3721 if (SSL_session_reused(ssl
)) {
3722 debugs(83, 2, "clientNegotiateSSL: Session " << SSL_get_session(ssl
) <<
3723 " reused on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<< ")");
3725 if (do_debug(83, 4)) {
3726 /* Write out the SSL session details.. actually the call below, but
3727 * OpenSSL headers do strange typecasts confusing GCC.. */
3728 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
3729 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
3730 PEM_ASN1_write((i2d_of_void
*)i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3732 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
3734 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
3735 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
3736 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
3737 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
3738 * Because there are two possible usable cast, if you get an error here, try the other
3739 * commented line. */
3741 PEM_ASN1_write((int(*)())i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3742 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL); */
3746 debugs(83, 4, "With " OPENSSL_VERSION_TEXT
", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source." );
3749 /* Note: This does not automatically fflush the log file.. */
3752 debugs(83, 2, "clientNegotiateSSL: New session " <<
3753 SSL_get_session(ssl
) << " on FD " << fd
<< " (" <<
3754 fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<<
3758 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<< " negotiated cipher " <<
3759 SSL_get_cipher(ssl
));
3761 client_cert
= SSL_get_peer_certificate(ssl
);
3763 if (client_cert
!= NULL
) {
3764 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3765 " client certificate: subject: " <<
3766 X509_NAME_oneline(X509_get_subject_name(client_cert
), 0, 0));
3768 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3769 " client certificate: issuer: " <<
3770 X509_NAME_oneline(X509_get_issuer_name(client_cert
), 0, 0));
3772 X509_free(client_cert
);
3774 debugs(83, 5, "clientNegotiateSSL: FD " << fd
<<
3775 " has no certificate.");
3778 conn
->readSomeData();
3782 * If SSL_CTX is given, starts reading the SSL handshake.
3783 * Otherwise, calls switchToHttps to generate a dynamic SSL_CTX.
3786 httpsEstablish(ConnStateData
*connState
, SSL_CTX
*sslContext
, Ssl::BumpMode bumpMode
)
3790 const Comm::ConnectionPointer
&details
= connState
->clientConnection
;
3792 if (sslContext
&& !(ssl
= httpsCreate(details
, sslContext
)))
3795 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3796 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5, TimeoutDialer
,
3797 connState
, ConnStateData::requestTimeout
);
3798 commSetConnTimeout(details
, Config
.Timeout
.request
, timeoutCall
);
3801 Comm::SetSelect(details
->fd
, COMM_SELECT_READ
, clientNegotiateSSL
, connState
, 0);
3803 char buf
[MAX_IPSTRLEN
];
3804 assert(bumpMode
!= Ssl::bumpNone
&& bumpMode
!= Ssl::bumpEnd
);
3805 HttpRequest::Pointer
fakeRequest(new HttpRequest
);
3806 fakeRequest
->SetHost(details
->local
.toStr(buf
, sizeof(buf
)));
3807 fakeRequest
->port
= details
->local
.port();
3808 fakeRequest
->clientConnectionManager
= connState
;
3809 fakeRequest
->client_addr
= connState
->clientConnection
->remote
;
3810 #if FOLLOW_X_FORWARDED_FOR
3811 fakeRequest
->indirect_client_addr
= connState
->clientConnection
->remote
;
3813 fakeRequest
->my_addr
= connState
->clientConnection
->local
;
3814 fakeRequest
->flags
.interceptTproxy
= ((connState
->clientConnection
->flags
& COMM_TRANSPARENT
) != 0 ) ;
3815 fakeRequest
->flags
.intercepted
= ((connState
->clientConnection
->flags
& COMM_INTERCEPTION
) != 0);
3816 fakeRequest
->myportname
= connState
->port
->name
;
3817 if (fakeRequest
->flags
.interceptTproxy
) {
3818 if (Config
.accessList
.spoof_client_ip
) {
3819 ACLFilledChecklist
checklist(Config
.accessList
.spoof_client_ip
, fakeRequest
.getRaw(), NULL
);
3820 fakeRequest
->flags
.spoofClientIp
= (checklist
.fastCheck() == ACCESS_ALLOWED
);
3822 fakeRequest
->flags
.spoofClientIp
= true;
3824 fakeRequest
->flags
.spoofClientIp
= false;
3825 debugs(33, 4, HERE
<< details
<< " try to generate a Dynamic SSL CTX");
3826 connState
->switchToHttps(fakeRequest
.getRaw(), bumpMode
);
3831 * A callback function to use with the ACLFilledChecklist callback.
3832 * In the case of ACCESS_ALLOWED answer initializes a bumped SSL connection,
3833 * else reverts the connection to tunnel mode.
3836 httpsSslBumpAccessCheckDone(allow_t answer
, void *data
)
3838 ConnStateData
*connState
= (ConnStateData
*) data
;
3840 // if the connection is closed or closing, just return.
3841 if (!connState
->isOpen())
3844 // Require both a match and a positive bump mode to work around exceptional
3845 // cases where ACL code may return ACCESS_ALLOWED with zero answer.kind.
3846 if (answer
== ACCESS_ALLOWED
&& answer
.kind
!= Ssl::bumpNone
) {
3847 debugs(33, 2, HERE
<< "sslBump needed for " << connState
->clientConnection
);
3848 connState
->sslBumpMode
= static_cast<Ssl::BumpMode
>(answer
.kind
);
3849 httpsEstablish(connState
, NULL
, (Ssl::BumpMode
)answer
.kind
);
3851 debugs(33, 2, HERE
<< "sslBump not needed for " << connState
->clientConnection
);
3852 connState
->sslBumpMode
= Ssl::bumpNone
;
3854 // fake a CONNECT request to force connState to tunnel
3855 static char ip
[MAX_IPSTRLEN
];
3856 connState
->clientConnection
->local
.toUrl(ip
, sizeof(ip
));
3857 // Pre-pend this fake request to the TLS bits already in the buffer
3859 retStr
.append("CONNECT ").append(ip
).append(" HTTP/1.1\r\nHost: ").append(ip
).append("\r\n\r\n");
3860 connState
->in
.buf
= retStr
.append(connState
->in
.buf
);
3861 bool ret
= connState
->handleReadData();
3863 ret
= connState
->clientParseRequests();
3866 debugs(33, 2, HERE
<< "Failed to start fake CONNECT request for ssl bumped connection: " << connState
->clientConnection
);
3867 connState
->clientConnection
->close();
3872 /** handle a new HTTPS connection */
3874 httpsAccept(const CommAcceptCbParams
¶ms
)
3876 MasterXaction::Pointer xact
= params
.xaction
;
3877 const AnyP::PortCfgPointer s
= xact
->squidPort
;
3879 // NP: it is possible the port was reconfigured when the call or accept() was queued.
3881 if (params
.flag
!= Comm::OK
) {
3882 // Its possible the call was still queued when the client disconnected
3883 debugs(33, 2, "httpsAccept: " << s
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
3887 debugs(33, 4, HERE
<< params
.conn
<< " accepted, starting SSL negotiation.");
3888 fd_note(params
.conn
->fd
, "client https connect");
3890 if (s
->tcp_keepalive
.enabled
) {
3891 commSetTcpKeepalive(params
.conn
->fd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
3894 ++incoming_sockets_accepted
;
3896 // Socket is ready, setup the connection manager to start using it
3897 ConnStateData
*connState
= new ConnStateData(xact
);
3899 if (s
->flags
.tunnelSslBumping
) {
3900 debugs(33, 5, "httpsAccept: accept transparent connection: " << params
.conn
);
3902 if (!Config
.accessList
.ssl_bump
) {
3903 httpsSslBumpAccessCheckDone(ACCESS_DENIED
, connState
);
3907 // Create a fake HTTP request for ssl_bump ACL check,
3908 // using tproxy/intercept provided destination IP and port.
3909 HttpRequest
*request
= new HttpRequest();
3910 static char ip
[MAX_IPSTRLEN
];
3911 assert(params
.conn
->flags
& (COMM_TRANSPARENT
| COMM_INTERCEPTION
));
3912 request
->SetHost(params
.conn
->local
.toStr(ip
, sizeof(ip
)));
3913 request
->port
= params
.conn
->local
.port();
3914 request
->myportname
= s
->name
;
3916 ACLFilledChecklist
*acl_checklist
= new ACLFilledChecklist(Config
.accessList
.ssl_bump
, request
, NULL
);
3917 acl_checklist
->src_addr
= params
.conn
->remote
;
3918 acl_checklist
->my_addr
= s
->s
;
3919 acl_checklist
->nonBlockingCheck(httpsSslBumpAccessCheckDone
, connState
);
3922 SSL_CTX
*sslContext
= s
->staticSslContext
.get();
3923 httpsEstablish(connState
, sslContext
, Ssl::bumpNone
);
3927 /** handle a new FTP connection */
3929 ftpAccept(const CommAcceptCbParams
¶ms
)
3931 MasterXaction::Pointer xact
= params
.xaction
;
3932 AnyP::PortCfgPointer s
= xact
->squidPort
;
3934 // NP: it is possible the port was reconfigured when the call or accept() was queued.
3936 if (params
.flag
!= Comm::OK
) {
3937 // Its possible the call was still queued when the client disconnected
3938 debugs(33, 2, "ftpAccept: " << s
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
3942 debugs(33, 4, HERE
<< params
.conn
<< ": accepted");
3943 fd_note(params
.conn
->fd
, "client ftp connect");
3945 if (s
->tcp_keepalive
.enabled
) {
3946 commSetTcpKeepalive(params
.conn
->fd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
3949 ++incoming_sockets_accepted
;
3951 // Socket is ready, setup the connection manager to start using it
3952 ConnStateData
*connState
= new ConnStateData(xact
);
3954 if (connState
->transparent()) {
3955 char buf
[MAX_IPSTRLEN
];
3956 connState
->clientConnection
->local
.toUrl(buf
, MAX_IPSTRLEN
);
3957 connState
->ftp
.host
= buf
;
3958 const char *uri
= connState
->ftpBuildUri();
3959 debugs(33, 5, HERE
<< "FTP transparent URL: " << uri
);
3962 FtpWriteEarlyReply(connState
, 220, "Service ready");
3964 // TODO: Merge common httpAccept() parts, applying USE_DELAY_POOLS to FTP.
3968 ConnStateData::sslCrtdHandleReplyWrapper(void *data
, const HelperReply
&reply
)
3970 ConnStateData
* state_data
= (ConnStateData
*)(data
);
3971 state_data
->sslCrtdHandleReply(reply
);
3975 ConnStateData::sslCrtdHandleReply(const HelperReply
&reply
)
3977 if (reply
.result
== HelperReply::BrokenHelper
) {
3978 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply
);
3979 } else if (!reply
.other().hasContent()) {
3980 debugs(1, DBG_IMPORTANT
, HERE
<< "\"ssl_crtd\" helper returned <NULL> reply.");
3982 Ssl::CrtdMessage
reply_message(Ssl::CrtdMessage::REPLY
);
3983 if (reply_message
.parse(reply
.other().content(), reply
.other().contentSize()) != Ssl::CrtdMessage::OK
) {
3984 debugs(33, 5, HERE
<< "Reply from ssl_crtd for " << sslConnectHostOrIp
<< " is incorrect");
3986 if (reply
.result
!= HelperReply::Okay
) {
3987 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply_message
.getBody());
3989 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " was successfully recieved from ssl_crtd");
3990 SSL_CTX
*ctx
= Ssl::generateSslContextUsingPkeyAndCertFromMemory(reply_message
.getBody().c_str(), *port
);
3991 getSslContextDone(ctx
, true);
3996 getSslContextDone(NULL
);
3999 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties
&certProperties
)
4001 certProperties
.commonName
= sslCommonName
.size() > 0 ? sslCommonName
.termedBuf() : sslConnectHostOrIp
.termedBuf();
4003 // fake certificate adaptation requires bump-server-first mode
4004 if (!sslServerBump
) {
4005 assert(port
->signingCert
.get());
4006 certProperties
.signWithX509
.resetAndLock(port
->signingCert
.get());
4007 if (port
->signPkey
.get())
4008 certProperties
.signWithPkey
.resetAndLock(port
->signPkey
.get());
4009 certProperties
.signAlgorithm
= Ssl::algSignTrusted
;
4013 // In case of an error while connecting to the secure server, use a fake
4014 // trusted certificate, with no mimicked fields and no adaptation
4015 // algorithms. There is nothing we can mimic so we want to minimize the
4016 // number of warnings the user will have to see to get to the error page.
4017 assert(sslServerBump
->entry
);
4018 if (sslServerBump
->entry
->isEmpty()) {
4019 if (X509
*mimicCert
= sslServerBump
->serverCert
.get())
4020 certProperties
.mimicCert
.resetAndLock(mimicCert
);
4022 ACLFilledChecklist
checklist(NULL
, sslServerBump
->request
.getRaw(),
4023 clientConnection
!= NULL
? clientConnection
->rfc931
: dash_str
);
4024 checklist
.sslErrors
= cbdataReference(sslServerBump
->sslErrors
);
4026 for (sslproxy_cert_adapt
*ca
= Config
.ssl_client
.cert_adapt
; ca
!= NULL
; ca
= ca
->next
) {
4027 // If the algorithm already set, then ignore it.
4028 if ((ca
->alg
== Ssl::algSetCommonName
&& certProperties
.setCommonName
) ||
4029 (ca
->alg
== Ssl::algSetValidAfter
&& certProperties
.setValidAfter
) ||
4030 (ca
->alg
== Ssl::algSetValidBefore
&& certProperties
.setValidBefore
) )
4033 if (ca
->aclList
&& checklist
.fastCheck(ca
->aclList
) == ACCESS_ALLOWED
) {
4034 const char *alg
= Ssl::CertAdaptAlgorithmStr
[ca
->alg
];
4035 const char *param
= ca
->param
;
4037 // For parameterless CN adaptation, use hostname from the
4039 if (ca
->alg
== Ssl::algSetCommonName
) {
4041 param
= sslConnectHostOrIp
.termedBuf();
4042 certProperties
.commonName
= param
;
4043 certProperties
.setCommonName
= true;
4044 } else if (ca
->alg
== Ssl::algSetValidAfter
)
4045 certProperties
.setValidAfter
= true;
4046 else if (ca
->alg
== Ssl::algSetValidBefore
)
4047 certProperties
.setValidBefore
= true;
4049 debugs(33, 5, HERE
<< "Matches certificate adaptation aglorithm: " <<
4050 alg
<< " param: " << (param
? param
: "-"));
4054 certProperties
.signAlgorithm
= Ssl::algSignEnd
;
4055 for (sslproxy_cert_sign
*sg
= Config
.ssl_client
.cert_sign
; sg
!= NULL
; sg
= sg
->next
) {
4056 if (sg
->aclList
&& checklist
.fastCheck(sg
->aclList
) == ACCESS_ALLOWED
) {
4057 certProperties
.signAlgorithm
= (Ssl::CertSignAlgorithm
)sg
->alg
;
4061 } else {// if (!sslServerBump->entry->isEmpty())
4062 // Use trusted certificate for a Squid-generated error
4063 // or the user would have to add a security exception
4064 // just to see the error page. We will close the connection
4065 // so that the trust is not extended to non-Squid content.
4066 certProperties
.signAlgorithm
= Ssl::algSignTrusted
;
4069 assert(certProperties
.signAlgorithm
!= Ssl::algSignEnd
);
4071 if (certProperties
.signAlgorithm
== Ssl::algSignUntrusted
) {
4072 assert(port
->untrustedSigningCert
.get());
4073 certProperties
.signWithX509
.resetAndLock(port
->untrustedSigningCert
.get());
4074 certProperties
.signWithPkey
.resetAndLock(port
->untrustedSignPkey
.get());
4076 assert(port
->signingCert
.get());
4077 certProperties
.signWithX509
.resetAndLock(port
->signingCert
.get());
4079 if (port
->signPkey
.get())
4080 certProperties
.signWithPkey
.resetAndLock(port
->signPkey
.get());
4082 signAlgorithm
= certProperties
.signAlgorithm
;
4086 ConnStateData::getSslContextStart()
4088 assert(areAllContextsForThisConnection());
4090 /* careful: freeAllContexts() above frees request, host, etc. */
4092 if (port
->generateHostCertificates
) {
4093 Ssl::CertificateProperties certProperties
;
4094 buildSslCertGenerationParams(certProperties
);
4095 sslBumpCertKey
= certProperties
.dbKey().c_str();
4096 assert(sslBumpCertKey
.size() > 0 && sslBumpCertKey
[0] != '\0');
4098 debugs(33, 5, HERE
<< "Finding SSL certificate for " << sslBumpCertKey
<< " in cache");
4099 Ssl::LocalContextStorage
*ssl_ctx_cache
= Ssl::TheGlobalContextStorage
.getLocalStorage(port
->s
);
4100 SSL_CTX
* dynCtx
= NULL
;
4101 Ssl::SSL_CTX_Pointer
*cachedCtx
= ssl_ctx_cache
? ssl_ctx_cache
->get(sslBumpCertKey
.termedBuf()) : NULL
;
4102 if (cachedCtx
&& (dynCtx
= cachedCtx
->get())) {
4103 debugs(33, 5, HERE
<< "SSL certificate for " << sslBumpCertKey
<< " have found in cache");
4104 if (Ssl::verifySslCertificate(dynCtx
, certProperties
)) {
4105 debugs(33, 5, HERE
<< "Cached SSL certificate for " << sslBumpCertKey
<< " is valid");
4106 getSslContextDone(dynCtx
);
4109 debugs(33, 5, HERE
<< "Cached SSL certificate for " << sslBumpCertKey
<< " is out of date. Delete this certificate from cache");
4111 ssl_ctx_cache
->del(sslBumpCertKey
.termedBuf());
4114 debugs(33, 5, HERE
<< "SSL certificate for " << sslBumpCertKey
<< " haven't found in cache");
4119 debugs(33, 5, HERE
<< "Generating SSL certificate for " << certProperties
.commonName
<< " using ssl_crtd.");
4120 Ssl::CrtdMessage
request_message(Ssl::CrtdMessage::REQUEST
);
4121 request_message
.setCode(Ssl::CrtdMessage::code_new_certificate
);
4122 request_message
.composeRequest(certProperties
);
4123 debugs(33, 5, HERE
<< "SSL crtd request: " << request_message
.compose().c_str());
4124 Ssl::Helper::GetInstance()->sslSubmit(request_message
, sslCrtdHandleReplyWrapper
, this);
4126 } catch (const std::exception
&e
) {
4127 debugs(33, DBG_IMPORTANT
, "ERROR: Failed to compose ssl_crtd " <<
4128 "request for " << certProperties
.commonName
<<
4129 " certificate: " << e
.what() << "; will now block to " <<
4130 "generate that certificate.");
4131 // fall through to do blocking in-process generation.
4133 #endif // USE_SSL_CRTD
4135 debugs(33, 5, HERE
<< "Generating SSL certificate for " << certProperties
.commonName
);
4136 dynCtx
= Ssl::generateSslContext(certProperties
, *port
);
4137 getSslContextDone(dynCtx
, true);
4140 getSslContextDone(NULL
);
4144 ConnStateData::getSslContextDone(SSL_CTX
* sslContext
, bool isNew
)
4146 // Try to add generated ssl context to storage.
4147 if (port
->generateHostCertificates
&& isNew
) {
4149 if (signAlgorithm
== Ssl::algSignTrusted
) {
4150 // Add signing certificate to the certificates chain
4151 X509
*cert
= port
->signingCert
.get();
4152 if (SSL_CTX_add_extra_chain_cert(sslContext
, cert
)) {
4153 // increase the certificate lock
4154 CRYPTO_add(&(cert
->references
),1,CRYPTO_LOCK_X509
);
4156 const int ssl_error
= ERR_get_error();
4157 debugs(33, DBG_IMPORTANT
, "WARNING: can not add signing certificate to SSL context chain: " << ERR_error_string(ssl_error
, NULL
));
4159 Ssl::addChainToSslContext(sslContext
, port
->certsToChain
.get());
4161 //else it is self-signed or untrusted do not attrach any certificate
4163 Ssl::LocalContextStorage
*ssl_ctx_cache
= Ssl::TheGlobalContextStorage
.getLocalStorage(port
->s
);
4164 assert(sslBumpCertKey
.size() > 0 && sslBumpCertKey
[0] != '\0');
4166 if (!ssl_ctx_cache
|| !ssl_ctx_cache
->add(sslBumpCertKey
.termedBuf(), new Ssl::SSL_CTX_Pointer(sslContext
))) {
4167 // If it is not in storage delete after using. Else storage deleted it.
4168 fd_table
[clientConnection
->fd
].dynamicSslContext
= sslContext
;
4171 debugs(33, 2, HERE
<< "Failed to generate SSL cert for " << sslConnectHostOrIp
);
4175 // If generated ssl context = NULL, try to use static ssl context.
4177 if (!port
->staticSslContext
) {
4178 debugs(83, DBG_IMPORTANT
, "Closing SSL " << clientConnection
->remote
<< " as lacking SSL context");
4179 clientConnection
->close();
4182 debugs(33, 5, HERE
<< "Using static ssl context.");
4183 sslContext
= port
->staticSslContext
.get();
4187 if (!httpsCreate(clientConnection
, sslContext
))
4190 // bumped intercepted conns should already have Config.Timeout.request set
4191 // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
4192 // to make sure the connection does not get stuck on non-SSL clients.
4193 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
4194 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5, TimeoutDialer
,
4195 this, ConnStateData::requestTimeout
);
4196 commSetConnTimeout(clientConnection
, Config
.Timeout
.request
, timeoutCall
);
4198 // Disable the client read handler until CachePeer selection is complete
4199 Comm::SetSelect(clientConnection
->fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
4200 Comm::SetSelect(clientConnection
->fd
, COMM_SELECT_READ
, clientNegotiateSSL
, this, 0);
4201 switchedToHttps_
= true;
4205 ConnStateData::switchToHttps(HttpRequest
*request
, Ssl::BumpMode bumpServerMode
)
4207 assert(!switchedToHttps_
);
4209 sslConnectHostOrIp
= request
->GetHost();
4210 sslCommonName
= request
->GetHost();
4212 // We are going to read new request
4213 flags
.readMore
= true;
4214 debugs(33, 5, HERE
<< "converting " << clientConnection
<< " to SSL");
4216 // If sslServerBump is set, then we have decided to deny CONNECT
4217 // and now want to switch to SSL to send the error to the client
4218 // without even peeking at the origin server certificate.
4219 if (bumpServerMode
== Ssl::bumpServerFirst
&& !sslServerBump
) {
4220 request
->flags
.sslPeek
= true;
4221 sslServerBump
= new Ssl::ServerBump(request
);
4223 // will call httpsPeeked() with certificate and connection, eventually
4224 FwdState::fwdStart(clientConnection
, sslServerBump
->entry
, sslServerBump
->request
.getRaw());
4228 // otherwise, use sslConnectHostOrIp
4229 getSslContextStart();
4233 ConnStateData::httpsPeeked(Comm::ConnectionPointer serverConnection
)
4235 Must(sslServerBump
!= NULL
);
4237 if (Comm::IsConnOpen(serverConnection
)) {
4238 SSL
*ssl
= fd_table
[serverConnection
->fd
].ssl
;
4240 Ssl::X509_Pointer
serverCert(SSL_get_peer_certificate(ssl
));
4241 assert(serverCert
.get() != NULL
);
4242 sslCommonName
= Ssl::CommonHostName(serverCert
.get());
4243 debugs(33, 5, HERE
<< "HTTPS server CN: " << sslCommonName
<<
4244 " bumped: " << *serverConnection
);
4246 pinConnection(serverConnection
, NULL
, NULL
, false);
4248 debugs(33, 5, HERE
<< "bumped HTTPS server: " << sslConnectHostOrIp
);
4250 debugs(33, 5, HERE
<< "Error while bumping: " << sslConnectHostOrIp
);
4251 Ip::Address intendedDest
;
4252 intendedDest
= sslConnectHostOrIp
.termedBuf();
4253 const bool isConnectRequest
= !port
->flags
.isIntercepted();
4255 // Squid serves its own error page and closes, so we want
4256 // a CN that causes no additional browser errors. Possible
4257 // only when bumping CONNECT with a user-typed address.
4258 if (intendedDest
.isAnyAddr() || isConnectRequest
)
4259 sslCommonName
= sslConnectHostOrIp
;
4260 else if (sslServerBump
->serverCert
.get())
4261 sslCommonName
= Ssl::CommonHostName(sslServerBump
->serverCert
.get());
4263 // copy error detail from bump-server-first request to CONNECT request
4264 if (currentobject
!= NULL
&& currentobject
->http
!= NULL
&& currentobject
->http
->request
)
4265 currentobject
->http
->request
->detailError(sslServerBump
->request
->errType
, sslServerBump
->request
->errDetail
);
4268 getSslContextStart();
4271 #endif /* USE_OPENSSL */
4273 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
4275 OpenedHttpSocket(const Comm::ConnectionPointer
&c
, const Ipc::FdNoteId portType
)
4277 if (!Comm::IsConnOpen(c
)) {
4278 Must(NHttpSockets
> 0); // we tried to open some
4279 --NHttpSockets
; // there will be fewer sockets than planned
4280 Must(HttpSockets
[NHttpSockets
] < 0); // no extra fds received
4282 if (!NHttpSockets
) // we could not open any listen sockets at all
4283 fatalf("Unable to open %s",FdNote(portType
));
4290 /// find any unused HttpSockets[] slot and store fd there or return false
4292 AddOpenedHttpSocket(const Comm::ConnectionPointer
&conn
)
4295 for (int i
= 0; i
< NHttpSockets
&& !found
; ++i
) {
4296 if ((found
= HttpSockets
[i
] < 0))
4297 HttpSockets
[i
] = conn
->fd
;
4303 clientHttpConnectionsOpen(void)
4305 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= NULL
; s
= s
->next
) {
4306 if (MAXTCPLISTENPORTS
== NHttpSockets
) {
4307 debugs(1, DBG_IMPORTANT
, "WARNING: You have too many 'http_port' lines.");
4308 debugs(1, DBG_IMPORTANT
, " The limit is " << MAXTCPLISTENPORTS
<< " HTTP ports.");
4313 if (s
->flags
.tunnelSslBumping
&& !Config
.accessList
.ssl_bump
) {
4314 debugs(33, DBG_IMPORTANT
, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << AnyP::UriScheme(s
->transport
.protocol
) << "_port " << s
->s
);
4315 s
->flags
.tunnelSslBumping
= false;
4318 if (s
->flags
.tunnelSslBumping
&&
4319 !s
->staticSslContext
&&
4320 !s
->generateHostCertificates
) {
4321 debugs(1, DBG_IMPORTANT
, "Will not bump SSL at http_port " << s
->s
<< " due to SSL initialization failure.");
4322 s
->flags
.tunnelSslBumping
= false;
4324 if (s
->flags
.tunnelSslBumping
) {
4325 // Create ssl_ctx cache for this port.
4326 Ssl::TheGlobalContextStorage
.addLocalStorage(s
->s
, s
->dynamicCertMemCacheSize
== std::numeric_limits
<size_t>::max() ? 4194304 : s
->dynamicCertMemCacheSize
);
4330 // Fill out a Comm::Connection which IPC will open as a listener for us
4331 // then pass back when active so we can start a TcpAcceptor subscription.
4332 s
->listenConn
= new Comm::Connection
;
4333 s
->listenConn
->local
= s
->s
;
4334 s
->listenConn
->flags
= COMM_NONBLOCKING
| (s
->flags
.tproxyIntercept
? COMM_TRANSPARENT
: 0) | (s
->flags
.natIntercept
? COMM_INTERCEPTION
: 0);
4336 // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
4337 typedef CommCbFunPtrCallT
<CommAcceptCbPtrFun
> AcceptCall
;
4338 RefCount
<AcceptCall
> subCall
= commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept
, CommAcceptCbParams(NULL
)));
4339 Subscription::Pointer sub
= new CallSubscription
<AcceptCall
>(subCall
);
4341 AsyncCall::Pointer listenCall
= asyncCall(33,2, "clientListenerConnectionOpened",
4342 ListeningStartedDialer(&clientListenerConnectionOpened
, s
, Ipc::fdnHttpSocket
, sub
));
4343 Ipc::StartListening(SOCK_STREAM
, IPPROTO_TCP
, s
->listenConn
, Ipc::fdnHttpSocket
, listenCall
);
4345 HttpSockets
[NHttpSockets
] = -1; // set in clientListenerConnectionOpened
4352 clientHttpsConnectionsOpen(void)
4354 for (AnyP::PortCfgPointer s
= HttpsPortList
; s
!= NULL
; s
= s
->next
) {
4355 if (MAXTCPLISTENPORTS
== NHttpSockets
) {
4356 debugs(1, DBG_IMPORTANT
, "Ignoring 'https_port' lines exceeding the limit.");
4357 debugs(1, DBG_IMPORTANT
, "The limit is " << MAXTCPLISTENPORTS
<< " HTTPS ports.");
4361 if (!s
->staticSslContext
) {
4362 debugs(1, DBG_IMPORTANT
, "Ignoring https_port " << s
->s
<<
4363 " due to SSL initialization failure.");
4367 // TODO: merge with similar code in clientHttpConnectionsOpen()
4368 if (s
->flags
.tunnelSslBumping
&& !Config
.accessList
.ssl_bump
) {
4369 debugs(33, DBG_IMPORTANT
, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << AnyP::UriScheme(s
->transport
.protocol
) << "_port " << s
->s
);
4370 s
->flags
.tunnelSslBumping
= false;
4373 if (s
->flags
.tunnelSslBumping
&& !s
->staticSslContext
&& !s
->generateHostCertificates
) {
4374 debugs(1, DBG_IMPORTANT
, "Will not bump SSL at http_port " << s
->s
<< " due to SSL initialization failure.");
4375 s
->flags
.tunnelSslBumping
= false;
4378 if (s
->flags
.tunnelSslBumping
) {
4379 // Create ssl_ctx cache for this port.
4380 Ssl::TheGlobalContextStorage
.addLocalStorage(s
->s
, s
->dynamicCertMemCacheSize
== std::numeric_limits
<size_t>::max() ? 4194304 : s
->dynamicCertMemCacheSize
);
4383 // Fill out a Comm::Connection which IPC will open as a listener for us
4384 s
->listenConn
= new Comm::Connection
;
4385 s
->listenConn
->local
= s
->s
;
4386 s
->listenConn
->flags
= COMM_NONBLOCKING
| (s
->flags
.tproxyIntercept
? COMM_TRANSPARENT
: 0) |
4387 (s
->flags
.natIntercept
? COMM_INTERCEPTION
: 0);
4389 // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
4390 typedef CommCbFunPtrCallT
<CommAcceptCbPtrFun
> AcceptCall
;
4391 RefCount
<AcceptCall
> subCall
= commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept
, CommAcceptCbParams(NULL
)));
4392 Subscription::Pointer sub
= new CallSubscription
<AcceptCall
>(subCall
);
4394 AsyncCall::Pointer listenCall
= asyncCall(33, 2, "clientListenerConnectionOpened",
4395 ListeningStartedDialer(&clientListenerConnectionOpened
,
4396 s
, Ipc::fdnHttpsSocket
, sub
));
4397 Ipc::StartListening(SOCK_STREAM
, IPPROTO_TCP
, s
->listenConn
, Ipc::fdnHttpsSocket
, listenCall
);
4398 HttpSockets
[NHttpSockets
] = -1;
4405 clientFtpConnectionsOpen(void)
4407 for (AnyP::PortCfgPointer s
= FtpPortList
; s
!= NULL
; s
= s
->next
) {
4408 if (MAXTCPLISTENPORTS
== NHttpSockets
) {
4409 debugs(1, DBG_IMPORTANT
, "Ignoring 'ftp_port' lines exceeding the limit.");
4410 debugs(1, DBG_IMPORTANT
, "The limit is " << MAXTCPLISTENPORTS
<< " FTP ports.");
4414 // Fill out a Comm::Connection which IPC will open as a listener for us
4415 s
->listenConn
= new Comm::Connection
;
4416 s
->listenConn
->local
= s
->s
;
4417 s
->listenConn
->flags
= COMM_NONBLOCKING
| (s
->flags
.tproxyIntercept
? COMM_TRANSPARENT
: 0) |
4418 (s
->flags
.natIntercept
? COMM_INTERCEPTION
: 0);
4420 // setup the subscriptions such that new connections accepted by listenConn are handled by FTP
4421 typedef CommCbFunPtrCallT
<CommAcceptCbPtrFun
> AcceptCall
;
4422 RefCount
<AcceptCall
> subCall
= commCbCall(5, 5, "ftpAccept", CommAcceptCbPtrFun(ftpAccept
, CommAcceptCbParams(NULL
)));
4423 Subscription::Pointer sub
= new CallSubscription
<AcceptCall
>(subCall
);
4425 AsyncCall::Pointer listenCall
= asyncCall(33, 2, "clientListenerConnectionOpened",
4426 ListeningStartedDialer(&clientListenerConnectionOpened
,
4427 s
, Ipc::fdnFtpSocket
, sub
));
4428 Ipc::StartListening(SOCK_STREAM
, IPPROTO_TCP
, s
->listenConn
, Ipc::fdnFtpSocket
, listenCall
);
4429 HttpSockets
[NHttpSockets
] = -1;
4434 /// process clientHttpConnectionsOpen result
4436 clientListenerConnectionOpened(AnyP::PortCfgPointer
&s
, const Ipc::FdNoteId portTypeNote
, const Subscription::Pointer
&sub
)
4440 if (!OpenedHttpSocket(s
->listenConn
, portTypeNote
))
4443 Must(Comm::IsConnOpen(s
->listenConn
));
4445 // TCP: setup a job to handle accept() with subscribed handler
4446 AsyncJob::Start(new Comm::TcpAcceptor(s
, FdNote(portTypeNote
), sub
));
4448 debugs(1, DBG_IMPORTANT
, "Accepting " <<
4449 (s
->flags
.natIntercept
? "NAT intercepted " : "") <<
4450 (s
->flags
.tproxyIntercept
? "TPROXY intercepted " : "") <<
4451 (s
->flags
.tunnelSslBumping
? "SSL bumped " : "") <<
4452 (s
->flags
.accelSurrogate
? "reverse-proxy " : "")
4453 << FdNote(portTypeNote
) << " connections at "
4456 Must(AddOpenedHttpSocket(s
->listenConn
)); // otherwise, we have received a fd we did not ask for
4460 clientOpenListenSockets(void)
4462 clientHttpConnectionsOpen();
4464 clientHttpsConnectionsOpen();
4466 clientFtpConnectionsOpen();
4468 if (NHttpSockets
< 1)
4469 fatal("No HTTP, HTTPS or FTP ports configured");
4473 clientConnectionsClose(void)
4475 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= NULL
; s
= s
->next
) {
4476 if (s
->listenConn
!= NULL
) {
4477 debugs(1, DBG_IMPORTANT
, "Closing HTTP port " << s
->listenConn
->local
);
4478 s
->listenConn
->close();
4479 s
->listenConn
= NULL
;
4484 for (AnyP::PortCfgPointer s
= HttpsPortList
; s
!= NULL
; s
= s
->next
) {
4485 if (s
->listenConn
!= NULL
) {
4486 debugs(1, DBG_IMPORTANT
, "Closing HTTPS port " << s
->listenConn
->local
);
4487 s
->listenConn
->close();
4488 s
->listenConn
= NULL
;
4493 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= NULL
; s
= s
->next
) {
4494 if (s
->listenConn
!= NULL
) {
4495 debugs(1, DBG_IMPORTANT
, "Closing FTP port " << s
->listenConn
->local
);
4496 s
->listenConn
->close();
4497 s
->listenConn
= NULL
;
4501 // TODO see if we can drop HttpSockets array entirely */
4502 for (int i
= 0; i
< NHttpSockets
; ++i
) {
4503 HttpSockets
[i
] = -1;
4510 varyEvaluateMatch(StoreEntry
* entry
, HttpRequest
* request
)
4512 const char *vary
= request
->vary_headers
;
4513 int has_vary
= entry
->getReply()->header
.has(HDR_VARY
);
4514 #if X_ACCELERATOR_VARY
4517 entry
->getReply()->header
.has(HDR_X_ACCELERATOR_VARY
);
4520 if (!has_vary
|| !entry
->mem_obj
->vary_headers
) {
4522 /* Oops... something odd is going on here.. */
4523 debugs(33, DBG_IMPORTANT
, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
4524 entry
->mem_obj
->urlXXX() << "' '" << vary
<< "'");
4525 safe_free(request
->vary_headers
);
4530 /* This is not a varying object */
4534 /* virtual "vary" object found. Calculate the vary key and
4535 * continue the search
4537 vary
= httpMakeVaryMark(request
, entry
->getReply());
4540 request
->vary_headers
= xstrdup(vary
);
4543 /* Ouch.. we cannot handle this kind of variance */
4544 /* XXX This cannot really happen, but just to be complete */
4549 vary
= httpMakeVaryMark(request
, entry
->getReply());
4552 request
->vary_headers
= xstrdup(vary
);
4556 /* Ouch.. we cannot handle this kind of variance */
4557 /* XXX This cannot really happen, but just to be complete */
4559 } else if (strcmp(vary
, entry
->mem_obj
->vary_headers
) == 0) {
4562 /* Oops.. we have already been here and still haven't
4563 * found the requested variant. Bail out
4565 debugs(33, DBG_IMPORTANT
, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
4566 entry
->mem_obj
->urlXXX() << "' '" << vary
<< "'");
4572 ACLFilledChecklist
*
4573 clientAclChecklistCreate(const acl_access
* acl
, ClientHttpRequest
* http
)
4575 ConnStateData
* conn
= http
->getConn();
4576 ACLFilledChecklist
*ch
= new ACLFilledChecklist(acl
, http
->request
,
4577 cbdataReferenceValid(conn
) && conn
!= NULL
&& conn
->clientConnection
!= NULL
? conn
->clientConnection
->rfc931
: dash_str
);
4580 * hack for ident ACL. It needs to get full addresses, and a place to store
4581 * the ident result on persistent connections...
4583 /* connection oriented auth also needs these two lines for it's operation. */
4587 CBDATA_CLASS_INIT(ConnStateData
);
4590 ConnStateData::transparent() const
4592 return clientConnection
!= NULL
&& (clientConnection
->flags
& (COMM_TRANSPARENT
|COMM_INTERCEPTION
));
4596 ConnStateData::reading() const
4598 return reader
!= NULL
;
4602 ConnStateData::stopReading()
4605 Comm::ReadCancel(clientConnection
->fd
, reader
);
4611 ConnStateData::expectRequestBody(int64_t size
)
4613 bodyPipe
= new BodyPipe(this);
4615 bodyPipe
->setBodySize(size
);
4617 startDechunkingRequest();
4622 ConnStateData::mayNeedToReadMoreBody() const
4625 return 0; // request without a body or read/produced all body bytes
4627 if (!bodyPipe
->bodySizeKnown())
4628 return -1; // probably need to read more, but we cannot be sure
4630 const int64_t needToProduce
= bodyPipe
->unproducedSize();
4631 const int64_t haveAvailable
= static_cast<int64_t>(in
.buf
.length());
4633 if (needToProduce
<= haveAvailable
)
4634 return 0; // we have read what we need (but are waiting for pipe space)
4636 return needToProduce
- haveAvailable
;
4640 ConnStateData::stopReceiving(const char *error
)
4642 debugs(33, 4, HERE
<< "receiving error (" << clientConnection
<< "): " << error
<<
4643 "; old sending error: " <<
4644 (stoppedSending() ? stoppedSending_
: "none"));
4646 if (const char *oldError
= stoppedReceiving()) {
4647 debugs(33, 3, HERE
<< "already stopped receiving: " << oldError
);
4648 return; // nothing has changed as far as this connection is concerned
4651 stoppedReceiving_
= error
;
4653 if (const char *sendError
= stoppedSending()) {
4654 debugs(33, 3, HERE
<< "closing because also stopped sending: " << sendError
);
4655 clientConnection
->close();
4660 ConnStateData::expectNoForwarding()
4662 if (bodyPipe
!= NULL
) {
4663 debugs(33, 4, HERE
<< "no consumer for virgin body " << bodyPipe
->status());
4664 bodyPipe
->expectNoConsumption();
4668 /// initialize dechunking state
4670 ConnStateData::startDechunkingRequest()
4672 Must(bodyPipe
!= NULL
);
4673 debugs(33, 5, HERE
<< "start dechunking" << bodyPipe
->status());
4674 assert(!in
.bodyParser
);
4675 in
.bodyParser
= new ChunkedCodingParser
;
4678 /// put parsed content into input buffer and clean up
4680 ConnStateData::finishDechunkingRequest(bool withSuccess
)
4682 debugs(33, 5, HERE
<< "finish dechunking: " << withSuccess
);
4684 if (bodyPipe
!= NULL
) {
4685 debugs(33, 7, HERE
<< "dechunked tail: " << bodyPipe
->status());
4686 BodyPipe::Pointer myPipe
= bodyPipe
;
4687 stopProducingFor(bodyPipe
, withSuccess
); // sets bodyPipe->bodySize()
4688 Must(!bodyPipe
); // we rely on it being nil after we are done with body
4690 Must(myPipe
->bodySizeKnown());
4691 ClientSocketContext::Pointer context
= getCurrentContext();
4692 if (context
!= NULL
&& context
->http
&& context
->http
->request
)
4693 context
->http
->request
->setContentLength(myPipe
->bodySize());
4697 delete in
.bodyParser
;
4698 in
.bodyParser
= NULL
;
4701 ConnStateData::In::In() :
4706 ConnStateData::In::~In()
4708 delete bodyParser
; // TODO: pool
4712 ConnStateData::sendControlMsg(HttpControlMsg msg
)
4715 debugs(33, 3, HERE
<< "ignoring 1xx due to earlier closure");
4719 ClientSocketContext::Pointer context
= getCurrentContext();
4720 if (context
!= NULL
) {
4721 context
->writeControlMsg(msg
); // will call msg.cbSuccess
4725 debugs(33, 3, HERE
<< " closing due to missing context for 1xx");
4726 clientConnection
->close();
4729 /// Our close handler called by Comm when the pinned connection is closed
4731 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams
&io
)
4733 // FwdState might repin a failed connection sooner than this close
4734 // callback is called for the failed connection.
4735 assert(pinning
.serverConnection
== io
.conn
);
4736 pinning
.closeHandler
= NULL
; // Comm unregisters handlers before calling
4737 const bool sawZeroReply
= pinning
.zeroReply
; // reset when unpinning
4738 unpinConnection(false);
4741 // if the server control connection is gone, reset state to login again
4742 // TODO: merge with similar code in FtpHandleUserRequest()
4743 debugs(33, 5, "will need to re-login due to FTP server closure");
4744 ftp
.readGreeting
= false;
4745 FtpChangeState(this, ConnStateData::FTP_BEGIN
, "server closure");
4746 // XXX: Not enough. Gateway::ServerStateData::sendCommand() will not
4747 // re-login because clientState() is not ConnStateData::FTP_CONNECTED.
4750 if (sawZeroReply
&& clientConnection
!= NULL
) {
4751 debugs(33, 3, "Closing client connection on pinned zero reply.");
4752 clientConnection
->close();
4758 ConnStateData::pinConnection(const Comm::ConnectionPointer
&pinServer
, HttpRequest
*request
, CachePeer
*aPeer
, bool auth
, bool monitor
)
4760 if (!Comm::IsConnOpen(pinning
.serverConnection
) ||
4761 pinning
.serverConnection
->fd
!= pinServer
->fd
)
4762 pinNewConnection(pinServer
, request
, aPeer
, auth
);
4765 startPinnedConnectionMonitoring();
4769 ConnStateData::pinNewConnection(const Comm::ConnectionPointer
&pinServer
, HttpRequest
*request
, CachePeer
*aPeer
, bool auth
)
4771 unpinConnection(true); // closes pinned connection, if any, and resets fields
4773 pinning
.serverConnection
= pinServer
;
4775 debugs(33, 3, HERE
<< pinning
.serverConnection
);
4777 Must(pinning
.serverConnection
!= NULL
);
4779 // when pinning an SSL bumped connection, the request may be NULL
4780 const char *pinnedHost
= "[unknown]";
4782 pinning
.host
= xstrdup(request
->GetHost());
4783 pinning
.port
= request
->port
;
4784 pinnedHost
= pinning
.host
;
4786 pinning
.port
= pinServer
->remote
.port();
4788 pinning
.pinned
= true;
4790 pinning
.peer
= cbdataReference(aPeer
);
4791 pinning
.auth
= auth
;
4792 char stmp
[MAX_IPSTRLEN
];
4793 char desc
[FD_DESC_SZ
];
4794 snprintf(desc
, FD_DESC_SZ
, "%s pinned connection for %s (%d)",
4795 (auth
|| !aPeer
) ? pinnedHost
: aPeer
->name
,
4796 clientConnection
->remote
.toUrl(stmp
,MAX_IPSTRLEN
),
4797 clientConnection
->fd
);
4798 fd_note(pinning
.serverConnection
->fd
, desc
);
4800 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
4801 pinning
.closeHandler
= JobCallback(33, 5,
4802 Dialer
, this, ConnStateData::clientPinnedConnectionClosed
);
4803 // remember the pinned connection so that cb does not unpin a fresher one
4804 typedef CommCloseCbParams Params
;
4805 Params
¶ms
= GetCommParams
<Params
>(pinning
.closeHandler
);
4806 params
.conn
= pinning
.serverConnection
;
4807 comm_add_close_handler(pinning
.serverConnection
->fd
, pinning
.closeHandler
);
4810 /// [re]start monitoring pinned connection for server closures so that we can
4811 /// propagate them to an _idle_ client pinned to the server
4813 ConnStateData::startPinnedConnectionMonitoring()
4815 if (pinning
.readHandler
!= NULL
)
4816 return; // already monitoring
4818 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
4819 pinning
.readHandler
= JobCallback(33, 3,
4820 Dialer
, this, ConnStateData::clientPinnedConnectionRead
);
4821 Comm::Read(pinning
.serverConnection
, pinning
.readHandler
);
4825 ConnStateData::stopPinnedConnectionMonitoring()
4827 if (pinning
.readHandler
!= NULL
) {
4828 Comm::ReadCancel(pinning
.serverConnection
->fd
, pinning
.readHandler
);
4829 pinning
.readHandler
= NULL
;
4833 /// Our read handler called by Comm when the server either closes an idle pinned connection or
4834 /// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
4836 ConnStateData::clientPinnedConnectionRead(const CommIoCbParams
&io
)
4838 pinning
.readHandler
= NULL
; // Comm unregisters handlers before calling
4840 if (io
.flag
== Comm::ERR_CLOSING
)
4841 return; // close handler will clean up
4843 // We could use getConcurrentRequestCount(), but this may be faster.
4844 const bool clientIsIdle
= !getCurrentContext();
4846 debugs(33, 3, "idle pinned " << pinning
.serverConnection
<< " read " <<
4847 io
.size
<< (clientIsIdle
? " with idle client" : ""));
4849 assert(pinning
.serverConnection
== io
.conn
);
4850 pinning
.serverConnection
->close();
4852 // If we are still sending data to the client, do not close now. When we are done sending,
4853 // ClientSocketContext::keepaliveNextRequest() checks pinning.serverConnection and will close.
4854 // However, if we are idle, then we must close to inform the idle client and minimize races.
4855 if (clientIsIdle
&& clientConnection
!= NULL
)
4856 clientConnection
->close();
4859 const Comm::ConnectionPointer
4860 ConnStateData::validatePinnedConnection(HttpRequest
*request
, const CachePeer
*aPeer
)
4862 debugs(33, 7, HERE
<< pinning
.serverConnection
);
4865 if (!Comm::IsConnOpen(pinning
.serverConnection
))
4867 else if (pinning
.auth
&& pinning
.host
&& request
&& strcasecmp(pinning
.host
, request
->GetHost()) != 0)
4869 else if (request
&& pinning
.port
!= request
->port
)
4871 else if (pinning
.peer
&& !cbdataReferenceValid(pinning
.peer
))
4873 else if (aPeer
!= pinning
.peer
)
4877 /* The pinning info is not safe, remove any pinning info */
4878 unpinConnection(true);
4881 return pinning
.serverConnection
;
4884 Comm::ConnectionPointer
4885 ConnStateData::borrowPinnedConnection(HttpRequest
*request
, const CachePeer
*aPeer
)
4887 debugs(33, 7, pinning
.serverConnection
);
4888 if (validatePinnedConnection(request
, aPeer
) != NULL
)
4889 stopPinnedConnectionMonitoring();
4891 return pinning
.serverConnection
; // closed if validation failed
4895 ConnStateData::unpinConnection(const bool andClose
)
4897 debugs(33, 3, HERE
<< pinning
.serverConnection
);
4900 cbdataReferenceDone(pinning
.peer
);
4902 if (Comm::IsConnOpen(pinning
.serverConnection
)) {
4903 if (pinning
.closeHandler
!= NULL
) {
4904 comm_remove_close_handler(pinning
.serverConnection
->fd
, pinning
.closeHandler
);
4905 pinning
.closeHandler
= NULL
;
4908 stopPinnedConnectionMonitoring();
4910 // close the server side socket if requested
4912 pinning
.serverConnection
->close();
4913 pinning
.serverConnection
= NULL
;
4916 safe_free(pinning
.host
);
4918 pinning
.zeroReply
= false;
4920 /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
4921 * connection has gone away */
4925 ConnStateData::ftpBuildUri(const char *file
)
4928 ftp
.uri
.append(ftp
.host
);
4929 if (port
->ftp_track_dirs
&& ftp
.workingDir
.size()) {
4930 if (ftp
.workingDir
[0] != '/')
4931 ftp
.uri
.append("/");
4932 ftp
.uri
.append(ftp
.workingDir
);
4935 if (ftp
.uri
[ftp
.uri
.size() - 1] != '/')
4936 ftp
.uri
.append("/");
4938 if (port
->ftp_track_dirs
&& file
) {
4939 //remove any '/' from the beginning of path
4940 while (*file
== '/')
4942 ftp
.uri
.append(file
);
4945 return ftp
.uri
.termedBuf();
4949 ConnStateData::ftpSetWorkingDir(const char *dir
)
4951 ftp
.workingDir
= dir
;
4955 FtpAcceptDataConnection(const CommAcceptCbParams
¶ms
)
4957 ConnStateData
*connState
= static_cast<ConnStateData
*>(params
.data
);
4959 if (params
.flag
!= Comm::OK
) {
4960 // Its possible the call was still queued when the client disconnected
4961 debugs(33, 2, HERE
<< connState
->ftp
.dataListenConn
<< ": accept "
4962 "failure: " << xstrerr(params
.xerrno
));
4966 debugs(33, 4, "accepted " << params
.conn
);
4967 fd_note(params
.conn
->fd
, "passive client ftp data");
4968 ++incoming_sockets_accepted
;
4970 if (!connState
->clientConnection
) {
4971 debugs(33, 5, "late data connection?");
4972 FtpCloseDataConnection(connState
); // in case we are still listening
4973 params
.conn
->close();
4975 if (params
.conn
->remote
!= connState
->clientConnection
->remote
) {
4976 debugs(33, 2, "rogue data conn? ctrl: " << connState
->clientConnection
->remote
);
4977 params
.conn
->close();
4978 // Some FTP servers close control connection here, but it may make
4979 // things worse from DoS p.o.v. and no better from data stealing p.o.v.
4981 FtpCloseDataConnection(connState
);
4982 connState
->ftp
.dataConn
= params
.conn
;
4983 connState
->ftp
.uploadAvailSize
= 0;
4984 debugs(33, 7, "ready for data");
4985 if (connState
->ftp
.onDataAcceptCall
!= NULL
) {
4986 AsyncCall::Pointer call
= connState
->ftp
.onDataAcceptCall
;
4987 connState
->ftp
.onDataAcceptCall
= NULL
;
4988 // If we got an upload request, start reading data from the client.
4989 if (connState
->ftp
.state
== ConnStateData::FTP_HANDLE_UPLOAD_REQUEST
)
4990 connState
->readSomeFtpData();
4992 Must(connState
->ftp
.state
== ConnStateData::FTP_HANDLE_DATA_REQUEST
);
4995 mb
.Printf("150 Data connection opened.\r\n");
4996 Comm::Write(connState
->clientConnection
, &mb
, call
);
5002 FtpCloseDataConnection(ConnStateData
*conn
)
5004 if (conn
->ftp
.listener
!= NULL
) {
5005 conn
->ftp
.listener
->cancel("no longer needed");
5006 conn
->ftp
.listener
= NULL
;
5009 if (Comm::IsConnOpen(conn
->ftp
.dataListenConn
)) {
5010 debugs(33, 5, HERE
<< "FTP closing client data listen socket: " <<
5011 *conn
->ftp
.dataListenConn
);
5012 conn
->ftp
.dataListenConn
->close();
5014 conn
->ftp
.dataListenConn
= NULL
;
5016 if (conn
->ftp
.reader
!= NULL
) {
5017 // Comm::ReadCancel can deal with negative FDs
5018 Comm::ReadCancel(conn
->ftp
.dataConn
->fd
, conn
->ftp
.reader
);
5019 conn
->ftp
.reader
= NULL
;
5022 if (Comm::IsConnOpen(conn
->ftp
.dataConn
)) {
5023 debugs(33, 5, HERE
<< "FTP closing client data connection: " <<
5024 *conn
->ftp
.dataConn
);
5025 conn
->ftp
.dataConn
->close();
5027 conn
->ftp
.dataConn
= NULL
;
5030 /// Writes FTP [error] response before we fully parsed the FTP request and
5031 /// created the corresponding HTTP request wrapper for that FTP request.
5033 FtpWriteEarlyReply(ConnStateData
*connState
, const int code
, const char *msg
)
5035 debugs(33, 7, HERE
<< code
<< ' ' << msg
);
5036 assert(99 < code
&& code
< 1000);
5040 mb
.Printf("%i %s\r\n", code
, msg
);
5042 AsyncCall::Pointer call
= commCbCall(33, 5, "FtpWroteEarlyReply",
5043 CommIoCbPtrFun(&FtpWroteEarlyReply
, connState
));
5044 Comm::Write(connState
->clientConnection
, &mb
, call
);
5046 connState
->flags
.readMore
= false;
5048 // TODO: Create master transaction. Log it in FtpWroteEarlyReply.
/// Writes an already-formatted FTP reply (in mb) to the client control
/// connection and schedules FtpWroteReply() as the write-completion callback.
FtpWriteReply(ClientSocketContext *context, MemBuf &mb)
    debugs(11, 2, "FTP Client " << context->clientConnection);
    debugs(11, 2, "FTP Client REPLY:\n---------\n" << mb.buf <<
    AsyncCall::Pointer call = commCbCall(33, 5, "FtpWroteReply",
                                         CommIoCbPtrFun(&FtpWroteReply, context));
    Comm::Write(context->clientConnection, &mb, call);
/// Writes a Squid-generated FTP reply with the given code and message.
/// When the HTTP reply wrapper carries the origin server's FTP status and
/// reason (HDR_FTP_STATUS + HDR_FTP_REASON), the server's reply lines are
/// embedded in a multi-line "code-" response via FtpPrintReply().
FtpWriteCustomReply(ClientSocketContext *context, const int code, const char *msg, const HttpReply *reply)
    debugs(33, 7, HERE << code << ' ' << msg);
    assert(99 < code && code < 1000); // enforce a 3-digit FTP status code
    // include server details only if both FTP status and reason are present
    const bool sendDetails = reply != NULL &&
                             reply->header.has(HDR_FTP_STATUS) && reply->header.has(HDR_FTP_REASON);
    // multi-line response: "code-msg", indented server reply, "code " terminator
    mb.Printf("%i-%s\r\n", code, msg);
    mb.Printf(" Server reply:\r\n");
    FtpPrintReply(mb, reply, " ");
    mb.Printf("%i \r\n", code);
    // single-line response (presumably the !sendDetails branch; the else
    // keyword is elided in this extraction — confirm against the full file)
    mb.Printf("%i %s\r\n", code, msg);
    FtpWriteReply(context, mb);
/// Transitions the client-side FTP state machine to newState, logging the
/// change (or the lack of one) together with the caller-supplied reason.
FtpChangeState(ConnStateData *connState, const ConnStateData::FtpState newState, const char *reason)
    if (connState->ftp.state == newState) {
        debugs(33, 3, "client state unchanged at " << connState->ftp.state <<
               " because " << reason);
        // NOTE(review): assignment is a no-op here (state already equals
        // newState); the original likely returned after logging — lines are
        // elided in this extraction, confirm against the full file.
        connState->ftp.state = newState;
    debugs(33, 3, "client state was " << connState->ftp.state <<
           ", now " << newState << " because " << reason);
    connState->ftp.state = newState;
/** Parse an FTP request
 *
 * \note Sets result->flags.parsed_ok to 0 if failed to parse the request,
 *       to 1 if the request was correctly parsed.
 * \param[in] connState a ConnStateData. The caller must make sure it is not null
 * \param[out] method_p will be set as a side-effect of the parsing.
 *             Pointed-to value will be set to Http::METHOD_NONE in case of
 * \param[out] http_ver will be set as a side-effect of the parsing
 * \return NULL on incomplete requests,
 *         a ClientSocketContext structure on success or failure.
 */
static ClientSocketContext *
FtpParseRequest(ConnStateData *connState, HttpRequestMethod *method_p, Http::ProtocolVersion *http_ver)
    // FTP commands are wrapped into HTTP/1.1 requests
    *http_ver = Http::ProtocolVersion(1, 1);
    // TODO: Use tokenizer for parsing instead of raw pointer manipulation.
    const char *inBuf = connState->in.buf.rawContent();
    // locate the end of the command line (LF), within the configured limit
    const char *const eor =
        static_cast<const char *>(memchr(inBuf, '\n',
                                         min(static_cast<size_t>(connState->in.buf.length()), Config.maxRequestHeaderSize)));
    if (eor == NULL && connState->in.buf.length() >= Config.maxRequestHeaderSize) {
        // no line terminator within the limit: reject an oversized request
        FtpChangeState(connState, ConnStateData::FTP_ERROR, "huge req");
        FtpWriteEarlyReply(connState, 421, "Too large request");
    debugs(33, 5, HERE << "Incomplete request, waiting for end of request");
    const size_t req_sz = eor + 1 - inBuf; // bytes consumed, including LF
    // skip leading whitespaces
    const char *boc = inBuf; // beginning of command
    while (boc < eor && isspace(*boc)) ++boc;
    debugs(33, 5, HERE << "Empty request, ignoring");
    connNoteUseOfBuffer(connState, req_sz);
    const char *eoc = boc; // end of command
    while (eoc < eor && !isspace(*eoc)) ++eoc;
    // NUL-terminate the command name in place
    connState->in.buf.setAt(eoc - inBuf, '\0');
    const char *bop = eoc + 1; // beginning of parameter
    while (bop < eor && isspace(*bop)) ++bop;
    // trim trailing whitespace off the parameter and NUL-terminate it
    const char *eop = eor - 1;
    while (isspace(*eop)) --eop;
    connState->in.buf.setAt(eop + 1 - inBuf, '\0');
    debugs(33, 7, HERE << "Parsed FTP command " << boc << " with " <<
           (bop == NULL ? "no " : "") << "parameters" <<
           (bop != NULL ? ": " : "") << bop);
    // TODO: Use SBuf instead of String
    const String cmd = boc;
    String params = bop;
    connNoteUseOfBuffer(connState, req_sz);
    if (!connState->ftp.readGreeting) {
        // the first command must be USER
        if (!connState->pinning.pinned && cmd.caseCmp("USER") != 0) {
            FtpWriteEarlyReply(connState, 530, "Must login first");
    // We need to process USER request now because it sets ftp server Hostname.
    if (cmd.caseCmp("USER") == 0 &&
        !FtpHandleUserRequest(connState, cmd, params))
    if (!FtpSupportedCommand(cmd)) {
        FtpWriteEarlyReply(connState, 502, "Unknown or unsupported command");
    // upload commands (APPE/STOR/STOU) become PUT; everything else GET
    *method_p = !cmd.caseCmp("APPE") || !cmd.caseCmp("STOR") ||
                !cmd.caseCmp("STOU") ? Http::METHOD_PUT : Http::METHOD_GET;
    // only path-taking commands contribute their parameter to the fake URI
    const char *aPath = params.size() > 0 && Ftp::hasPathParameter(cmd) ?
                        params.termedBuf() : NULL;
    uri = xstrdup(connState->ftpBuildUri(aPath));
    HttpRequest *const request =
        HttpRequest::CreateFromUrlAndMethod(uri, *method_p);
    if (request == NULL) {
        debugs(33, 5, HERE << "Invalid FTP URL: " << connState->ftp.uri);
        FtpWriteEarlyReply(connState, 501, "Invalid host");
        connState->ftp.uri.clean();
    request->http_ver = *http_ver;
    // Our fake Request-URIs are not distinctive enough for caching to work
    request->flags.cachable = false; // XXX: reset later by maybeCacheable()
    request->flags.noCache = true;
    // carry the original FTP command and arguments in dedicated headers
    request->header.putStr(HDR_FTP_COMMAND, cmd.termedBuf());
    request->header.putStr(HDR_FTP_ARGUMENTS, params.termedBuf() != NULL ?
                           params.termedBuf() : "");
    if (*method_p == Http::METHOD_PUT) {
        // uploads use deferred, chunked request bodies
        request->header.putStr(HDR_EXPECT, "100-continue");
        request->header.putStr(HDR_TRANSFER_ENCODING, "chunked");
    ClientHttpRequest *const http = new ClientHttpRequest(connState);
    http->request = request;
    HTTPMSGLOCK(http->request);
    http->req_sz = req_sz;
    ClientSocketContext *const result =
        new ClientSocketContext(connState->clientConnection, http);
    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;
    // wire up the standard client stream: reply generator -> socket writer
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);
    result->registerWithConn();
    result->flags.parsed_ok = 1;
    // one command at a time: stop reading until this one is answered
    connState->flags.readMore = false;
/// Dispatches a server reply to the per-state handler (indexed by the
/// client-side FTP state) or, when no handler is registered for the state,
/// forwards the reply to the client verbatim. Also records the reply in the
/// access-log entry on first sight.
FtpHandleReply(ClientSocketContext *context, HttpReply *reply, StoreIOBuffer data)
    // remember the first reply for access logging
    if (context->http && context->http->al != NULL &&
        !context->http->al->reply && reply) {
        context->http->al->reply = reply;
        HTTPMSGLOCK(context->http->al->reply);
    // one entry per ConnStateData::FtpState value, in enum order;
    // NULL means "no special handling — just forward the reply"
    static FtpReplyHandler *handlers[] = {
        NULL, // FTP_CONNECTED
        FtpHandleFeatReply, // FTP_HANDLE_FEAT
        FtpHandlePasvReply, // FTP_HANDLE_PASV
        FtpHandlePortReply, // FTP_HANDLE_PORT
        FtpHandleDataReply, // FTP_HANDLE_DATA_REQUEST
        FtpHandleUploadReply, // FTP_HANDLE_UPLOAD_REQUEST
        FtpHandleEprtReply, // FTP_HANDLE_EPRT
        FtpHandleEpsvReply, // FTP_HANDLE_EPSV
        NULL, // FTP_HANDLE_CWD
        NULL, // FTP_HANDLE_PASS
        NULL, // FTP_HANDLE_CDUP
        FtpHandleErrorReply // FTP_ERROR
    const ConnStateData::FtpState state = context->getConn()->ftp.state;
    FtpReplyHandler *const handler = handlers[state];
    (*handler)(context, reply, data);
    FtpWriteForwardedReply(context, reply);
/// Handles the server's FEAT reply: clones it, strips features Squid cannot
/// relay (unsupported commands), advertises EPRT/EPSV, and forwards the
/// filtered feature list to the client.
FtpHandleFeatReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    if (context->http->request->errType != ERR_NONE) {
        FtpWriteCustomReply(context, 502, "Server does not support FEAT", reply);
    // work on a clone so the original reply stays untouched
    HttpReply *filteredReply = reply->clone();
    HttpHeader &filteredHeader = filteredReply->header;
    // Remove all unsupported commands from the response wrapper.
    int deletedCount = 0;
    HttpHeaderPos pos = HttpHeaderInitPos;
    bool hasEPRT = false;
    bool hasEPSV = false;
    int prependSpaces = 1; // indentation used by the server's feature lines
    while (const HttpHeaderEntry *e = filteredHeader.getEntry(&pos)) {
        if (e->id == HDR_FTP_PRE) {
            // assume RFC 2389 FEAT response format, quoted by Squid:
            // <"> SP NAME [SP PARAMS] <">
            // but accommodate MS servers sending four SPs before NAME
            if (e->value.size() < 4)
            const char *raw = e->value.termedBuf();
            if (raw[0] != '"' || raw[1] != ' ')
            const char *beg = raw + 1 + strspn(raw + 1, " "); // after quote and spaces
            // command name ends with (SP parameter) or quote
            const char *end = beg + strcspn(beg, " \"");
            // compute the number of spaces before the command
            prependSpaces = beg - raw - 1;
            const String cmd = e->value.substr(beg-raw, end-raw);
            if (!FtpSupportedCommand(cmd))
                filteredHeader.delAt(pos, deletedCount);
            else if (cmd == "EPSV")
    // advertise EPRT/EPSV, matching the server's indentation
    int insertedCount = 0;
    snprintf(buf, sizeof(buf), "\"%*s\"", prependSpaces + 4, "EPRT");
    filteredHeader.putStr(HDR_FTP_PRE, buf);
    snprintf(buf, sizeof(buf), "\"%*s\"", prependSpaces + 4, "EPSV");
    filteredHeader.putStr(HDR_FTP_PRE, buf);
    if (deletedCount || insertedCount) {
        filteredHeader.refreshMask(); // header mask is stale after edits
        debugs(33, 5, "deleted " << deletedCount << " inserted " << insertedCount);
    FtpWriteForwardedReply(context, filteredReply);
/// Handles the (faked) PASV reply: opens a passive data-listening socket on
/// the client side, subscribes FtpAcceptDataConnection() for the incoming
/// data connection, and sends the client a 227 response advertising the
/// chosen address and port.
FtpHandlePasvReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    if (context->http->request->errType != ERR_NONE) {
        FtpWriteCustomReply(context, 502, "Server does not support PASV", reply);
    // drop any previous data connection before opening a new listener
    FtpCloseDataConnection(context->getConn());
    Comm::ConnectionPointer conn = new Comm::Connection;
    ConnStateData * const connState = context->getConn();
    conn->flags = COMM_NONBLOCKING;
    // in transparent mode listen on the http_port address; otherwise reuse
    // the control connection's local address
    conn->local = connState->transparent() ?
                  connState->port->s : context->clientConnection->local;
    conn->local.port(0); // let the OS pick an ephemeral port
    const char *const note = connState->ftp.uri.termedBuf();
    comm_open_listener(SOCK_STREAM, IPPROTO_TCP, conn, note);
    if (!Comm::IsConnOpen(conn)) {
        debugs(5, DBG_CRITICAL, HERE << "comm_open_listener failed:" <<
               conn->local << " error: " << errno);
        FtpWriteCustomReply(context, 451, "Internal error");
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    RefCount<AcceptCall> subCall = commCbCall(5, 5, "FtpAcceptDataConnection",
                                              CommAcceptCbPtrFun(FtpAcceptDataConnection, connState));
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    connState->ftp.listener = subCall.getRaw();
    connState->ftp.dataListenConn = conn;
    AsyncJob::Start(new Comm::TcpAcceptor(conn, note, sub));
    char addr[MAX_IPSTRLEN];
    // remote server in interception setups and local address otherwise
    const Ip::Address &server = connState->transparent() ?
                                context->clientConnection->local : conn->local;
    server.toStr(addr, MAX_IPSTRLEN, AF_INET);
    addr[MAX_IPSTRLEN - 1] = '\0';
    // presumably rewrites dots to commas for the 227 h1,h2,h3,h4 syntax;
    // loop body is elided in this extraction — confirm against full file
    for (char *c = addr; *c != '\0'; ++c) {
    // conn->fd is the client data connection (and its local port)
    const unsigned short port = comm_local_port(conn->fd);
    conn->local.port(port);
    // In interception setups, we combine remote server address with a
    // local port number and hope that traffic will be redirected to us.
    // Do not use "227 =a,b,c,d,p1,p2" format or omit parens: some nf_ct_ftp
    // versions block responses that use those alternative syntax rules!
    mb.Printf("227 Entering Passive Mode (%s,%i,%i).\r\n",
              static_cast<int>(port / 256),
              static_cast<int>(port % 256));
    debugs(11, 3, Raw("writing", mb.buf, mb.size));
    FtpWriteReply(context, mb);
/// Handles the server reply to a client PORT command that Squid converted to
/// PASV: acknowledges success to the client with a 200 response.
FtpHandlePortReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    if (context->http->request->errType != ERR_NONE) {
        FtpWriteCustomReply(context, 502, "Server does not support PASV (converted from PORT)", reply);
    FtpWriteCustomReply(context, 200, "PORT successfully converted to PASV.");
    // and wait for RETR
/// Handles replies while in FTP_ERROR state: clears the URI if we never
/// reached the server and sends a terminal 421 error to the client.
FtpHandleErrorReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    ConnStateData *const connState = context->getConn();
    if (!connState->pinning.pinned) // we failed to connect to server
        connState->ftp.uri.clean();
    // 421: we will close due to FTP_ERROR
    FtpWriteErrorReply(context, reply, 421);
/// Handles server replies during a data-download transfer: forwards error
/// replies on the control channel, then relays received body bytes to the
/// client over the data connection via asynchronous writes.
FtpHandleDataReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    ConnStateData *const conn = context->getConn();
    // a non-200 wrapper status means the transfer failed on the server side
    if (reply != NULL && reply->sline.status() != Http::scOkay) {
        FtpWriteForwardedReply(context, reply);
        if (conn && Comm::IsConnOpen(conn->ftp.dataConn)) {
            debugs(33, 3, "closing " << conn->ftp.dataConn << " on KO reply");
            FtpCloseDataConnection(conn);
    if (!conn->ftp.dataConn) {
        // We got STREAM_COMPLETE (or error) and closed the client data conn.
        debugs(33, 3, "ignoring FTP srv data response after clt data closure");
    if (!FtpCheckDataConnPost(context)) {
        FtpWriteCustomReply(context, 425, "Data connection is not established.");
        FtpCloseDataConnection(conn);
    debugs(33, 7, HERE << data.length);
    if (data.length <= 0) {
        // no payload: drive the completion path directly
        FtpWroteReplyData(conn->clientConnection, NULL, 0, Comm::OK, 0, context);
    // copy the payload into a MemBuf and write it to the data connection
    mb.init(data.length + 1, data.length + 1);
    mb.append(data.data, data.length);
    AsyncCall::Pointer call = commCbCall(33, 5, "FtpWroteReplyData",
                                         CommIoCbPtrFun(&FtpWroteReplyData, context));
    Comm::Write(conn->ftp.dataConn, &mb, call);
    context->noteSentBodyBytes(data.length);
/// Comm::Write completion callback for FTP data-connection writes: on success
/// pulls more body data or, at end of stream, sends the final control-channel
/// status (226 on success, 451 on stream failure) and closes the data
/// connection. \param data is the ClientSocketContext of the transfer.
FtpWroteReplyData(const Comm::ConnectionPointer &conn, char *bufnotused, size_t size, Comm::Flag errflag, int xerrno, void *data)
    if (errflag == Comm::ERR_CLOSING)
    ClientSocketContext *const context = static_cast<ClientSocketContext *>(data);
    ConnStateData *const connState = context->getConn();
    if (errflag != Comm::OK) {
        debugs(33, 3, HERE << "FTP reply data writing failed: " <<
        FtpCloseDataConnection(connState);
        FtpWriteCustomReply(context, 426, "Data connection error; transfer aborted");
    assert(context->http);
    context->http->out.size += size; // account the written body bytes
    switch (context->socketState()) {
        debugs(33, 3, "Keep going");
        context->pullData(); // request the next body chunk
    case STREAM_COMPLETE:
        debugs(33, 3, HERE << "FTP reply data transfer successfully complete");
        FtpWriteCustomReply(context, 226, "Transfer complete");
    case STREAM_UNPLANNED_COMPLETE:
        debugs(33, 3, HERE << "FTP reply data transfer failed: STREAM_UNPLANNED_COMPLETE");
        FtpWriteCustomReply(context, 451, "Server error; transfer aborted");
        debugs(33, 3, HERE << "FTP reply data transfer failed: STREAM_FAILED");
        FtpWriteCustomReply(context, 451, "Server error; transfer aborted");
    fatal("unreachable code");
    FtpCloseDataConnection(connState);
/// Handles server replies during an upload: simply forwards the server's
/// response to the client on the control channel.
FtpHandleUploadReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    FtpWriteForwardedReply(context, reply);
    // note that the client data connection may already be closed by now
/// Convenience overload: forwards the server reply to the client using the
/// standard FtpWroteReply() write-completion callback.
FtpWriteForwardedReply(ClientSocketContext *context, const HttpReply *reply)
    const AsyncCall::Pointer call = commCbCall(33, 5, "FtpWroteReply",
                                               CommIoCbPtrFun(&FtpWroteReply, context));
    FtpWriteForwardedReply(context, reply, call);
/// Handles the server reply to a client EPRT command that Squid converted to
/// PASV: acknowledges success to the client with a 200 response.
FtpHandleEprtReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    if (context->http->request->errType != ERR_NONE) {
        FtpWriteCustomReply(context, 502, "Server does not support PASV (converted from EPRT)", reply);
    FtpWriteCustomReply(context, 200, "EPRT successfully converted to PASV.");
    // and wait for RETR
/// Handles the (faked) EPSV reply: opens a passive data-listening socket on
/// the client side (like FtpHandlePasvReply) and sends the client a 229
/// extended-passive response advertising only the listening port.
FtpHandleEpsvReply(ClientSocketContext *context, const HttpReply *reply, StoreIOBuffer data)
    if (context->http->request->errType != ERR_NONE) {
        FtpWriteCustomReply(context, 502, "Cannot connect to server", reply);
    // drop any previous data connection before opening a new listener
    FtpCloseDataConnection(context->getConn());
    Comm::ConnectionPointer conn = new Comm::Connection;
    ConnStateData * const connState = context->getConn();
    conn->flags = COMM_NONBLOCKING;
    // in transparent mode listen on the http_port address; otherwise reuse
    // the control connection's local address
    conn->local = connState->transparent() ?
                  connState->port->s : context->clientConnection->local;
    conn->local.port(0); // let the OS pick an ephemeral port
    const char *const note = connState->ftp.uri.termedBuf();
    comm_open_listener(SOCK_STREAM, IPPROTO_TCP, conn, note);
    if (!Comm::IsConnOpen(conn)) {
        debugs(5, DBG_CRITICAL, "comm_open_listener failed: " <<
               conn->local << " error: " << errno);
        FtpWriteCustomReply(context, 451, "Internal error");
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    RefCount<AcceptCall> subCall = commCbCall(5, 5, "FtpAcceptDataConnection",
                                              CommAcceptCbPtrFun(FtpAcceptDataConnection, connState));
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    connState->ftp.listener = subCall.getRaw();
    connState->ftp.dataListenConn = conn;
    AsyncJob::Start(new Comm::TcpAcceptor(conn, note, sub));
    // conn->fd is the client data connection (and its local port)
    const unsigned int port = comm_local_port(conn->fd);
    conn->local.port(port);
    // In interception setups, we combine remote server address with a
    // local port number and hope that traffic will be redirected to us.
    // EPSV response carries only the port (RFC 2428 |||port| syntax)
    mb.Printf("229 Entering Extended Passive Mode (|||%u|)\r\n", port);
    debugs(11, 3, Raw("writing", mb.buf, mb.size));
    FtpWriteReply(context, mb);
/// writes FTP error response with given status and reply-derived error details
/// The response is multi-line: optional error-page name, optional errno
/// detail, optional adaptation (ICAP) metadata, then a terminating
/// "status reason" line taken from the reply's FTP reason or status line.
FtpWriteErrorReply(ClientSocketContext *context, const HttpReply *reply, const int status)
    assert(context->http);
    const HttpRequest *request = context->http->request;
    if (request->errType != ERR_NONE)
        mb.Printf("%i-%s\r\n", status, errorPageName(request->errType));
    if (request->errDetail > 0) {
        // XXX: > 0 may not always mean that this is an errno
        mb.Printf("%i-Error: (%d) %s\r\n", status,
                  strerror(request->errDetail));
    // XXX: Remove hard coded names. Use an error page template instead.
    const Adaptation::History::Pointer ah = request->adaptHistory();
    if (ah != NULL) { // XXX: add adapt::<all_h but use lastMeta here
        const String info = ah->allMeta.getByName("X-Response-Info");
        const String desc = ah->allMeta.getByName("X-Response-Desc");
        mb.Printf("%i-Information: %s\r\n", status, info.termedBuf());
        mb.Printf("%i-Description: %s\r\n", status, desc.termedBuf());
    assert(reply != NULL);
    // prefer the server-supplied FTP reason; fall back to the status line
    const char *reason = reply->header.has(HDR_FTP_REASON) ?
                         reply->header.getStr(HDR_FTP_REASON):
                         reply->sline.reason();
    mb.Printf("%i %s\r\n", status, reason); // error terminating line
    // TODO: errorpage.cc should detect FTP client and use
    // configurable FTP-friendly error templates which we should
    // write to the client "as is" instead of hiding most of the info
    FtpWriteReply(context, mb);
/// writes FTP response based on HTTP reply that is not an FTP-response wrapper
/// Resets the state machine to FTP_CONNECTED, closes the data connection, and
/// reports a 451 error while keeping the control connection open.
FtpWriteForwardedForeign(ClientSocketContext *context, const HttpReply *reply)
    ConnStateData *const connState = context->getConn();
    FtpChangeState(connState, ConnStateData::FTP_CONNECTED, "foreign reply");
    //Close the data connection.
    FtpCloseDataConnection(connState);
    // 451: We intend to keep the control connection open.
    FtpWriteErrorReply(context, reply, 451);
/// Forwards a server reply to the FTP client on the control connection.
/// Foreign (non-FTP-wrapper) replies are diverted to
/// FtpWriteForwardedForeign(). Preliminary 125/150 replies during data
/// transfers may start reading upload data, defer the write until the client
/// establishes the data connection, or be dropped when the transfer is done.
FtpWriteForwardedReply(ClientSocketContext *context, const HttpReply *reply, AsyncCall::Pointer call)
    assert(reply != NULL);
    const HttpHeader &header = reply->header;
    ConnStateData *const connState = context->getConn();
    // adaptation and forwarding errors lack HDR_FTP_STATUS
    if (!header.has(HDR_FTP_STATUS)) {
        FtpWriteForwardedForeign(context, reply);
    assert(header.has(HDR_FTP_REASON));
    const int status = header.getInt(HDR_FTP_STATUS);
    debugs(33, 7, HERE << "status: " << status);
    // Status 125 or 150 implies upload or data request, but we still check
    // the state in case the server is buggy.
    if ((status == 125 || status == 150) &&
        (connState->ftp.state == ConnStateData::FTP_HANDLE_UPLOAD_REQUEST ||
         connState->ftp.state == ConnStateData::FTP_HANDLE_DATA_REQUEST)) {
        if (FtpCheckDataConnPost(context)) {
            // If the data connection is ready, start reading data (here)
            // and forward the response to client (further below).
            debugs(33, 7, "data connection established, start data transfer");
            if (connState->ftp.state == ConnStateData::FTP_HANDLE_UPLOAD_REQUEST)
                connState->readSomeFtpData();
        // If we are waiting to accept the data connection, keep waiting.
        if (Comm::IsConnOpen(connState->ftp.dataListenConn)) {
            debugs(33, 7, "wait for the client to establish a data connection");
            // stash the write callback; FtpAcceptDataConnection fires it later
            connState->ftp.onDataAcceptCall = call;
            // TODO: Add connect timeout for passive connections listener?
            // TODO: Remember server response so that we can forward it?
        // Either the connection was establised and closed after the
        // data was transferred OR we failed to establish an active
        // data connection and already sent the error to the client.
        // In either case, there is nothing more to do.
        debugs(33, 7, "done with data OR active connection failed");
    FtpPrintReply(mb, reply);
    debugs(11, 2, "FTP Client " << context->clientConnection);
    debugs(11, 2, "FTP Client REPLY:\n---------\n" << mb.buf <<
    Comm::Write(context->clientConnection, &mb, call);
/// Serializes the FTP reply carried by the HTTP wrapper into mb: each
/// quoted HDR_FTP_PRE header becomes one preliminary line, followed by the
/// final "status reason" line built from HDR_FTP_STATUS/HDR_FTP_REASON.
FtpPrintReply(MemBuf &mb, const HttpReply *reply, const char *const prefix)
    const HttpHeader &header = reply->header;
    HttpHeaderPos pos = HttpHeaderInitPos;
    while (const HttpHeaderEntry *e = header.getEntry(&pos)) {
        if (e->id == HDR_FTP_PRE) {
            // unquote the preserved preliminary reply line before sending
            if (httpHeaderParseQuotedString(e->value.rawBuf(), e->value.size(), &raw))
                mb.Printf("%s\r\n", raw.termedBuf());
    if (header.has(HDR_FTP_STATUS)) {
        const char *reason = header.getStr(HDR_FTP_REASON);
        mb.Printf("%i %s\r\n", header.getInt(HDR_FTP_STATUS),
                  (reason ? reason : 0));
/// Comm::Write completion callback for FtpWriteEarlyReply(): accounts the
/// written bytes against the current transaction (if any) and resumes
/// reading the next FTP command from the client.
/// \param data is the ConnStateData of the control connection.
FtpWroteEarlyReply(const Comm::ConnectionPointer &conn, char *bufnotused, size_t size, Comm::Flag errflag, int xerrno, void *data)
    if (errflag == Comm::ERR_CLOSING)
    if (errflag != Comm::OK) {
        debugs(33, 3, HERE << "FTP reply writing failed: " << xstrerr(xerrno));
    ConnStateData *const connState = static_cast<ConnStateData *>(data);
    ClientSocketContext::Pointer context = connState->getCurrentContext();
    if (context != NULL && context->http) {
        // early replies are all "headers" from the HTTP accounting viewpoint
        context->http->out.size += size;
        context->http->out.headers_sz += size;
    // re-enable command reading that FtpWriteEarlyReply() suspended
    connState->flags.readMore = true;
    connState->readSomeData();
/// Comm::Write completion callback for control-connection replies: accounts
/// the written bytes, closes on FTP_ERROR state, and on a completed stream
/// resets to FTP_CONNECTED, finishes request dechunking if needed, and moves
/// on to the next request. \param data is the ClientSocketContext.
FtpWroteReply(const Comm::ConnectionPointer &conn, char *bufnotused, size_t size, Comm::Flag errflag, int xerrno, void *data)
    if (errflag == Comm::ERR_CLOSING)
    if (errflag != Comm::OK) {
        debugs(33, 3, HERE << "FTP reply writing failed: " <<
    ClientSocketContext *const context =
        static_cast<ClientSocketContext *>(data);
    ConnStateData *const connState = context->getConn();
    assert(context->http);
    // control-channel replies count as headers in the HTTP accounting
    context->http->out.size += size;
    context->http->out.headers_sz += size;
    if (connState->ftp.state == ConnStateData::FTP_ERROR) {
        debugs(33, 5, "closing on FTP server error");
    const clientStream_status_t socketState = context->socketState();
    debugs(33, 5, "FTP client stream state " << socketState);
    switch (socketState) {
    case STREAM_UNPLANNED_COMPLETE:
    case STREAM_COMPLETE:
        connState->flags.readMore = true; // ready for the next command
        FtpChangeState(connState, ConnStateData::FTP_CONNECTED, "FtpWroteReply");
        if (connState->in.bodyParser)
            connState->finishDechunkingRequest(false);
        context->keepaliveNextRequest();
/// Dispatches a parsed FTP command to its command-specific request handler
/// (or to the upload handler for PUT-mapped commands). Returns the handler's
/// verdict, or true (forward to server) when no handler matches.
FtpHandleRequest(ClientSocketContext *context, String &cmd, String &params) {
    // debug-dump the HTTP wrapper request we built for this FTP command
    if (HttpRequest *request = context->http->request) {
        MemBuf *mb = new MemBuf;
        packerToMemInit(&p, mb);
        debugs(11, 2, "FTP Client " << context->clientConnection);
        debugs(11, 2, "FTP Client REQUEST:\n---------\n" << mb->buf <<
    // command-name -> handler dispatch table
    static std::pair<const char *, FtpRequestHandler *> handlers[] = {
        std::make_pair("LIST", FtpHandleDataRequest),
        std::make_pair("NLST", FtpHandleDataRequest),
        std::make_pair("MLSD", FtpHandleDataRequest),
        std::make_pair("FEAT", FtpHandleFeatRequest),
        std::make_pair("PASV", FtpHandlePasvRequest),
        std::make_pair("PORT", FtpHandlePortRequest),
        std::make_pair("RETR", FtpHandleDataRequest),
        std::make_pair("EPRT", FtpHandleEprtRequest),
        std::make_pair("EPSV", FtpHandleEpsvRequest),
        std::make_pair("CWD", FtpHandleCwdRequest),
        std::make_pair("PASS", FtpHandlePassRequest),
        std::make_pair("CDUP", FtpHandleCdupRequest),
    FtpRequestHandler *handler = NULL;
    // PUT-mapped commands (APPE/STOR/STOU) default to the upload handler
    if (context->http->request->method == Http::METHOD_PUT)
        handler = FtpHandleUploadRequest;
    // a table match (case-insensitive) overrides the default
    for (size_t i = 0; i < sizeof(handlers) / sizeof(*handlers); ++i) {
        if (cmd.caseCmp(handlers[i].first) == 0) {
            handler = handlers[i].second;
    return handler != NULL ? (*handler)(context, cmd, params) : true;
/// Called to parse USER command, which is required to create an HTTP request
/// wrapper. Thus, errors are handled with FtpWriteEarlyReply() here.
/// Splits "login@host" parameters, normalizes raw-IPv6 hosts to bracketed
/// form, rebuilds the fake URI, and — when the target host changed after the
/// greeting — resets the session (data conn, pinned server conn, state).
FtpHandleUserRequest(ConnStateData *connState, const String &cmd, String &params)
    if (params.size() == 0) {
        FtpWriteEarlyReply(connState, 501, "Missing username");
    // the last '@' separates the username from the target host
    const String::size_type eou = params.rfind('@');
    if (eou == String::npos || eou + 1 >= params.size()) {
        FtpWriteEarlyReply(connState, 501, "Missing host");
    const String login = params.substr(0, eou);
    String host = params.substr(eou + 1, params.size());
    // If we can parse it as raw IPv6 address, then surround with "[]".
    // Otherwise (domain, IPv4, [bracketed] IPv6, garbage, etc), use as is.
    if (host.pos(":")) {
        char ipBuf[MAX_IPSTRLEN];
        ipa = host.termedBuf();
        if (!ipa.isAnyAddr()) {
            ipa.toHostStr(ipBuf, MAX_IPSTRLEN);
    connState->ftp.host = host;
    // remember the previous URI to detect a host change after the greeting
    if (connState->ftp.readGreeting)
        oldUri = connState->ftp.uri;
    connState->ftpSetWorkingDir(NULL);
    connState->ftpBuildUri();
    if (!connState->ftp.readGreeting) {
        debugs(11, 3, "set URI to " << connState->ftp.uri);
    } else if (oldUri.caseCmp(connState->ftp.uri) == 0) {
        debugs(11, 5, "keep URI as " << oldUri);
        // URI changed: restart the session against the new server
        debugs(11, 3, "reset URI from " << oldUri << " to " << connState->ftp.uri);
        FtpCloseDataConnection(connState);
        connState->ftp.readGreeting = false;
        connState->unpinConnection(true); // close control connection to the server
        FtpChangeState(connState, ConnStateData::FTP_BEGIN, "URI reset");
/// Handles a client FEAT command: switches to FTP_HANDLE_FEAT so the reply
/// can be filtered by FtpHandleFeatReply().
FtpHandleFeatRequest(ClientSocketContext *context, String &cmd, String &params)
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_FEAT, "FtpHandleFeatRequest");
/// Handles a client PASV command: rejects it after EPSV ALL (RFC 2428) or
/// when given unexpected parameters; otherwise switches to FTP_HANDLE_PASV.
FtpHandlePasvRequest(ClientSocketContext *context, String &cmd, String &params)
    ConnStateData *const connState = context->getConn();
    if (connState->ftp.gotEpsvAll) {
        // EPSV ALL forbids any subsequent non-EPSV data commands
        FtpSetReply(context, 500, "Bad PASV command");
    if (params.size() > 0) {
        FtpSetReply(context, 501, "Unexpected parameter");
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_PASV, "FtpHandlePasvRequest");
    // no need to fake PASV request via FtpSetDataCommand() in true PASV case
/// [Re]initializes dataConn for active data transfers. Does not connect.
/// Rejects PORT/EPRT targets that differ from the control connection's
/// client address (anti-bounce), then prepares a connection descriptor bound
/// to the control connection's local IP with an ephemeral source port.
bool FtpCreateDataConnection(ClientSocketContext *context, Ip::Address cltAddr)
    ConnStateData *const connState = context->getConn();
    assert(connState->clientConnection != NULL);
    assert(!connState->clientConnection->remote.isAnyAddr());
    if (cltAddr != connState->clientConnection->remote) {
        debugs(33, 2, "rogue PORT " << cltAddr << " request? ctrl: " << connState->clientConnection->remote);
        // Closing the control connection would not help with attacks because
        // the client is evidently able to connect to us. Besides, closing
        // makes retrials easier for the client and more damaging to us.
        FtpSetReply(context, 501, "Prohibited parameter value");
    FtpCloseDataConnection(context->getConn());
    Comm::ConnectionPointer conn = new Comm::Connection();
    conn->remote = cltAddr;
    // Use local IP address of the control connection as the source address
    // of the active data connection, or some clients will refuse to accept.
    conn->flags |= COMM_DOBIND;
    conn->local = connState->clientConnection->local;
    // RFC 959 requires active FTP connections to originate from port 20
    // but that would preclude us from supporting concurrent transfers! (XXX?)
    conn->local.port(0);
    debugs(11, 3, "will actively connect from " << conn->local << " to " <<
    context->getConn()->ftp.dataConn = conn;
    context->getConn()->ftp.uploadAvailSize = 0;
/// Handles a client PORT command: validates it (not after EPSV ALL, has a
/// parseable ip:port parameter), prepares the active data connection, and
/// converts the command to a fake PASV for the server side.
FtpHandlePortRequest(ClientSocketContext *context, String &cmd, String &params)
    // TODO: Should PORT errors trigger FtpCloseDataConnection() cleanup?
    const ConnStateData *connState = context->getConn();
    if (connState->ftp.gotEpsvAll) {
        FtpSetReply(context, 500, "Rejecting PORT after EPSV ALL");
    if (!params.size()) {
        FtpSetReply(context, 501, "Missing parameter");
    Ip::Address cltAddr;
    if (!Ftp::ParseIpPort(params.termedBuf(), NULL, cltAddr)) {
        FtpSetReply(context, 501, "Invalid parameter");
    if (!FtpCreateDataConnection(context, cltAddr))
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_PORT, "FtpHandlePortRequest");
    FtpSetDataCommand(context);
    return true; // forward our fake PASV request
/// Handles download-type commands (LIST/NLST/MLSD/RETR): verifies the data
/// connection prerequisites, then switches to FTP_HANDLE_DATA_REQUEST.
FtpHandleDataRequest(ClientSocketContext *context, String &cmd, String &params)
    if (!FtpCheckDataConnPre(context))
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_DATA_REQUEST, "FtpHandleDataRequest");
/// Handles upload-type (PUT-mapped) commands: verifies the data connection
/// prerequisites, then switches to FTP_HANDLE_UPLOAD_REQUEST.
FtpHandleUploadRequest(ClientSocketContext *context, String &cmd, String &params)
    if (!FtpCheckDataConnPre(context))
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_UPLOAD_REQUEST, "FtpHandleDataRequest");
/// Handles a client EPRT command (RFC 2428 active mode): validates it (not
/// after EPSV ALL, parseable proto/ip/port parameter), prepares the active
/// data connection, and converts the command to a fake PASV for the server.
FtpHandleEprtRequest(ClientSocketContext *context, String &cmd, String &params)
    debugs(11, 3, "Process an EPRT " << params);
    const ConnStateData *connState = context->getConn();
    if (connState->ftp.gotEpsvAll) {
        FtpSetReply(context, 500, "Rejecting EPRT after EPSV ALL");
    if (!params.size()) {
        FtpSetReply(context, 501, "Missing parameter");
    Ip::Address cltAddr;
    if (!Ftp::ParseProtoIpPort(params.termedBuf(), cltAddr)) {
        FtpSetReply(context, 501, "Invalid parameter");
    if (!FtpCreateDataConnection(context, cltAddr))
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_EPRT, "FtpHandleEprtRequest");
    FtpSetDataCommand(context);
    return true; // forward our fake PASV request
/// Handles a client EPSV command (RFC 2428): supports parameterless EPSV,
/// "EPSV ALL" (locks the session to EPSV), and protocol selectors "1"/"2"
/// (IPv4/IPv6); anything else is rejected. Valid requests switch to
/// FTP_HANDLE_EPSV and are converted to a fake PASV for the server side.
FtpHandleEpsvRequest(ClientSocketContext *context, String &cmd, String &params)
    debugs(11, 3, "Process an EPSV command with params: " << params);
    if (params.size() <= 0) {
        // treat parameterless EPSV as "use the protocol of the ctrl conn"
    } else if (params.caseCmp("ALL") == 0) {
        ConnStateData *connState = context->getConn();
        FtpSetReply(context, 200, "EPSV ALL ok");
        connState->ftp.gotEpsvAll = true; // reject non-EPSV data cmds from now on
    } else if (params.cmp("2") == 0) {
        if (!Ip::EnableIpv6) {
            FtpSetReply(context, 522, "Network protocol not supported, use (1)");
    } else if (params.cmp("1") != 0) {
        FtpSetReply(context, 501, "Unsupported EPSV parameter");
    FtpChangeState(context->getConn(), ConnStateData::FTP_HANDLE_EPSV, "FtpHandleEpsvRequest");
    FtpSetDataCommand(context);
    return true; // forward our fake PASV request
6078 FtpHandleCwdRequest(ClientSocketContext
*context
, String
&cmd
, String
¶ms
)
6080 FtpChangeState(context
->getConn(), ConnStateData::FTP_HANDLE_CWD
, "FtpHandleCwdRequest");
6085 FtpHandlePassRequest(ClientSocketContext
*context
, String
&cmd
, String
¶ms
)
6087 FtpChangeState(context
->getConn(), ConnStateData::FTP_HANDLE_PASS
, "FtpHandlePassRequest");
6092 FtpHandleCdupRequest(ClientSocketContext
*context
, String
&cmd
, String
¶ms
)
6094 FtpChangeState(context
->getConn(), ConnStateData::FTP_HANDLE_CDUP
, "FtpHandleCdupRequest");
6098 // Convert client PORT, EPRT, PASV, or EPSV data command to Squid PASV command.
6099 // Squid server-side decides what data command to use on that side.
6101 FtpSetDataCommand(ClientSocketContext
*context
)
6103 ClientHttpRequest
*const http
= context
->http
;
6104 assert(http
!= NULL
);
6105 HttpRequest
*const request
= http
->request
;
6106 assert(request
!= NULL
);
6107 HttpHeader
&header
= request
->header
;
6108 header
.delById(HDR_FTP_COMMAND
);
6109 header
.putStr(HDR_FTP_COMMAND
, "PASV");
6110 header
.delById(HDR_FTP_ARGUMENTS
);
6111 header
.putStr(HDR_FTP_ARGUMENTS
, "");
6112 debugs(11, 5, "client data command converted to fake PASV");
6115 /// check that client data connection is ready for future I/O or at least
6116 /// has a chance of becoming ready soon.
6118 FtpCheckDataConnPre(ClientSocketContext
*context
)
6120 ConnStateData
*const connState
= context
->getConn();
6121 if (Comm::IsConnOpen(connState
->ftp
.dataConn
))
6124 if (Comm::IsConnOpen(connState
->ftp
.dataListenConn
)) {
6125 // We are still waiting for a client to connect to us after PASV.
6126 // Perhaps client's data conn handshake has not reached us yet.
6127 // After we talk to the server, FtpCheckDataConnPost() will recheck.
6128 debugs(33, 3, "expecting clt data conn " << connState
->ftp
.dataListenConn
);
6132 if (!connState
->ftp
.dataConn
|| connState
->ftp
.dataConn
->remote
.isAnyAddr()) {
6133 debugs(33, 5, "missing " << connState
->ftp
.dataConn
);
6134 // TODO: use client address and default port instead.
6135 FtpSetReply(context
, 425, "Use PORT or PASV first");
6139 // active transfer: open a connection from Squid to client
6140 AsyncCall::Pointer connector
= context
->getConn()->ftp
.connector
=
6141 commCbCall(17, 3, "FtpConnectDoneWrapper",
6142 CommConnectCbPtrFun(FtpHandleConnectDone
, context
));
6144 Comm::ConnOpener
*cs
= new Comm::ConnOpener(connState
->ftp
.dataConn
,
6146 Config
.Timeout
.connect
);
6147 AsyncJob::Start(cs
);
6148 return false; // ConnStateData::processFtpRequest waits FtpHandleConnectDone
6151 /// Check that client data connection is ready for immediate I/O.
6153 FtpCheckDataConnPost(ClientSocketContext
*context
)
6155 ConnStateData
*connState
= context
->getConn();
6157 const Comm::ConnectionPointer
&dataConn
= connState
->ftp
.dataConn
;
6158 if (!Comm::IsConnOpen(dataConn
)) {
6159 debugs(33, 3, "missing client data conn: " << dataConn
);
6166 FtpHandleConnectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
, void *data
)
6168 ClientSocketContext
*context
= static_cast<ClientSocketContext
*>(data
);
6169 context
->getConn()->ftp
.connector
= NULL
;
6171 if (status
!= Comm::OK
) {
6173 FtpSetReply(context
, 425, "Cannot open data connection.");
6174 assert(context
->http
&& context
->http
->storeEntry() != NULL
);
6176 assert(context
->getConn()->ftp
.dataConn
== conn
);
6177 assert(Comm::IsConnOpen(conn
));
6178 fd_note(conn
->fd
, "active client ftp data");
6180 context
->getConn()->resumeFtpRequest(context
);
6184 FtpSetReply(ClientSocketContext
*context
, const int code
, const char *msg
)
6186 ClientHttpRequest
*const http
= context
->http
;
6187 assert(http
!= NULL
);
6188 assert(http
->storeEntry() == NULL
);
6190 HttpReply
*const reply
= new HttpReply
;
6191 reply
->sline
.set(Http::ProtocolVersion(1, 1), Http::scNoContent
);
6192 HttpHeader
&header
= reply
->header
;
6193 header
.putTime(HDR_DATE
, squid_curtime
);
6199 header
.putInt64(HDR_CONTENT_LENGTH
, 0);
6200 header
.putInt(HDR_FTP_STATUS
, code
);
6201 header
.putStr(HDR_FTP_REASON
, msg
);
6202 reply
->hdrCacheInit();
6204 setLogUri(http
, urlCanonicalClean(http
->request
));
6206 clientStreamNode
*const node
= context
->getClientReplyContext();
6207 clientReplyContext
*const repContext
=
6208 dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
6209 assert(repContext
!= NULL
);
6212 flags
.cachable
= false; // force releaseRequest() in storeCreateEntry()
6213 flags
.noCache
= true;
6214 repContext
->createStoreEntry(http
->request
->method
, flags
);
6215 http
->storeEntry()->replaceHttpReply(reply
);
6218 /// Whether Squid FTP gateway supports a given feature (e.g., a command).
6220 FtpSupportedCommand(const String
&name
)
6222 static std::set
<std::string
> BlackList
;
6223 if (BlackList
.empty()) {
6224 /* Add FTP commands that Squid cannot gateway correctly */
6226 // we probably do not support AUTH TLS.* and AUTH SSL,
6227 // but let's disclaim all AUTH support to KISS, for now
6228 BlackList
.insert("AUTH");
6231 // we claim support for all commands that we do not know about
6232 return BlackList
.find(name
.termedBuf()) == BlackList
.end();