debugs(93,3, HERE << "closing pconn" << status());
// comm_close will clear timeout
connection->close();
+ connection = NULL;
}
writer = NULL;
Comm::ConnOpener::ConnOpener(Comm::ConnectionPointer &c, AsyncCall::Pointer &handler, time_t ctimeout) :
AsyncJob("Comm::ConnOpener"),
- connect_timeout(ctimeout),
- host(NULL),
- solo(c),
- callback(handler),
- total_tries(0),
- fail_retries(0),
- connstart(0)
-{
- memset(&calls, 0, sizeof(calls));
-}
+ host_(NULL),
+ conn_(c),
+ callback_(handler),
+ totalTries_(0),
+ failRetries_(0),
+ connectTimeout_(ctimeout),
+ connStart_(0)
+{}
Comm::ConnOpener::~ConnOpener()
{
- safe_free(host);
- solo = NULL;
- calls.earlyabort = NULL;
- calls.timeout = NULL;
+ safe_free(host_);
}
bool
Comm::ConnOpener::doneAll() const
{
- // is the conn to be opened still waiting?
- if (solo != NULL) {
- debugs(5, 6, HERE << " Comm::ConnOpener::doneAll() ? NO. 'solo' is still set");
+ // is the conn_ to be opened still waiting?
+ if (conn_ != NULL) {
return false;
}
// is the callback still to be called?
- if (callback != NULL) {
- debugs(5, 6, HERE << " Comm::ConnOpener::doneAll() ? NO. callback is still set");
+ if (callback_ != NULL) {
return false;
}
- debugs(5, 6, HERE << " Comm::ConnOpener::doneAll() ? YES.");
- return true;
+ return AsyncJob::doneAll();
}
void
Comm::ConnOpener::swanSong()
{
// cancel any event watchers
- if (calls.earlyabort != NULL) {
- calls.earlyabort->cancel("Comm::ConnOpener::swanSong");
- calls.earlyabort = NULL;
+ // done here to get the "swanSong" mention in cancel debugging.
+ if (calls_.earlyAbort_ != NULL) {
+ calls_.earlyAbort_->cancel("Comm::ConnOpener::swanSong");
+ calls_.earlyAbort_ = NULL;
}
- if (calls.timeout != NULL) {
- calls.timeout->cancel("Comm::ConnOpener::swanSong");
- calls.timeout = NULL;
+ if (calls_.timeout_ != NULL) {
+ calls_.timeout_->cancel("Comm::ConnOpener::swanSong");
+ calls_.timeout_ = NULL;
}
// recover what we can from the job
- if (solo != NULL && solo->fd > -1) {
+ if (conn_ != NULL && conn_->isOpen()) {
// it never reached fully open, so abort the FD
- close(solo->fd);
- fd_table[solo->fd].flags.open = 0;
+ conn_->close();
+ fd_table[conn_->fd].flags.open = 0;
// inform the caller
- callCallback(COMM_ERR_CONNECT, 0);
+ doneConnecting(COMM_ERR_CONNECT, 0);
}
}
Comm::ConnOpener::setHost(const char * new_host)
{
// unset and erase if already set.
- if (host != NULL)
- safe_free(host);
+ if (host_ != NULL)
+ safe_free(host_);
// set the new one if given.
if (new_host != NULL)
- host = xstrdup(new_host);
+ host_ = xstrdup(new_host);
}
const char *
Comm::ConnOpener::getHost() const
{
- return host;
+ return host_;
}
+/**
+ * The connection attempt is completed, one way or the other.
+ * Pass the results back to the external handler.
+ */
void
-Comm::ConnOpener::callCallback(comm_err_t status, int xerrno)
+Comm::ConnOpener::doneConnecting(comm_err_t status, int xerrno)
{
/* remove handlers we don't want to happen anymore */
- if (solo != NULL && solo->fd > 0) {
- if (calls.earlyabort != NULL) {
- comm_remove_close_handler(solo->fd, calls.earlyabort);
- calls.earlyabort->cancel("Comm::ConnOpener completed.");
- calls.earlyabort = NULL;
+ if (conn_ != NULL && conn_->isOpen()) {
+ if (calls_.earlyAbort_ != NULL) {
+ comm_remove_close_handler(conn_->fd, calls_.earlyAbort_);
+ calls_.earlyAbort_->cancel("Comm::ConnOpener completed.");
+ calls_.earlyAbort_ = NULL;
}
- if (calls.timeout != NULL) {
- commSetTimeout(solo->fd, -1, NULL, NULL);
- calls.timeout->cancel("Comm::ConnOpener completed.");
- calls.timeout = NULL;
+ if (calls_.timeout_ != NULL) {
+ commSetTimeout(conn_->fd, -1, NULL, NULL);
+ calls_.timeout_->cancel("Comm::ConnOpener completed.");
+ calls_.timeout_ = NULL;
}
}
- if (callback != NULL) {
+ if (callback_ != NULL) {
typedef CommConnectCbParams Params;
- Params ¶ms = GetCommParams<Params>(callback);
- params.conn = solo;
+ Params ¶ms = GetCommParams<Params>(callback_);
+ params.conn = conn_;
params.flag = status;
params.xerrno = xerrno;
- ScheduleCallHere(callback);
- callback = NULL;
+ ScheduleCallHere(callback_);
+ callback_ = NULL;
}
/* ensure cleared local state, we are done. */
- solo = NULL;
-
- assert(doneAll());
+ conn_ = NULL;
}
+/** Actual start opening a TCP connection. */
void
Comm::ConnOpener::start()
{
- Must(solo != NULL);
+ Must(conn_ != NULL);
- /* handle connecting to one single path */
- if (solo->fd < 0) {
+ /* get a socket open ready for connecting with */
+ if (!conn_->isOpen()) {
#if USE_IPV6
/* outbound sockets have no need to be protocol agnostic. */
- if (solo->local.IsIPv6() && solo->local.IsIPv4()) {
- solo->local.SetIPv4();
+ if (conn_->remote.IsIPv4()) {
+ conn_->local.SetIPv4();
}
#endif
- solo->fd = comm_openex(SOCK_STREAM, IPPROTO_TCP, solo->local, solo->flags, solo->tos, host);
- if (solo->fd < 0) {
- callCallback(COMM_ERR_CONNECT, 0);
+ conn_->fd = comm_openex(SOCK_STREAM, IPPROTO_TCP, conn_->local, conn_->flags, conn_->tos, host_);
+ if (!conn_->isOpen()) {
+ doneConnecting(COMM_ERR_CONNECT, 0);
return;
}
- if (calls.earlyabort == NULL) {
+ if (calls_.earlyAbort_ == NULL) {
typedef CommCbMemFunT<Comm::ConnOpener, CommConnectCbParams> Dialer;
- calls.earlyabort = asyncCall(5, 4, "Comm::ConnOpener::earlyAbort",
+ calls_.earlyAbort_ = asyncCall(5, 4, "Comm::ConnOpener::earlyAbort",
Dialer(this, &Comm::ConnOpener::earlyAbort));
}
- comm_add_close_handler(solo->fd, calls.earlyabort);
+ comm_add_close_handler(conn_->fd, calls_.earlyAbort_);
- if (calls.timeout == NULL) {
+ if (calls_.timeout_ == NULL) {
typedef CommCbMemFunT<Comm::ConnOpener, CommTimeoutCbParams> Dialer;
- calls.timeout = asyncCall(5, 4, "Comm::ConnOpener::timeout",
+ calls_.timeout_ = asyncCall(5, 4, "Comm::ConnOpener::timeout",
Dialer(this, &Comm::ConnOpener::timeout));
}
- debugs(5, 3, HERE << "FD " << solo->fd << " timeout " << connect_timeout);
- commSetTimeout(solo->fd, connect_timeout, calls.timeout);
+ debugs(5, 3, HERE << "FD " << conn_->fd << " timeout " << connectTimeout_);
+ commSetTimeout(conn_->fd, connectTimeout_, calls_.timeout_);
- if (connstart == 0) {
- connstart = squid_curtime;
+ if (connStart_ == 0) {
+ connStart_ = squid_curtime;
}
}
- total_tries++;
+ totalTries_++;
- switch (comm_connect_addr(solo->fd, solo->remote) ) {
+ switch (comm_connect_addr(conn_->fd, conn_->remote) ) {
case COMM_INPROGRESS:
// check for timeout FIRST.
- if(squid_curtime - connstart > connect_timeout) {
- debugs(5, 5, HERE << "FD " << solo->fd << ": * - ERR took too long already.");
- callCallback(COMM_TIMEOUT, errno);
+ if(squid_curtime - connStart_ > connectTimeout_) {
+ debugs(5, 5, HERE << "FD " << conn_->fd << ": * - ERR took too long already.");
+ doneConnecting(COMM_TIMEOUT, errno);
return;
} else {
- debugs(5, 5, HERE << "FD " << solo->fd << ": COMM_INPROGRESS");
- commSetSelect(solo->fd, COMM_SELECT_WRITE, Comm::ConnOpener::ConnectRetry, this, 0);
+ debugs(5, 5, HERE << "FD " << conn_->fd << ": COMM_INPROGRESS");
+ commSetSelect(conn_->fd, COMM_SELECT_WRITE, Comm::ConnOpener::ConnectRetry, this, 0);
}
break;
case COMM_OK:
- debugs(5, 5, HERE << "FD " << solo->fd << ": COMM_OK - connected");
+ debugs(5, 5, HERE << "FD " << conn_->fd << ": COMM_OK - connected");
/*
* stats.conn_open is used to account for the number of
* based on the max-conn option. We need to increment here,
* even if the connection may fail.
*/
- if (solo->getPeer())
- solo->getPeer()->stats.conn_open++;
+ if (conn_->getPeer())
+ conn_->getPeer()->stats.conn_open++;
/* TODO: remove these fd_table accesses. But old code still depends on fd_table flags to
* indicate the state of a raw fd object being passed around.
* Also, legacy code still depends on comm_local_port() with no access to Comm::Connection
* when those are done comm_local_port can become one of our member functions to do the below.
*/
- fd_table[solo->fd].flags.open = 1;
- solo->local.SetPort(comm_local_port(solo->fd));
- if (solo->local.IsAnyAddr()) {
- solo->local = fd_table[solo->fd].local_addr;
+ fd_table[conn_->fd].flags.open = 1;
+ conn_->local.SetPort(comm_local_port(conn_->fd));
+ if (conn_->local.IsAnyAddr()) {
+ conn_->local = fd_table[conn_->fd].local_addr;
}
- if (host != NULL)
- ipcacheMarkGoodAddr(host, solo->remote);
- callCallback(COMM_OK, 0);
+ if (host_ != NULL)
+ ipcacheMarkGoodAddr(host_, conn_->remote);
+ doneConnecting(COMM_OK, 0);
break;
default:
- debugs(5, 5, HERE "FD " << solo->fd << ": * - try again");
- fail_retries++;
- if (host != NULL)
- ipcacheMarkBadAddr(host, solo->remote);
+ debugs(5, 5, HERE "FD " << conn_->fd << ": * - try again");
+ failRetries_++;
+ if (host_ != NULL)
+ ipcacheMarkBadAddr(host_, conn_->remote);
#if USE_ICMP
if (Config.onoff.test_reachability)
- netdbDeleteAddrNetwork(solo->remote);
+ netdbDeleteAddrNetwork(conn_->remote);
#endif
// check for timeout FIRST.
- if(squid_curtime - connstart > connect_timeout) {
- debugs(5, 5, HERE << "FD " << solo->fd << ": * - ERR took too long already.");
- callCallback(COMM_TIMEOUT, errno);
- } else if (fail_retries < Config.connect_retries) {
+ if(squid_curtime - connStart_ > connectTimeout_) {
+ debugs(5, 5, HERE << "FD " << conn_->fd << ": * - ERR took too long already.");
+ doneConnecting(COMM_TIMEOUT, errno);
+ } else if (failRetries_ < Config.connect_retries) {
start();
} else {
// send ERROR back to the upper layer.
- debugs(5, 5, HERE << "FD " << solo->fd << ": * - ERR tried too many times already.");
- callCallback(COMM_ERR_CONNECT, errno);
+ debugs(5, 5, HERE << "FD " << conn_->fd << ": * - ERR tried too many times already.");
+ doneConnecting(COMM_ERR_CONNECT, errno);
}
}
}
+/** Abort connection attempt.
+ * Handles the case(s) when a partially setup connection gets closed early.
+ */
void
Comm::ConnOpener::earlyAbort(const CommConnectCbParams &io)
{
debugs(5, 3, HERE << "FD " << io.conn->fd);
- callCallback(COMM_ERR_CLOSING, io.xerrno); // NP: is closing or shutdown better?
+ doneConnecting(COMM_ERR_CLOSING, io.xerrno); // NP: is closing or shutdown better?
}
+/** Make an FD connection attempt.
+ * Handles the case(s) when the connection attempt needs to be started or retried.
+ */
void
Comm::ConnOpener::connect(const CommConnectCbParams &unused)
{
start();
}
+/**
+ * Handles the case(s) when a partially setup connection gets timed out.
+ * NP: When commSetTimeout accepts generic CommCommonCbParams this can die.
+ */
void
Comm::ConnOpener::timeout(const CommTimeoutCbParams &unused)
{
start();
}
+/* Legacy Wrapper for the retry event after COMM_INPROGRESS
+ * TODO: As soon as comm IO accepts Async calls we can use a ConnOpener::connect call
+ */
void
Comm::ConnOpener::ConnectRetry(int fd, void *data)
{
*/
class ConnOpener : public AsyncJob
{
-public:
- // ****** AsynJob API implementation ******
-
- /** Actual start opening a TCP connection. */
- void start();
-
- virtual bool doneAll() const;
+protected:
+ virtual void start();
virtual void swanSong();
public:
- // ****** ConnOpener API iplementation ******
+ virtual bool doneAll() const;
/** attempt to open a connection. */
ConnOpener(Comm::ConnectionPointer &, AsyncCall::Pointer &handler, time_t connect_timeout);
/* These objects may NOT be created without connections to act on. Do not define this operator. */
ConnOpener(const ConnOpener &);
/* These objects may NOT be copied. Do not define this operator. */
- ConnOpener operator =(const ConnOpener &c);
+ ConnOpener & operator =(const ConnOpener &c);
- /** Make an FD connection attempt.
- * Handles the case(s) when a partially setup connection gets closed early.
- */
void connect(const CommConnectCbParams &unused);
-
- /** Abort connection attempt.
- * Handles the case(s) when a partially setup connection gets closed early.
- */
void earlyAbort(const CommConnectCbParams &);
-
- /**
- * Handles the case(s) when a partially setup connection gets timed out.
- * NP: When commSetTimeout accepts generic CommCommonCbParams this can die.
- */
void timeout(const CommTimeoutCbParams &unused);
-
- /**
- * Connection attempt are completed. One way or the other.
- * Pass the results back to the external handler.
- */
- void callCallback(comm_err_t status, int xerrno);
-
- // Legacy Wrapper for the retry event after COMM_INPROGRESS
- // As soon as comm IO accepts Async calls we can use a ConnOpener::connect call
+ void doneConnecting(comm_err_t status, int xerrno);
static void ConnectRetry(int fd, void *data);
private:
+ char *host_; ///< domain name we are trying to connect to.
+ Comm::ConnectionPointer conn_; ///< single connection currently being opened.
+ AsyncCall::Pointer callback_; ///< handler to be called on connection completion.
+
+ int totalTries_; ///< total number of connection attempts over all destinations so far.
+ int failRetries_; ///< number of retries current destination has been tried.
+
/**
* time at which to abandon the connection.
* the connection-done callback will be passed COMM_TIMEOUT
*/
- time_t connect_timeout;
-
- char *host; ///< domain name we are trying to connect to.
-
- Comm::ConnectionPointer solo; ///< single connection currently being opened.
- AsyncCall::Pointer callback; ///< handler to be called on connection completion.
+ time_t connectTimeout_;
- int total_tries; ///< total number of connection attempts over all destinations so far.
- int fail_retries; ///< number of retries current destination has been tried.
- time_t connstart; ///< time at which this series of connection attempts was started.
+ /// time at which this series of connection attempts was started.
+ time_t connStart_;
/// handles to calls which we may need to cancel.
- struct _calls {
- AsyncCall::Pointer earlyabort;
- AsyncCall::Pointer timeout;
- } calls;
+ struct Calls {
+ AsyncCall::Pointer earlyAbort_;
+ AsyncCall::Pointer timeout_;
+ } calls_;
CBDATA_CLASS2(ConnOpener);
};
Comm::Connection::Connection() :
local(),
remote(),
- peer_type(HIER_NONE),
+ peerType(HIER_NONE),
fd(-1),
tos(0),
flags(COMM_NONBLOCKING),
Comm::Connection::~Connection()
{
close();
- if (_peer) {
- cbdataReferenceDone(_peer);
- }
+ cbdataReferenceDone(_peer);
}
Comm::ConnectionPointer &
c->local = local;
c->remote = remote;
- c->peer_type = peer_type;
+ c->peerType = peerType;
c->tos = tos;
c->flags = flags;
void
Comm::Connection::close()
{
- if (isOpen())
+ if (isOpen()) {
comm_close(fd);
- fd = -1;
+ fd = -1;
+ }
}
void
/** standard empty connection creation */
Connection();
- /** These objects may not be exactly duplicated. Use copyDetails() instead. */
- Connection(const Connection &c);
-
/** Clear the connection properties and close any open socket. */
~Connection();
*/
ConnectionPointer & copyDetails() const;
- /** These objects may not be exactly duplicated. Use clone() instead. */
- Connection & operator =(const Connection &c);
-
/** Close any open socket. */
void close();
/** determine whether this object describes an active connection or not. */
bool isOpen() const { return (fd >= 0); }
+ /** retrieve the peer pointer for use.
+ * The caller is responsible for all CBDATA operations regarding the
+ * use of the pointer returned.
+ */
+ peer * const getPeer() const { return _peer; }
+
+ /** alter the stored peer pointer.
+ * Perform appropriate CBDATA operations for locking the peer pointer
+ */
+ void setPeer(peer * p);
+
+private:
+ /** These objects may not be exactly duplicated. Use copyDetails() instead. */
+ Connection(const Connection &c);
+
+ /** These objects may not be exactly duplicated. Use copyDetails() instead. */
+ Connection & operator =(const Connection &c);
+
+public:
/** Address/Port for the Squid end of a TCP link. */
Ip::Address local;
Ip::Address remote;
/** Hierarchy code for this connection link */
- hier_code peer_type;
+ hier_code peerType;
/** Socket used by this connection. -1 if no socket has been opened. */
int fd;
/** COMM flags set on this connection */
int flags;
- /** retrieve the peer pointer for use.
- * The caller is responsible for all CBDATA operations regarding the
- * used of the pointer returned.
- */
- peer * const getPeer() const { return _peer; }
-
- /** alter the stored peer pointer.
- * Perform appropriate CBDATA operations for locking the peer pointer
- */
- void setPeer(peer * p);
-
private:
/** cache_peer data object (if any) */
peer *_peer;
namespace Comm
{
-class Connection;
-
class ListenStateData
{
FwdState* fwd = (FwdState*)d;
Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
- if (fwd->paths.size() > 0 && fwd->paths[0]->isOpen()) {
- comm_remove_close_handler(fwd->paths[0]->fd, fwdServerClosedWrapper, fwd);
+ if (fwd->isServerConnectionOpen()) {
+ comm_remove_close_handler(fwd->serverConnection()->fd, fwdServerClosedWrapper, fwd);
}
fwd->paths.clean();
fwd->self = NULL;
entry = NULL;
- if (paths.size() > 0 && paths[0]->isOpen()) {
- comm_remove_close_handler(paths[0]->fd, fwdServerClosedWrapper, this);
- debugs(17, 3, HERE << "closing FD " << paths[0]->fd);
- paths[0]->close();
+ if (isServerConnectionOpen()) {
+ comm_remove_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
+ debugs(17, 3, HERE << "closing FD " << serverConnection()->fd);
+ serverConnection()->close();
}
paths.clean();
* Frees fwdState without closing FD or generating an abort
*/
void
+FwdState::unregister(Comm::ConnectionPointer conn)
+{
+ debugs(17, 3, HERE << entry->url() );
+ assert(serverConnection() == conn);
+ assert(conn->isOpen());
+ comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
+}
+
+// Legacy method to be removed in favor of the above as soon as possible
+void
FwdState::unregister(int fd)
{
- debugs(17, 3, HERE << entry->url() );
- assert(fd == paths[0]->fd);
+ debugs(17, 3, HERE << entry->url() );
+ assert(fd == serverConnection()->fd);
assert(fd > -1);
comm_remove_close_handler(fd, fwdServerClosedWrapper, this);
+ serverConnection()->fd = -1;
}
/**
if (reforward()) {
debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status << " " << entry->url());
- if (paths[0]->fd > -1)
- unregister(paths[0]->fd);
+ if (isServerConnectionOpen())
+ unregister(serverConnection());
entry->reset();
*/
connectStart();
} else {
- debugs(17, 3, HERE << "server FD " << paths[0]->fd << " not re-forwarding status " << entry->getReply()->sline.status);
+ debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status);
EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
entry->complete();
- if (paths[0]->fd < 0)
+ if (!isServerConnectionOpen())
completed();
self = NULL; // refcounted
FwdState::serverClosed(int fd)
{
debugs(17, 2, HERE << "FD " << fd << " " << entry->url());
- assert(paths[0]->fd == fd);
+ assert(serverConnection()->fd == fd);
- if (paths[0]->getPeer()) {
- paths[0]->getPeer()->stats.conn_open--;
+ if (serverConnection()->getPeer()) {
+ serverConnection()->getPeer()->stats.conn_open--;
}
retryOrBail();
FwdState::handleUnregisteredServerEnd()
{
debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
- assert(paths[0]->fd < 0);
+ assert(!isServerConnectionOpen());
retryOrBail();
}
fail(anErr);
- if (paths[0]->getPeer()) {
- peerConnectFailed(paths[0]->getPeer());
- paths[0]->getPeer()->stats.conn_open--;
+ if (serverConnection()->getPeer()) {
+ peerConnectFailed(serverConnection()->getPeer());
+ serverConnection()->getPeer()->stats.conn_open--;
}
- paths[0]->close();
+ serverConnection()->close();
return;
}
}
- if (paths[0]->getPeer() && !SSL_session_reused(ssl)) {
- if (paths[0]->getPeer()->sslSession)
- SSL_SESSION_free(paths[0]->getPeer()->sslSession);
+ if (serverConnection()->getPeer() && !SSL_session_reused(ssl)) {
+ if (serverConnection()->getPeer()->sslSession)
+ SSL_SESSION_free(serverConnection()->getPeer()->sslSession);
- paths[0]->getPeer()->sslSession = SSL_get1_session(ssl);
+ serverConnection()->getPeer()->sslSession = SSL_get1_session(ssl);
}
dispatch();
{
SSL *ssl;
SSL_CTX *sslContext = NULL;
- const peer *peer = paths[0]->getPeer();
- int fd = paths[0]->fd;
+ const peer *peer = serverConnection()->getPeer();
+ int fd = serverConnection()->fd;
if (peer) {
assert(peer->use_ssl);
/* it might have been a timeout with a partially open link */
if (paths.size() > 0) {
- if (paths[0]->getPeer())
- peerConnectFailed(paths[0]->getPeer());
+ if (serverConnection()->getPeer())
+ peerConnectFailed(serverConnection()->getPeer());
- paths[0]->close();
+ serverConnection()->close();
}
retryOrBail();
return;
}
#if REDUNDANT_NOW
- if (Config.onoff.log_ip_on_direct && paths[0]->peer_type == HIER_DIRECT)
+ if (Config.onoff.log_ip_on_direct && serverConnection()->peerType == HIER_DIRECT)
updateHierarchyInfo();
#endif
- debugs(17, 3, "FD " << paths[0]->fd << ": '" << entry->url() << "'" );
+ debugs(17, 3, "FD " << serverConnection()->fd << ": '" << entry->url() << "'" );
- comm_add_close_handler(paths[0]->fd, fwdServerClosedWrapper, this);
+ comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
- if (paths[0]->getPeer())
- peerConnectSucceded(paths[0]->getPeer());
+ if (serverConnection()->getPeer())
+ peerConnectSucceded(serverConnection()->getPeer());
updateHierarchyInfo();
#if USE_SSL
- if ((paths[0]->getPeer() && paths[0]->getPeer()->use_ssl) ||
- (!paths[0]->getPeer() && request->protocol == PROTO_HTTPS)) {
+ if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
+ (!serverConnection()->getPeer() && request->protocol == PROTO_HTTPS)) {
initiateSSL();
return;
}
FwdState::connectTimeout(int fd)
{
debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
- assert(fd == paths[0]->fd);
+ assert(fd == serverConnection()->fd);
- if (Config.onoff.log_ip_on_direct && paths[0]->peer_type == HIER_DIRECT)
+ if (Config.onoff.log_ip_on_direct && serverConnection()->peerType == HIER_DIRECT)
updateHierarchyInfo();
if (entry->isEmpty()) {
/* This marks the peer DOWN ... */
if (paths.size() > 0)
- if (paths[0]->getPeer())
- peerConnectFailed(paths[0]->getPeer());
+ if (serverConnection()->getPeer())
+ peerConnectFailed(serverConnection()->getPeer());
}
- paths[0]->close();
+ serverConnection()->close();
}
/**
if (n_tries == 0) // first attempt
request->hier.first_conn_start = current_time;
- Comm::ConnectionPointer conn = paths[0];
-
/* connection timeout */
int ctimeout;
- if (conn->getPeer()) {
- ctimeout = conn->getPeer()->connect_timeout > 0 ? conn->getPeer()->connect_timeout : Config.Timeout.peer_connect;
+ if (serverConnection()->getPeer()) {
+ ctimeout = serverConnection()->getPeer()->connect_timeout > 0 ?
+ serverConnection()->getPeer()->connect_timeout : Config.Timeout.peer_connect;
} else {
ctimeout = Config.Timeout.connect;
}
ctimeout = ftimeout;
request->flags.pinned = 0;
- if (conn->peer_type == PINNED) {
+ if (serverConnection()->peerType == PINNED) {
ConnStateData *pinned_connection = request->pinnedConnection();
assert(pinned_connection);
- conn->fd = pinned_connection->validatePinnedConnection(request, conn->getPeer());
- if (conn->isOpen()) {
+ serverConnection()->fd = pinned_connection->validatePinnedConnection(request, serverConnection()->getPeer());
+ if (isServerConnectionOpen()) {
pinned_connection->unpinConnection();
#if 0
- if (!conn->getPeer())
- conn->peer_type = HIER_DIRECT;
+ if (!serverConnection()->getPeer())
+ serverConnection()->peerType = HIER_DIRECT;
#endif
n_tries++;
request->flags.pinned = 1;
if (pinned_connection->pinnedAuth())
request->flags.auth = 1;
updateHierarchyInfo();
+ Comm::ConnectionPointer conn = serverConnection();
FwdState::connectDone(conn, COMM_OK, 0);
return;
}
debugs(17,2,HERE << " Pinned connection " << pinned_connection << " not valid. Releasing.");
request->releasePinnedConnection();
paths.shift();
- conn = NULL; // maybe release the conn memory. it's not needed by us anyway.
connectStart();
return;
}
const char *host;
int port;
- if (conn->getPeer()) {
- host = conn->getPeer()->host;
- port = conn->getPeer()->http_port;
- conn->fd = fwdPconnPool->pop(conn->getPeer()->name, conn->getPeer()->http_port, request->GetHost(), conn->local, checkRetriable());
+ if (serverConnection()->getPeer()) {
+ host = serverConnection()->getPeer()->host;
+ port = serverConnection()->getPeer()->http_port;
+ serverConnection()->fd = fwdPconnPool->pop(serverConnection()->getPeer()->name,
+ serverConnection()->getPeer()->http_port,
+ request->GetHost(), serverConnection()->local,
+ checkRetriable());
} else {
host = request->GetHost();
port = request->port;
- conn->fd = fwdPconnPool->pop(host, port, NULL, conn->local, checkRetriable());
+ serverConnection()->fd = fwdPconnPool->pop(host, port, NULL, serverConnection()->local, checkRetriable());
}
- conn->remote.SetPort(port);
+ serverConnection()->remote.SetPort(port);
- if (conn->isOpen()) {
- debugs(17, 3, HERE << "reusing pconn FD " << conn->fd);
+ if (isServerConnectionOpen()) {
+ debugs(17, 3, HERE << "reusing pconn FD " << serverConnection()->fd);
n_tries++;
- if (!conn->getPeer())
+ if (!serverConnection()->getPeer())
origin_tries++;
updateHierarchyInfo();
- comm_add_close_handler(conn->fd, fwdServerClosedWrapper, this);
+ comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
dispatch();
return;
#endif
AsyncCall::Pointer call = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
- Comm::ConnOpener *cs = new Comm::ConnOpener(paths[0], call, ctimeout);
+ Comm::ConnectionPointer conn = serverConnection();
+ Comm::ConnOpener *cs = new Comm::ConnOpener(conn, call, ctimeout);
cs->setHost(host);
AsyncJob::AsyncStart(cs);
}
* is attached to something and will be deallocated when server_fd
* is closed.
*/
- assert(paths.size() > 0 && paths[0]->fd > -1);
+ assert(isServerConnectionOpen());
- fd_note(paths[0]->fd, entry->url());
+ fd_note(serverConnection()->fd, entry->url());
- fd_table[paths[0]->fd].noteUse(fwdPconnPool);
+ fd_table[serverConnection()->fd].noteUse(fwdPconnPool);
/*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
assert(entry->ping_status != PING_WAITING);
int tos = 1;
int tos_len = sizeof(tos);
clientFde->upstreamTOS = 0;
- if (setsockopt(paths[0]->fd,SOL_IP,IP_RECVTOS,&tos,tos_len)==0) {
+ if (setsockopt(serverConnection()->fd, SOL_IP, IP_RECVTOS, &tos, tos_len)==0) {
unsigned char buf[512];
int len = 512;
- if (getsockopt(paths[0]->fd,SOL_IP,IP_PKTOPTIONS,buf,(socklen_t*)&len) == 0) {
+ if (getsockopt(serverConnection()->fd, SOL_IP, IP_PKTOPTIONS, buf, (socklen_t*)&len) == 0) {
/* Parse the PKTOPTIONS structure to locate the TOS data message
* prepared in the kernel by the ZPH incoming TCP TOS preserving
* patch.
pbuf += CMSG_LEN(o->cmsg_len);
}
} else {
- debugs(33, DBG_IMPORTANT, "ZPH: error in getsockopt(IP_PKTOPTIONS) on FD " << paths[0]->fd << " " << xstrerror());
+ debugs(33, DBG_IMPORTANT, "ZPH: error in getsockopt(IP_PKTOPTIONS) on FD " << serverConnection()->fd << " " << xstrerror());
}
} else {
- debugs(33, DBG_IMPORTANT, "ZPH: error in setsockopt(IP_RECVTOS) on FD " << paths[0]->fd << " " << xstrerror());
+ debugs(33, DBG_IMPORTANT, "ZPH: error in setsockopt(IP_RECVTOS) on FD " << serverConnection()->fd << " " << xstrerror());
}
}
#endif
- if (paths.size() > 0 && paths[0]->getPeer() != NULL) {
- paths[0]->getPeer()->stats.fetches++;
- request->peer_login = paths[0]->getPeer()->login;
- request->peer_domain = paths[0]->getPeer()->domain;
+ if (serverConnection()->getPeer() != NULL) {
+ serverConnection()->getPeer()->stats.fetches++;
+ request->peer_login = serverConnection()->getPeer()->login;
+ request->peer_domain = serverConnection()->getPeer()->domain;
httpStart(this);
} else {
request->peer_login = NULL;
* transient (network) error; its a bug.
*/
flags.dont_retry = 1;
- paths[0]->close();
+ serverConnection()->close();
break;
}
}
assert(paths.size() > 0);
- char nextHop[256]; //
+ char nextHop[256];
- if (paths[0]->getPeer()) {
+ if (serverConnection()->getPeer()) {
// went to peer, log peer host name
- snprintf(nextHop,256,"%s", paths[0]->getPeer()->name);
+ snprintf(nextHop,256,"%s", serverConnection()->getPeer()->name);
} else {
// went DIRECT, must honor log_ip_on_direct
if (!Config.onoff.log_ip_on_direct)
snprintf(nextHop,256,"%s",request->GetHost()); // domain name
else
- paths[0]->remote.NtoA(nextHop, 256);
+ serverConnection()->remote.NtoA(nextHop, 256);
}
- request->hier.peer_local_port = paths[0]->local.GetPort();
+ request->hier.peer_local_port = serverConnection()->local.GetPort();
assert(nextHop[0]);
- hierarchyNote(&request->hier, paths[0]->peer_type, nextHop);
+ hierarchyNote(&request->hier, serverConnection()->peerType, nextHop);
}
static void fwdStart(int fd, StoreEntry *, HttpRequest *);
void startComplete();
void fail(ErrorState *err);
+ void unregister(Comm::ConnectionPointer conn);
void unregister(int fd);
void complete();
void handleUnregisteredServerEnd();
void ftpPasvFailed(bool val) { flags.ftp_pasv_failed = val; }
- Comm::ConnectionPointer conn() const { return paths[0]; };
+ /** return a ConnectionPointer to the current server connection (may or may not be open) */
+ Comm::ConnectionPointer serverConnection() const { assert(paths.size() > 0); return paths[0]; };
+
+ /** test if the current server connection is open */
+ bool isServerConnectionOpen() const { return (paths.size() > 0 && serverConnection()->isOpen()); };
private:
// hidden for safer management of self; use static fwdStart
unsigned int forward_completed:1;
} flags;
- /** possible paths which may be tried (in sequence stored) */
+ /** connections to open, in order, until successful */
Comm::Paths paths;
// NP: keep this last. It plays with private/public
typedef CommCbMemFunT<FtpStateData, CommCloseCbParams> Dialer;
AsyncCall::Pointer closer = asyncCall(9, 5, "FtpStateData::ctrlClosed",
Dialer(this, &FtpStateData::ctrlClosed));
- ctrl.opened(theFwdState->conn()->fd, closer);
+ ctrl.opened(theFwdState->serverConnection()->fd, closer);
if (request->method == METHOD_PUT)
flags.put = 1;
gopher_request_parse(fwd->request,
&gopherState->type_id, gopherState->request);
- comm_add_close_handler(fwd->conn()->fd, gopherStateFree, gopherState);
+ comm_add_close_handler(fwd->serverConnection()->fd, gopherStateFree, gopherState);
if (((gopherState->type_id == GOPHER_INDEX) || (gopherState->type_id == GOPHER_CSO))
&& (strchr(gopherState->request, '?') == NULL)) {
gopherToHTML(gopherState, (char *) NULL, 0);
fwd->complete();
- fwd->conn()->close();
return;
}
- gopherState->fd = fwd->conn()->fd; // TODO: save the conn() in gopher instead of the FD
+ gopherState->fd = fwd->serverConnection()->fd; // TODO: save the serverConnection() in gopher instead of the FD
gopherState->fwd = fwd;
- gopherSendRequest(fwd->conn()->fd, gopherState);
- commSetTimeout(fwd->conn()->fd, Config.Timeout.read, gopherTimeout, gopherState);
+ gopherSendRequest(fwd->serverConnection()->fd, gopherState);
+ commSetTimeout(fwd->serverConnection()->fd, Config.Timeout.read, gopherTimeout, gopherState);
}
debugs(11,5,HERE << "HttpStateData " << this << " created");
ignoreCacheControl = false;
surrogateNoStore = false;
- fd = fwd->conn()->fd; // TODO: store Comm::Connection instead of FD
+ fd = fwd->serverConnection()->fd; // TODO: store Comm::Connection instead of FD
readBuf = new MemBuf;
readBuf->init();
orig_request = HTTPMSGLOCK(fwd->request);
orig_request->hier.peer_http_request_sent.tv_sec = 0;
orig_request->hier.peer_http_request_sent.tv_usec = 0;
- if (fwd->conn() != NULL)
- _peer = cbdataReference(fwd->conn()->getPeer()); /* might be NULL */
+ if (fwd->serverConnection() != NULL)
+ _peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */
if (_peer) {
const char *url;
HTTPMSGUNLOCK(orig_request);
+ cbdataReferenceDone(_peer);
+
debugs(11,5, HERE << "HttpStateData " << this << " destroyed; FD " << fd);
}
orig_request->pinnedConnection()->pinConnection(fd, orig_request, _peer,
(request->flags.connection_auth != 0));
} else {
- fwd->pconnPush(fwd->conn(), _peer, request, orig_request->GetHost(), client_addr);
+ fwd->pconnPush(fwd->serverConnection(), _peer, request, orig_request->GetHost(), client_addr);
}
fd = -1;
debugs(28, 3, HERE << "Doing ident lookup" );
checklist->asyncInProgress(true);
// TODO: store a Comm::Connection in either checklist or ConnStateData one day.
- Comm::Connection cc; // IDENT will clone it's own copy for alterations.
- cc.local = checklist->conn()->me;
- cc.remote = checklist->conn()->peer;
- Comm::ConnectionPointer ccp = &cc;
- Ident::Start(ccp, LookupDone, checklist);
+ Comm::ConnectionPointer cc = new Comm::Connection;
+ cc->local = checklist->conn()->me;
+ cc->remote = checklist->conn()->peer;
+ Ident::Start(cc, LookupDone, checklist);
} else {
debugs(28, DBG_IMPORTANT, "IdentLookup::checkForAsync: Can't start ident lookup. No client connection" );
checklist->currentAnswer(ACCESS_DENIED);
assert(ia->cur < ia->count);
// loop over each result address, adding to the possible destinations.
- Comm::ConnectionPointer p;
int ip = ia->cur;
for (int n = 0; n < ia->count; n++, ip++) {
+ Comm::ConnectionPointer p;
+
if (ip >= ia->count) ip = 0; // looped back to zero.
// Enforce forward_max_tries configuration.
p->remote.SetPort(fs->_peer->http_port);
else
p->remote.SetPort(psstate->request->port);
- p->peer_type = fs->code;
+ p->peerType = fs->code;
// check for a configured outgoing address for this destination...
getOutgoingAddress(psstate->request, p);
#endif
if (conn != NULL && conn->getPeer())
- hierarchyNote(&tunnelState->request->hier, conn->peer_type, conn->getPeer()->host);
+ hierarchyNote(&tunnelState->request->hier, conn->peerType, conn->getPeer()->host);
else if (Config.onoff.log_ip_on_direct)
- hierarchyNote(&tunnelState->request->hier, conn->peer_type, fd_table[conn->fd].ipaddr);
+ hierarchyNote(&tunnelState->request->hier, conn->peerType, fd_table[conn->fd].ipaddr);
else
- hierarchyNote(&tunnelState->request->hier, conn->peer_type, tunnelState->host);
+ hierarchyNote(&tunnelState->request->hier, conn->peerType, tunnelState->host);
if (status != COMM_OK) {
/* At this point only the TCP handshake has failed. no data has been passed.
whoisStart(FwdState * fwd)
{
WhoisState *p;
- int fd = fwd->conn()->fd;
+ int fd = fwd->serverConnection()->fd;
char *buf;
size_t l;
CBDATA_INIT_TYPE(WhoisState);