-
/*
- * DEBUG: section 72 Peer Digest Routines
- * AUTHOR: Alex Rousskov
- *
- * SQUID Web Proxy Cache http://www.squid-cache.org/
- * ----------------------------------------------------------
- *
- * Squid is the result of efforts by numerous individuals from
- * the Internet community; see the CONTRIBUTORS file for full
- * details. Many organizations have provided support for Squid's
- * development; see the SPONSORS file for full details. Squid is
- * Copyrighted (C) 2001 by the Regents of the University of
- * California; see the COPYRIGHT file for full details. Squid
- * incorporates software developed and/or copyrighted by other
- * sources; see the CREDITS file for full details.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
+ * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
*
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
*/
+/* DEBUG: section 72 Peer Digest Routines */
+
#include "squid.h"
#if USE_CACHE_DIGESTS
#include "CacheDigest.h"
#include "store_key_md5.h"
#include "StoreClient.h"
#include "tools.h"
+#include "util.h"
/* local types */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
-static void peerDigestClean(PeerDigest *);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)
/* min interval for requesting digests from a given peer */
-static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
+static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
/* min interval for requesting digests (cumulative request stream) */
-static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */
+static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */
/* local vars */
-static time_t pd_last_req_time = 0; /* last call to Check */
+static time_t pd_last_req_time = 0; /* last call to Check */
/* initialize peer digest */
static void
pd->times.initialized = squid_curtime;
}
-static void
-peerDigestClean(PeerDigest * pd)
+CBDATA_CLASS_INIT(PeerDigest);
+
+CBDATA_CLASS_INIT(DigestFetchState);
+
+DigestFetchState::DigestFetchState(PeerDigest *aPd, HttpRequest *req) :
+ pd(cbdataReference(aPd)),
+ entry(NULL),
+ old_entry(NULL),
+ sc(NULL),
+ old_sc(NULL),
+ request(req),
+ offset(0),
+ mask_offset(0),
+ start_time(squid_curtime),
+ resp_time(0),
+ expires(0),
+ bufofs(0),
+ state(DIGEST_READ_REPLY)
{
- assert(pd);
+ HTTPMSGLOCK(request);
- if (pd->cd)
- cacheDigestDestroy(pd->cd);
+ sent.msg = 0;
+ sent.bytes = 0;
- pd->host.clean();
+ recv.msg = 0;
+ recv.bytes = 0;
+
+ *buf = 0;
}
-CBDATA_CLASS_INIT(PeerDigest);
+DigestFetchState::~DigestFetchState()
+{
+ /* unlock everything */
+ storeUnregister(sc, entry, this);
+
+ entry->unlock("DigestFetchState destructed");
+ entry = NULL;
+
+ HTTPMSGUNLOCK(request);
+
+ assert(pd == NULL);
+}
/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
if (cbdataReferenceValidDone(peerTmp, &p))
peerNoteDigestGone((CachePeer *)p);
- peerDigestClean(pd);
+ delete pd->cd;
+ pd->host.clean();
delete pd;
}
pd->flags.needed = true;
pd->times.needed = squid_curtime;
- peerDigestSetCheck(pd, 0); /* check asap */
+ peerDigestSetCheck(pd, 0); /* check asap */
}
/* currently we do not have a reason to disable without destroying */
{
debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
pd->times.disabled = squid_curtime;
- pd->times.next_check = -1; /* never */
+ pd->times.next_check = -1; /* never */
pd->flags.usable = 0;
- if (pd->cd) {
- cacheDigestDestroy(pd->cd);
- pd->cd = NULL;
- }
+    delete pd->cd;
+ pd->cd = nullptr;
/* we do not destroy the pd itself to preserve its "history" and stats */
}
{
assert(pd);
return pd->times.retry_delay > 0 ?
- 2 * pd->times.retry_delay : /* exponential backoff */
- PeerDigestReqMinGap; /* minimal delay */
+ 2 * pd->times.retry_delay : /* exponential backoff */
+ PeerDigestReqMinGap; /* minimal delay */
}
/* artificially increases Expires: setting to avoid race conditions
assert(!pd->flags.requested);
- pd->times.next_check = 0; /* unknown */
+ pd->times.next_check = 0; /* unknown */
if (!cbdataReferenceValid(pd->peer)) {
peerDigestNotePeerGone(pd);
}
if (req_time <= squid_curtime)
- peerDigestRequest(pd); /* will set pd->flags.requested */
+ peerDigestRequest(pd); /* will set pd->flags.requested */
else
peerDigestSetCheck(pd, req_time - squid_curtime);
}
-CBDATA_TYPE(DigestFetchState);
-
/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
CachePeer *p = pd->peer;
StoreEntry *e, *old_e;
- char *url;
+ char *url = NULL;
const cache_key *key;
HttpRequest *req;
- DigestFetchState *fetch = NULL;
StoreIOBuffer tempBuffer;
pd->req_result = NULL;
if (p->digest_url)
url = xstrdup(p->digest_url);
else
- url = internalRemoteUri(p->host, p->http_port,
- "/squid-internal-periodic/", StoreDigestFileName);
+ url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));
- req = HttpRequest::CreateFromUrl(url);
+ const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
+ req = HttpRequest::FromUrl(url, mx);
assert(req);
/* add custom headers */
assert(!req->header.len);
- req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);
+ req->header.putStr(Http::HdrType::ACCEPT, StoreDigestMimeStr);
- req->header.putStr(HDR_ACCEPT, "text/html");
+ req->header.putStr(Http::HdrType::ACCEPT, "text/html");
if (p->login &&
p->login[0] != '*' &&
strcmp(p->login, "PASS") != 0 &&
strcmp(p->login, "PASSTHRU") != 0 &&
- strcmp(p->login, "NEGOTIATE") != 0 &&
+        strncmp(p->login, "NEGOTIATE", 9) != 0 &&
strcmp(p->login, "PROXYPASS") != 0) {
- xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
+ req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
}
/* create fetch state structure */
- CBDATA_INIT_TYPE(DigestFetchState);
-
- fetch = cbdataAlloc(DigestFetchState);
-
- fetch->request = req;
- HTTPMSGLOCK(fetch->request);
-
- fetch->pd = cbdataReference(pd);
-
- fetch->offset = 0;
-
- fetch->state = DIGEST_READ_REPLY;
+ DigestFetchState *fetch = new DigestFetchState(pd, req);
/* update timestamps */
- fetch->start_time = squid_curtime;
-
pd->times.requested = squid_curtime;
-
pd_last_req_time = squid_curtime;
-
req->flags.cachable = true;
/* the rest is based on clientProcessExpired() */
/* set lastmod to trigger IMS request if possible */
if (old_e)
- e->lastmod = old_e->lastmod;
+ e->lastModified(old_e->lastModified());
/* push towards peer cache */
debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");
storeClientCopy(fetch->sc, e, tempBuffer,
peerDigestHandleReply, fetch);
+
+ safe_free(url);
}
/* Handle the data copying .. */
* try to destroy the fetch structure, and we like to know if they
* do
*/
- fetch = cbdataReference(fetch);
+ CbcPointer<DigestFetchState> tmpLock = fetch;
/* Repeat this loop until we're out of data OR the state changes */
/* (So keep going if the state has changed and we still have data */
break;
case DIGEST_READ_DONE:
- goto finish;
+ return;
break;
default:
}
if (retsize < 0)
- goto finish;
+ return;
/*
* The returned size indicates how much of the buffer was read -
storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
peerDigestHandleReply, fetch);
}
-
-finish:
- /* Get rid of our reference, we've finished with it for now */
- cbdataReferenceDone(fetch);
}
/* wait for full http headers to be received then parse them */
/* our old entry is fine */
assert(fetch->old_entry);
- if (!fetch->old_entry->mem_obj->request) {
+ if (!fetch->old_entry->mem_obj->request)
fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
- HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
- }
assert(fetch->old_entry->mem_obj->request);
- HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();
-
- old_rep->updateOnNotModified(reply);
-
- fetch->old_entry->timestampsSet();
+ Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry);
/* get rid of 304 reply */
storeUnregister(fetch->sc, fetch->entry, fetch);
} else {
/* some kind of a bug */
peerDigestFetchAbort(fetch, buf, reply->sline.reason());
- return -1; /* XXX -1 will abort stuff in ReadReply! */
+ return -1; /* XXX -1 will abort stuff in ReadReply! */
}
/* must have a ready-to-use store entry if we got here */
}
fetch->state = DIGEST_READ_CBLOCK;
- return hdr_size; /* Say how much data we read */
- } else {
- /* need more data, do we have space? */
+ return hdr_size; /* Say how much data we read */
+ }
- if (size >= SM_PAGE_SIZE) {
- peerDigestFetchAbort(fetch, buf, "stored header too big");
- return -1;
- } else {
- return 0; /* We need to read more to parse .. */
- }
+ /* need more data, do we have space? */
+ if (size >= SM_PAGE_SIZE) {
+ peerDigestFetchAbort(fetch, buf, "stored header too big");
+ return -1;
}
- fatal("peerDigestSwapInHeaders() - shouldn't get here!\n");
- return 0; /* keep gcc happy */
+ return 0; /* We need to read more to parse .. */
}
int
peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
return -1;
}
- } else {
- /* need more data, do we have space? */
+ }
- if (size >= SM_PAGE_SIZE) {
- peerDigestFetchAbort(fetch, buf, "digest cblock too big");
- return -1;
- } else {
- return 0; /* We need more data */
- }
+ /* need more data, do we have space? */
+ if (size >= SM_PAGE_SIZE) {
+ peerDigestFetchAbort(fetch, buf, "digest cblock too big");
+ return -1;
}
- fatal("peerDigestSwapInCBlock(): shouldn't get here!\n");
- return 0; /* keep gcc happy */
+ return 0; /* We need more data */
}
int
fetch->mask_offset << ", expected " << pd->cd->mask_size);
assert(fetch->mask_offset == pd->cd->mask_size);
assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
- return -1; /* XXX! */
- } else {
- /* We always read everything, so return so */
- return size;
+ return -1; /* XXX! */
}
- fatal("peerDigestSwapInMask(): shouldn't get here!\n");
- return 0; /* keep gcc happy */
+ /* We always read everything, so return size */
+ return size;
}
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
PeerDigest *pd = NULL;
- const char *host = "<unknown>"; /* peer host */
- const char *reason = NULL; /* reason for completion */
- const char *no_bug = NULL; /* successful completion if set */
+ const char *host = "<unknown>"; /* peer host */
+ const char *reason = NULL; /* reason for completion */
+ const char *no_bug = NULL; /* successful completion if set */
const int pdcb_valid = cbdataReferenceValid(fetch->pd);
const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);
if (!(pd = fetch->pd))
reason = "peer digest disappeared?!";
-#if DONT /* WHY NOT? /HNO */
+#if DONT /* WHY NOT? /HNO */
else if (!cbdataReferenceValid(pd))
reason = "invalidated peer digest?!";
if (!reason && !size) {
if (!pd->cd)
reason = "null digest?!";
- else if (fetch->mask_offset != (int)pd->cd->mask_size)
+ else if (fetch->mask_offset != pd->cd->mask_size)
reason = "premature end of digest?!";
else if (!peerDigestUseful(pd))
reason = "useless digest";
pd->times.received = squid_curtime;
pd->times.req_delay = fetch->resp_time;
- kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
- kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
+ pd->stats.sent.kbytes += fetch->sent.bytes;
+ pd->stats.recv.kbytes += fetch->recv.bytes;
pd->stats.sent.msgs += fetch->sent.msg;
pd->stats.recv.msgs += fetch->recv.msg;
if (err) {
debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);
- if (pd->cd) {
- cacheDigestDestroy(pd->cd);
- pd->cd = NULL;
- }
+ delete pd->cd;
+ pd->cd = nullptr;
pd->flags.usable = false;
}
/* update global stats */
- kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);
-
- kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);
-
+ statCounter.cd.kbytes_sent += fetch->sent.bytes;
+ statCounter.cd.kbytes_recv += fetch->recv.bytes;
statCounter.cd.msgs_sent += fetch->sent.msg;
-
statCounter.cd.msgs_recv += fetch->recv.msg;
- /* unlock everything */
- storeUnregister(fetch->sc, fetch->entry, fetch);
-
- fetch->entry->unlock("peerDigestFetchFinish new");
-
- HTTPMSGUNLOCK(fetch->request);
-
- fetch->entry = NULL;
-
- assert(fetch->pd == NULL);
-
- cbdataFree(fetch);
+ delete fetch;
}
/* calculate fetch stats after completion */
debugs(72, 3, "peerDigestFetchFinish: expires: " <<
(long int) fetch->expires << " (" << std::showpos <<
(int) (fetch->expires - squid_curtime) << "), lmt: " <<
- std::noshowpos << (long int) fetch->entry->lastmod << " (" <<
- std::showpos << (int) (fetch->entry->lastmod - squid_curtime) <<
+ std::noshowpos << (long int) fetch->entry->lastModified() << " (" <<
+ std::showpos << (int) (fetch->entry->lastModified() - squid_curtime) <<
")");
}
}
/* check consistency further */
- if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
+ if ((size_t)cblock.mask_size != CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
"(mask size mismatch: " << cblock.mask_size << " ? " <<
- cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
+ CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)
<< ").");
return 0;
}
debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
" -> " << pd->cd->mask_size);
freed_size = pd->cd->mask_size;
- cacheDigestDestroy(pd->cd);
- pd->cd = NULL;
+ delete pd->cd;
+ pd->cd = nullptr;
}
if (!pd->cd) {
debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
- pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);
+ pd->cd = new CacheDigest(cblock.capacity, cblock.bits_per_entry);
if (cblock.mask_size >= freed_size)
- kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
+ statCounter.cd.memory += (cblock.mask_size - freed_size);
}
assert(pd->cd);
peerDigestUseful(const PeerDigest * pd)
{
/* TODO: we should calculate the prob of a false hit instead of bit util */
- const int bit_util = cacheDigestBitUtil(pd->cd);
+ const auto bit_util = pd->cd->usedMaskPercent();
- if (bit_util > 65) {
+ if (bit_util > 65.0) {
debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
- " peer digest has too many bits on (" << bit_util << "%%).");
-
+ " peer digest has too many bits on (" << bit_util << "%).");
return 0;
}
}
#endif
+