/*
- * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
+ * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
/* DEBUG: section 20 Storage Manager */
#include "squid.h"
+#include "base/AsyncCbdataCalls.h"
+#include "base/PackableStream.h"
+#include "base/TextException.h"
#include "CacheDigest.h"
#include "CacheManager.h"
+#include "CollapsedForwarding.h"
#include "comm/Connection.h"
#include "comm/Read.h"
+#if HAVE_DISKIO_MODULE_IPCIO
+#include "DiskIO/IpcIo/IpcIoFile.h"
+#endif
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
+#include "MemStore.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#define STORE_IN_MEM_BUCKETS (229)
-/** \todo Convert these string constants to enum string-arrays generated */
+// TODO: Convert these string constants to enum string-arrays generated
const char *memStatusStr[] = {
"NOT_IN_MEMORY",
const char *swapStatusStr[] = {
"SWAPOUT_NONE",
"SWAPOUT_WRITING",
- "SWAPOUT_DONE"
+ "SWAPOUT_DONE",
+ "SWAPOUT_FAILED"
};
/*
Root().stat(*output);
}
+/// reports the current state of Store-related queues
+static void
+StatQueues(StoreEntry *e)
+{
+ assert(e);
+ PackableStream stream(*e);
+ CollapsedForwarding::StatQueue(stream);
+#if HAVE_DISKIO_MODULE_IPCIO
+ stream << "\n";
+ IpcIoFile::StatQueue(stream);
+#endif
+ stream.flush();
+}
+
// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
pool->freeOne(address);
}
-void
+bool
StoreEntry::makePublic(const KeyScope scope)
{
/* This object can be cached for a long time */
- if (!EBIT_TEST(flags, RELEASE_REQUEST))
- setPublicKey(scope);
+ return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
}
void
-StoreEntry::makePrivate()
+StoreEntry::makePrivate(const bool shareable)
{
- /* This object should never be cached at all */
- expireNow();
- releaseRequest(); /* delete object when not used */
+ releaseRequest(shareable); /* delete object when not used */
}
void
+StoreEntry::clearPrivate()
+{
+ assert(!EBIT_TEST(flags, RELEASE_REQUEST));
+ EBIT_CLR(flags, KEY_PRIVATE);
+ shareableWhenPrivate = false;
+}
+
+bool
StoreEntry::cacheNegatively()
{
/* This object may be negatively cached */
- negativeCache();
- makePublic();
+ if (makePublic()) {
+ negativeCache();
+ return true;
+ }
+ return false;
}
size_t
* ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
*/
- if (amountToRead == 0) {
+ if (amountToRead <= 0) {
assert (mem_obj);
- /* read ahead limit */
- /* Perhaps these two calls should both live in MemObject */
-#if USE_DELAY_POOLS
- if (!mem_obj->readAheadPolicyCanRead()) {
-#endif
- mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
- return;
-#if USE_DELAY_POOLS
- }
-
- /* delay id limit */
- mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
+ mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
return;
-
-#endif
-
}
if (fd_table[conn->fd].closing()) {
// readers appeared to care around 2009/12/14 as they skipped reading
    // for other reasons. Closing may already be true at the delayAwareRead
// call time or may happen while we wait after delayRead() above.
- debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
- callback);
+ debugs(20, 3, "will not read from closing " << conn << " for " << callback);
return; // the read callback will never be called
}
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
+// STORE_MEM_CLIENT covers all other cases, including in-memory entries,
+// newly created entries, and entries not backed by disk or memory cache.
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
return STORE_MEM_CLIENT;
}
+ if (swapoutFailed())
+ return STORE_MEM_CLIENT;
+
if (store_status == STORE_OK) {
/* the object has completed. */
if (mem_obj->inmem_lo == 0 && !isEmpty()) {
- if (swap_status == SWAPOUT_DONE) {
+ if (swappedOut()) {
debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
if (mem_obj->endOffset() == mem_obj->object_sz) {
/* hot object fully swapped in (XXX: or swapped out?) */
timestamp(-1),
lastref(-1),
expires(-1),
- lastmod(-1),
+ lastModified_(-1),
swap_file_sz(0),
refcount(0),
flags(0),
ping_status(PING_NONE),
store_status(STORE_PENDING),
swap_status(SWAPOUT_NONE),
- lock_count(0)
+ lock_count(0),
+ shareableWhenPrivate(false)
{
debugs(20, 5, "StoreEntry constructed, this=" << this);
}
if (!deferredProducer)
deferredProducer = producer;
else
- debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
+ debugs(20, 5, "Deferred producer call is already set to: " <<
*deferredProducer << ", requested call: " << *producer);
}
void
StoreEntry::destroyMemObject()
{
- debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
+ debugs(20, 3, mem_obj << " in " << *this);
- if (MemObject *mem = mem_obj) {
- // Store::Root() is FATALly missing during shutdown
- if (mem->xitTable.index >= 0 && !shutting_down)
- Store::Root().transientsDisconnect(*mem);
- if (mem->memCache.index >= 0 && !shutting_down)
- Store::Root().memoryDisconnect(*this);
+ // Store::Root() is FATALly missing during shutdown
+ if (hasTransients() && !shutting_down)
+ Store::Root().transientsDisconnect(*this);
+ if (hasMemStore() && !shutting_down)
+ Store::Root().memoryDisconnect(*this);
+ if (auto memObj = mem_obj) {
setMemStatus(NOT_IN_MEMORY);
mem_obj = NULL;
- delete mem;
+ delete memObj;
}
}
StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
assert(e != NULL);
- if (e == NullStoreEntry::getInstance())
- return;
-
// Store::Root() is FATALly missing during shutdown
- if (e->swap_filen >= 0 && !shutting_down)
+ if (e->hasDisk() && !shutting_down)
e->disk().disconnect(*e);
e->destroyMemObject();
StoreEntry::hashInsert(const cache_key * someKey)
{
debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
+ assert(!key);
key = storeKeyDup(someKey);
hash_join(store_table, this);
}
/* -------------------------------------------------------------------------- */
-/* get rid of memory copy of the object */
-void
-StoreEntry::purgeMem()
-{
- if (mem_obj == NULL)
- return;
-
- debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
-
- Store::Root().memoryUnlink(*this);
-
- if (swap_status != SWAPOUT_DONE)
- release();
-}
-
void
StoreEntry::lock(const char *context)
{
}
void
-StoreEntry::setReleaseFlag()
+StoreEntry::releaseRequest(const bool shareable)
{
+ debugs(20, 3, shareable << ' ' << *this);
+ if (!shareable)
+ shareableWhenPrivate = false; // may already be false
if (EBIT_TEST(flags, RELEASE_REQUEST))
return;
-
- debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
-
- EBIT_SET(flags, RELEASE_REQUEST);
-
- Store::Root().markForUnlink(*this);
-}
-
-void
-StoreEntry::releaseRequest()
-{
- if (EBIT_TEST(flags, RELEASE_REQUEST))
- return;
-
- setReleaseFlag(); // makes validToSend() false, preventing future hits
-
- setPrivateKey();
+ setPrivateKey(shareable, true);
}
int
if (lock_count)
return (int) lock_count;
- if (store_status == STORE_PENDING)
- setReleaseFlag();
+ abandon(context);
+ return 0;
+}
+/// keep the unlocked StoreEntry object in the local store_table (if needed) or
+/// delete it (otherwise)
+void
+StoreEntry::doAbandon(const char *context)
+{
+ debugs(20, 5, *this << " via " << (context ? context : "somebody"));
+ assert(!locked());
assert(storePendingNClients(this) == 0);
- if (EBIT_TEST(flags, RELEASE_REQUEST)) {
+ // Both aborted local writers and aborted local readers (of remote writers)
+ // are STORE_PENDING, but aborted readers should never release().
+ if (EBIT_TEST(flags, RELEASE_REQUEST) ||
+ (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
this->release();
- return 0;
+ return;
}
if (EBIT_TEST(flags, KEY_PRIVATE))
debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
Store::Root().handleIdleEntry(*this); // may delete us
- return 0;
-}
-
-void
-StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
-{
- assert (aClient);
- StoreEntry *result = storeGetPublicByRequestMethod( request, method);
-
- if (!result)
- aClient->created (NullStoreEntry::getInstance());
- else
- aClient->created (result);
-}
-
-void
-StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
-{
- assert (aClient);
- StoreEntry *result = storeGetPublicByRequest (request);
-
- if (!result)
- result = NullStoreEntry::getInstance();
-
- aClient->created (result);
-}
-
-void
-StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
-{
- assert (aClient);
- StoreEntry *result = storeGetPublic (uri, method);
-
- if (!result)
- result = NullStoreEntry::getInstance();
-
- aClient->created (result);
}
/// Searches all caches for a public entry keyed on the given URI and method.
/// \returns the matching StoreEntry or nil if none was found
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().find(storeKeyPublic(uri, method));
}
/// Searches all caches for a public entry using a request-derived key for the
/// given method and key scope.
/// \returns the matching StoreEntry or nil if none was found
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
}
StoreEntry *
* concept'.
*/
void
-StoreEntry::setPrivateKey()
+StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
- if (key && EBIT_TEST(flags, KEY_PRIVATE))
- return; /* is already private */
-
- if (key) {
- setReleaseFlag(); // will markForUnlink(); all caches/workers will know
+ debugs(20, 3, shareable << permanent << ' ' << *this);
+ if (permanent)
+ EBIT_SET(flags, RELEASE_REQUEST); // may already be set
+ if (!shareable)
+ shareableWhenPrivate = false; // may already be false
- // TODO: move into SwapDir::markForUnlink() already called by Root()
- if (swap_filen > -1)
- storeDirSwapLog(this, SWAP_LOG_DEL);
+ if (EBIT_TEST(flags, KEY_PRIVATE))
+ return;
+ if (key) {
+ Store::Root().evictCached(*this); // all caches/workers will know
hashDelete();
}
assert(hash_lookup(store_table, newkey) == NULL);
EBIT_SET(flags, KEY_PRIVATE);
+ shareableWhenPrivate = shareable;
hashInsert(newkey);
}
-void
+bool
StoreEntry::setPublicKey(const KeyScope scope)
{
+ debugs(20, 3, *this);
if (key && !EBIT_TEST(flags, KEY_PRIVATE))
- return; /* is already public */
+ return true; // already public
assert(mem_obj);
assert(!EBIT_TEST(flags, RELEASE_REQUEST));
- adjustVary();
- forcePublicKey(calcPublicKey(scope));
+ try {
+ EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
+ const cache_key *pubKey = calcPublicKey(scope);
+ Store::Root().addWriting(this, pubKey);
+ forcePublicKey(pubKey);
+ newVaryMarker.unlockAndReset("setPublicKey+success");
+ return true;
+ } catch (const std::exception &ex) {
+ debugs(20, 2, "for " << *this << " failed: " << ex.what());
+ }
+ return false;
}
void
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
+ debugs(20, 3, storeKeyText(newkey) << " for " << *this);
+ assert(mem_obj);
+
if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
assert(e2 != this);
- debugs(20, 3, "Making old " << *e2 << " private.");
- e2->setPrivateKey();
- e2->release();
+ debugs(20, 3, "releasing clashing " << *e2);
+ e2->release(true);
}
if (key)
hashDelete();
- EBIT_CLR(flags, KEY_PRIVATE);
+ clearPrivate();
+ assert(mem_obj->hasUris());
hashInsert(newkey);
- if (swap_filen > -1)
+ if (hasDisk())
storeDirSwapLog(this, SWAP_LOG_ADD);
}
StoreEntry::calcPublicKey(const KeyScope keyScope)
{
assert(mem_obj);
- return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request, keyScope) :
+ return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
}
/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
-void
+/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
+/// \throws std::exception on failures
+StoreEntry *
StoreEntry::adjustVary()
{
assert(mem_obj);
if (!mem_obj->request)
- return;
+ return nullptr;
- HttpRequest *request = mem_obj->request;
+ HttpRequestPointer request(mem_obj->request);
+ const auto &reply = mem_obj->freshestReply();
if (mem_obj->vary_headers.isEmpty()) {
/* First handle the case where the object no longer varies */
*/
request->vary_headers.clear(); /* free old "bad" variance key */
if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
- pe->release();
+ pe->release(true);
}
/* Make sure the request knows the variance status */
if (request->vary_headers.isEmpty())
- request->vary_headers = httpMakeVaryMark(request, mem_obj->getReply());
+ request->vary_headers = httpMakeVaryMark(request.getRaw(), &reply);
}
// TODO: storeGetPublic() calls below may create unlocked entries.
// We should add/use storeHas() API or lock/unlock those entries.
if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
/* Create "vary" base object */
- String vary;
StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
+ // XXX: storeCreateEntry() already tries to make `pe` public under
+ // certain conditions. If those conditions do not apply to Vary markers,
+ // then refactor to call storeCreatePureEntry() above. Otherwise,
+ // refactor to simply check whether `pe` is already public below.
+ if (!pe->makePublic()) {
+ pe->unlock("StoreEntry::adjustVary+failed_makePublic");
+ throw TexcHere("failed to make Vary marker public");
+ }
/* We are allowed to do this typecast */
- HttpReply *rep = new HttpReply;
+ const HttpReplyPointer rep(new HttpReply);
rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
- vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);
+ auto vary = reply.header.getList(Http::HdrType::VARY);
if (vary.size()) {
/* Again, we own this structure layout */
}
#if X_ACCELERATOR_VARY
- vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
+ vary = reply.header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
if (vary.size() > 0) {
/* Again, we own this structure layout */
}
#endif
- pe->replaceHttpReply(rep, false); // no write until key is public
+ pe->replaceHttpReply(rep, false); // no write until timestampsSet()
pe->timestampsSet();
- pe->makePublic();
-
- pe->startWriting(); // after makePublic()
+ pe->startWriting(); // after timestampsSet()
pe->complete();
- pe->unlock("StoreEntry::forcePublicKey+Vary");
+ return pe;
}
+ return nullptr;
}
StoreEntry *
-storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
+storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
{
StoreEntry *e = NULL;
debugs(20, 3, "storeCreateEntry: '" << url << "'");
e = new StoreEntry();
- e->makeMemObject();
- e->mem_obj->setUris(url, log_url, method);
-
- if (flags.cachable) {
- EBIT_CLR(e->flags, RELEASE_REQUEST);
- } else {
- e->releaseRequest();
- }
+ e->createMemObject(url, log_url, method);
e->store_status = STORE_PENDING;
e->refcount = 0;
/// Creates a locked StoreEntry for the given request, giving it a public key
/// when peering and cachability allow; otherwise the entry gets a private key
/// (a permanently private one if the request is not cachable).
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    // either the conditions above do not hold or setPublicKey() failed
    e->setPrivateKey(false, !flags.cachable);
    return e;
}
assert(store_status == STORE_PENDING);
// XXX: caller uses content offset, but we also store headers
- if (const HttpReply *reply = mem_obj->getReply())
- writeBuffer.offset += reply->hdr_sz;
+ writeBuffer.offset += mem_obj->baseReply().hdr_sz;
debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
PROF_stop(StoreEntry_write);
storeGetMemSpace(writeBuffer.length);
mem_obj->write(writeBuffer);
- if (!EBIT_TEST(flags, DELAY_SENDING))
- invokeHandlers();
+ if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
+ debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
+ EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
+ }
+
+ invokeHandlers();
}
/* Append incoming data from a primary server to an entry. */
* XXX sigh, offset might be < 0 here, but it gets "corrected"
* later. This offset crap is such a mess.
*/
- tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
+ tempBuffer.offset = mem_obj->endOffset() - mem_obj->baseReply().hdr_sz;
write(tempBuffer);
}
*buf = 0;
int x;
-#ifdef VA_COPY
- va_args ap;
+ va_list ap;
/* Fix of bug 753r. The value of vargs is undefined
* after vsnprintf() returns. Make a copy of vargs
- * incase we loop around and call vsnprintf() again.
+ * in case we loop around and call vsnprintf() again.
*/
- VA_COPY(ap,vargs);
+ va_copy(ap,vargs);
errno = 0;
if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
fatal(xstrerr(errno));
return;
}
va_end(ap);
-#else /* VA_COPY */
- errno = 0;
- if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
- fatal(xstrerr(errno));
- return;
- }
-#endif /*VA_COPY*/
if (x < static_cast<int>(sizeof(buf))) {
append(buf, x);
int non_get;
int not_entry_cachable;
int wrong_content_length;
- int negative_cached;
int too_big;
int too_small;
int private_key;
if (mem_obj->object_sz >= 0 &&
mem_obj->object_sz < Config.Store.minObjectSize)
return 1;
- if (getReply()->content_length > -1)
- if (getReply()->content_length < Config.Store.minObjectSize)
- return 1;
+
+ const auto clen = mem().baseReply().content_length;
+ if (clen >= 0 && clen < Config.Store.minObjectSize)
+ return 1;
return 0;
}
if (mem_obj->endOffset() > store_maxobjsize)
return true;
- if (getReply()->content_length < 0)
- return false;
-
- return (getReply()->content_length > store_maxobjsize);
+ const auto clen = mem_obj->baseReply().content_length;
+ return (clen >= 0 && clen > store_maxobjsize);
}
// TODO: move "too many open..." checks outside -- we are called too early/late
if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
++store_check_cachable_hist.no.wrong_content_length;
- } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
- debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
- ++store_check_cachable_hist.no.negative_cached;
- return 0; /* avoid release call below */
- } else if (!mem_obj || !getReply()) {
+ } else if (!mem_obj) {
// XXX: In bug 4131, we forgetHit() without mem_obj, so we need
// this segfault protection, but how can we get such a HIT?
debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
} else if (EBIT_TEST(flags, KEY_PRIVATE)) {
debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
++store_check_cachable_hist.no.private_key;
- } else if (swap_status != SWAPOUT_NONE) {
+ } else if (hasDisk()) {
/*
- * here we checked the swap_status because the remaining
- * cases are only relevant only if we haven't started swapping
- * out the object yet.
+ * the remaining cases are only relevant if we haven't
+ * started swapping out the object yet.
*/
return 1;
} else if (storeTooManyDiskFilesOpen()) {
storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
store_check_cachable_hist.no.wrong_content_length);
storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
- store_check_cachable_hist.no.negative_cached);
+ 0); // TODO: Remove this backward compatibility hack.
storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
store_check_cachable_hist.no.missing_parts);
storeAppendPrintf(sentry, "no.too_big\t%d\n",
{
debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
+ // To preserve forwarding retries, call FwdState::complete() instead.
+ EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
+
if (store_status != STORE_PENDING) {
/*
* if we're not STORE_PENDING, then probably we got aborted
return;
}
- /* This is suspect: mem obj offsets include the headers. do we adjust for that
- * in use of object_sz?
- */
mem_obj->object_sz = mem_obj->endOffset();
store_status = STORE_OK;
EBIT_SET(flags, ENTRY_ABORTED);
+ // allow the Store clients to be told about the problem
+ EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
+
setMemStatus(NOT_IN_MEMORY);
store_status = STORE_OK;
/* Notify the server side */
- /*
- * DPW 2007-05-07
- * Should we check abort.data for validity?
- */
- if (mem_obj->abort.callback) {
- if (!cbdataReferenceValid(mem_obj->abort.data))
- debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
- eventAdd("mem_obj->abort.callback",
- mem_obj->abort.callback,
- mem_obj->abort.data,
- 0.0,
- true);
- unregisterAbort();
+ if (mem_obj->abortCallback) {
+ ScheduleCallHere(mem_obj->abortCallback);
+ mem_obj->abortCallback = nullptr;
}
/* XXX Should we reverse these two, so that there is no
storeGetMemSpace(int size)
{
PROF_start(storeGetMemSpace);
- StoreEntry *e = NULL;
- int released = 0;
- static time_t last_check = 0;
- size_t pages_needed;
- RemovalPurgeWalker *walker;
-
- if (squid_curtime == last_check) {
- PROF_stop(storeGetMemSpace);
- return;
- }
-
- last_check = squid_curtime;
-
- pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
-
- if (mem_node::InUseCount() + pages_needed < store_pages_max) {
- PROF_stop(storeGetMemSpace);
- return;
- }
-
- debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
- " pages");
-
- /* XXX what to set as max_scan here? */
- walker = mem_policy->PurgeInit(mem_policy, 100000);
-
- while ((e = walker->Next(walker))) {
- e->purgeMem();
- ++released;
-
- if (mem_node::InUseCount() + pages_needed < store_pages_max)
- break;
- }
-
- walker->Done(walker);
- debugs(20, 3, "storeGetMemSpace stats:");
- debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
- debugs(20, 3, " " << std::setw(6) << released << " were released");
+ if (!shutting_down) // Store::Root() is FATALly missing during shutdown
+ Store::Root().freeMemorySpace(size);
PROF_stop(storeGetMemSpace);
}
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64
-/* release an object from a cache */
void
-StoreEntry::release()
+StoreEntry::release(const bool shareable)
{
PROF_start(storeRelease);
- debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
+ debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
/* If, for any reason we can't discard this object because of an
* outstanding request, mark it for pending release */
if (locked()) {
- expireNow();
- debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
- releaseRequest();
+ releaseRequest(shareable);
PROF_stop(storeRelease);
return;
}
- if (Store::Controller::store_dirs_rebuilding && swap_filen > -1) {
+ if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
/* TODO: Teach disk stores to handle releases during rebuild instead. */
- Store::Root().memoryUnlink(*this);
-
- setPrivateKey();
-
// lock the entry until rebuilding is done
lock("storeLateRelease");
- setReleaseFlag();
+ releaseRequest(shareable);
LateReleaseStack.push(this);
+ PROF_stop(storeRelease);
return;
}
storeLog(STORE_LOG_RELEASE, this);
- if (swap_filen > -1 && !EBIT_TEST(flags, KEY_PRIVATE)) {
- // log before unlink() below clears swap_filen
- storeDirSwapLog(this, SWAP_LOG_DEL);
- }
-
- Store::Root().unlink(*this);
+ Store::Root().evictCached(*this);
destroyStoreEntry(static_cast<hash_link *>(this));
PROF_stop(storeRelease);
}
eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
}
-/* return 1 if a store entry is locked */
-int
-StoreEntry::locked() const
-{
- if (lock_count)
- return 1;
-
- /*
- * SPECIAL, PUBLIC entries should be "locked";
- * XXX: Their owner should lock them then instead of relying on this hack.
- */
- if (EBIT_TEST(flags, ENTRY_SPECIAL))
- if (!EBIT_TEST(flags, KEY_PRIVATE))
- return 1;
-
- return 0;
-}
-
+/// whether the base response has all the body bytes we expect
+/// \returns true for responses with unknown/unspecified body length
+/// \returns true for responses with the right number of accumulated body bytes
bool
StoreEntry::validLength() const
{
int64_t diff;
- const HttpReply *reply;
assert(mem_obj != NULL);
- reply = getReply();
+ const auto reply = &mem_obj->baseReply();
debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
debugs(20, 5, "storeEntryValidLength: object_len = " <<
objectLen());
Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
storeCheckCachableStats, 0, 1);
+ Mgr::RegisterAction("store_queues", "SMP Transients and Caching Queues", StatQueues, 0, 1);
}
void
void
storeConfigure(void)
{
- Store::Root().updateLimits();
+ Store::Root().configure();
}
bool
if (mem_obj->inmem_lo != 0)
return 0;
- if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
+ if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
return 0;
return 1;
#else
expires = squid_curtime;
#endif
- EBIT_SET(flags, ENTRY_NEGCACHED);
+ if (expires > squid_curtime) {
+ EBIT_SET(flags, ENTRY_NEGCACHED);
+ debugs(20, 6, "expires = " << expires << " +" << (expires-squid_curtime) << ' ' << *this);
+ }
}
void
return 0;
// now check that the entry has a cache backing or is collapsed
- if (swap_filen > -1) // backed by a disk cache
+ if (hasDisk()) // backed by a disk cache
return 1;
if (swappingOut()) // will be backed by a disk cache
bool
StoreEntry::timestampsSet()
{
- const HttpReply *reply = getReply();
+ debugs(20, 7, *this << " had " << describeTimestamps());
+
+ // TODO: Remove change-reducing "&" before the official commit.
+ const auto reply = &mem().freshestReply();
+
time_t served_date = reply->date;
int age = reply->header.getInt(Http::HdrType::AGE);
/* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
// compensate for Squid-to-server and server-to-Squid delays
if (mem_obj && mem_obj->request) {
- const time_t request_sent =
- mem_obj->request->hier.peer_http_request_sent.tv_sec;
- if (0 < request_sent && request_sent < squid_curtime)
- served_date -= (squid_curtime - request_sent);
+ struct timeval responseTime;
+ if (mem_obj->request->hier.peerResponseTime(responseTime))
+ served_date -= responseTime.tv_sec;
}
time_t exp = 0;
else
exp = reply->expires;
- if (lastmod == reply->last_modified && timestamp == served_date && expires == exp)
- return false;
+ if (timestamp == served_date && expires == exp) {
+ // if the reply lacks LMT, then we now know that our effective
+ // LMT (i.e., timestamp) will stay the same, otherwise, old and
+ // new modification times must match
+ if (reply->last_modified < 0 || reply->last_modified == lastModified())
+ return false; // nothing has changed
+ }
expires = exp;
- lastmod = reply->last_modified;
+ lastModified_ = reply->last_modified;
timestamp = served_date;
+ debugs(20, 5, *this << " has " << describeTimestamps());
+ return true;
+}
+
+bool
+StoreEntry::updateOnNotModified(const StoreEntry &e304)
+{
+ assert(mem_obj);
+ assert(e304.mem_obj);
+
+ // update reply before calling timestampsSet() below
+ const auto &oldReply = mem_obj->freshestReply();
+ const auto updatedReply = oldReply.recreateOnNotModified(e304.mem_obj->baseReply());
+ if (updatedReply) // HTTP 304 brought in new information
+ mem_obj->updateReply(*updatedReply);
+ // else continue to use the previous update, if any
+
+ if (!timestampsSet() && !updatedReply)
+ return false;
+
+ // Keep the old mem_obj->vary_headers; see HttpHeader::skipUpdateHeader().
+
+ debugs(20, 5, "updated basics in " << *this << " with " << e304);
+ mem_obj->appliedUpdates = true; // helps in triage; may already be true
return true;
}
void
-StoreEntry::registerAbort(STABH * cb, void *data)
+StoreEntry::registerAbortCallback(const AsyncCall::Pointer &handler)
{
assert(mem_obj);
- assert(mem_obj->abort.callback == NULL);
- mem_obj->abort.callback = cb;
- mem_obj->abort.data = cbdataReference(data);
+ assert(!mem_obj->abortCallback);
+ mem_obj->abortCallback = handler;
}
void
-StoreEntry::unregisterAbort()
+StoreEntry::unregisterAbortCallback(const char *reason)
{
assert(mem_obj);
- if (mem_obj->abort.callback) {
- mem_obj->abort.callback = NULL;
- cbdataReferenceDone(mem_obj->abort.data);
+ if (mem_obj->abortCallback) {
+ mem_obj->abortCallback->cancel(reason);
+ mem_obj->abortCallback = nullptr;
}
}
debugs(20, l, "StoreEntry->timestamp: " << timestamp);
debugs(20, l, "StoreEntry->lastref: " << lastref);
debugs(20, l, "StoreEntry->expires: " << expires);
- debugs(20, l, "StoreEntry->lastmod: " << lastmod);
+ debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
debugs(20, l, "StoreEntry->refcount: " << refcount);
debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
return;
// are we using a shared memory cache?
- if (Config.memShared && IamWorkerProcess()) {
+ if (MemStore::Enabled()) {
// This method was designed to update replacement policy, not to
// actually purge something from the memory cache (TODO: rename?).
// Shared memory cache does not have a policy that needs updates.
return mem_obj->storeId();
}
-MemObject *
-StoreEntry::makeMemObject()
+void
+StoreEntry::createMemObject()
{
- if (!mem_obj)
- mem_obj = new MemObject();
- return mem_obj;
+ assert(!mem_obj);
+ mem_obj = new MemObject();
}
/// Creates this entry's MemObject and sets its URIs/method. The entry must
/// not already have a MemObject.
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}
+
+void
+StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
+{
+ if (!mem_obj)
+ mem_obj = new MemObject();
mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
}
}
-int64_t
-StoreEntry::objectLen() const
-{
- assert(mem_obj != NULL);
- return mem_obj->object_sz;
-}
-
-int64_t
-StoreEntry::contentLen() const
-{
- assert(mem_obj != NULL);
- assert(getReply() != NULL);
- return objectLen() - getReply()->hdr_sz;
-}
-
-HttpReply const *
-StoreEntry::getReply () const
-{
- if (NULL == mem_obj)
- return NULL;
-
- return mem_obj->getReply();
-}
-
/// Resets the entry to a freshly-created state: clears the MemObject content
/// and invalidates the entry timestamps.
void
StoreEntry::reset()
{
    debugs(20, 3, url());
    mem().reset(); // mem() asserts that mem_obj exists
    expires = lastModified_ = timestamp = -1;
}
/*
{
lock("StoreEntry::storeErrorResponse");
buffer();
- replaceHttpReply(reply);
+ replaceHttpReply(HttpReplyPointer(reply));
flush();
complete();
negativeCache();
- releaseRequest();
+ releaseRequest(false); // if it is safe to negatively cache, sharing is OK
unlock("StoreEntry::storeErrorResponse");
}
* a new reply. This eats the reply.
*/
void
-StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
+StoreEntry::replaceHttpReply(const HttpReplyPointer &rep, const bool andStartWriting)
{
debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
return;
}
- mem_obj->replaceHttpReply(rep);
+ mem_obj->replaceBaseReply(rep);
if (andStartWriting)
startWriting();
/* TODO: when we store headers separately remove the header portion */
/* TODO: mark the length of the headers ? */
/* We ONLY want the headers */
-
assert (isEmpty());
assert(mem_obj);
- const HttpReply *rep = getReply();
- assert(rep);
+ // Per MemObject replies definitions, we can only write our base reply.
+    // Currently, all callers call replaceHttpReply() first, so there is no
+ // reply here anyway. Eventually, we may need to support the
+ // updateOnNotModified(),startWriting() sequence as well.
+ assert(!mem_obj->updatedReply());
+ const auto rep = &mem_obj->baseReply();
buffer();
- rep->packHeadersInto(this);
+ rep->packHeadersUsingSlowPacker(*this);
mem_obj->markEndOfReplyHeaders();
- EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
rep->body.packInto(this);
flush();
+
+ // The entry headers are written, new clients
+ // should not collapse anymore.
+ if (hittingRequiresCollapsing()) {
+ setCollapsingRequirement(false);
+ Store::Root().transientsClearCollapsingRequirement(*this);
+ }
}
char const *
-StoreEntry::getSerialisedMetaData()
+StoreEntry::getSerialisedMetaData(size_t &length) const
{
StoreMeta *tlv_list = storeSwapMetaBuild(this);
int swap_hdr_sz;
char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
storeSwapTLVFree(tlv_list);
assert (swap_hdr_sz >= 0);
- mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
+ length = static_cast<size_t>(swap_hdr_sz);
return result;
}
void
StoreEntry::transientsAbandonmentCheck()
{
- if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
- mem_obj->xitTable.index >= 0 && // other workers may be interested
- mem_obj->memCache.index < 0 && // rejected by the shared memory cache
+ if (mem_obj && !Store::Root().transientsReader(*this) && // this worker is responsible
+ hasTransients() && // other workers may be interested
+ !hasMemStore() && // rejected by the shared memory cache
mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
debugs(20, 7, "cannot be shared: " << *this);
if (!shutting_down) // Store::Root() is FATALly missing during shutdown
- Store::Root().transientsAbandon(*this);
+ Store::Root().stopSharing(*this);
}
}
}
bool
-StoreEntry::modifiedSince(HttpRequest * request) const
+StoreEntry::modifiedSince(const time_t ims, const int imslen) const
{
- int object_length;
- time_t mod_time = lastmod;
-
- if (mod_time < 0)
- mod_time = timestamp;
+ const time_t mod_time = lastModified();
debugs(88, 3, "modifiedSince: '" << url() << "'");
if (mod_time < 0)
return true;
- /* Find size of the object */
- object_length = getReply()->content_length;
-
- if (object_length < 0)
- object_length = contentLen();
+ assert(imslen < 0); // TODO: Either remove imslen or support it properly.
- if (mod_time > request->ims) {
+ if (mod_time > ims) {
debugs(88, 3, "--> YES: entry newer than client");
return true;
- } else if (mod_time < request->ims) {
+ } else if (mod_time < ims) {
debugs(88, 3, "--> NO: entry older than client");
return false;
- } else if (request->imslen < 0) {
- debugs(88, 3, "--> NO: same LMT, no client length");
- return false;
- } else if (request->imslen == object_length) {
- debugs(88, 3, "--> NO: same LMT, same length");
- return false;
} else {
- debugs(88, 3, "--> YES: same LMT, different length");
- return true;
+ debugs(88, 3, "--> NO: same LMT");
+ return false;
}
}
bool
StoreEntry::hasEtag(ETag &etag) const
{
- if (const HttpReply *reply = getReply()) {
+ if (const auto reply = hasFreshestReply()) {
etag = reply->header.getETag(Http::HdrType::ETAG);
if (etag.str)
return true;
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
- const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
- if (!repETag.str)
- return strListIsMember(&reqETags, "*", ',');
+ const auto repETag = mem().freshestReply().header.getETag(Http::HdrType::ETAG);
+ if (!repETag.str) {
+ static SBuf asterisk("*", 1);
+ return strListIsMember(&reqETags, asterisk, ',');
+ }
bool matched = false;
const char *pos = NULL;
Store::Disk &
StoreEntry::disk() const
{
- assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
+ assert(hasDisk());
const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
assert(sd);
return *sd;
}
+bool
+StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
+{
+ checkDisk();
+ if (dirn < 0 && filen < 0)
+ return swap_dirn >= 0;
+ Must(dirn >= 0);
+ const bool matchingDisk = (swap_dirn == dirn);
+ return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
+}
+
+void
+StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
+{
+ debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
+ swapStatusStr[status] << " " << dirn << " " <<
+ std::hex << std::setw(8) << std::setfill('0') <<
+ std::uppercase << fno);
+ checkDisk();
+ swap_dirn = dirn;
+ swap_filen = fno;
+ swap_status = status;
+ checkDisk();
+}
+
+void
+StoreEntry::detachFromDisk()
+{
+ swap_dirn = -1;
+ swap_filen = -1;
+ swap_status = SWAPOUT_NONE;
+}
+
+void
+StoreEntry::checkDisk() const
+{
+ try {
+ if (swap_dirn < 0) {
+ Must(swap_filen < 0);
+ Must(swap_status == SWAPOUT_NONE);
+ } else {
+ Must(swap_filen >= 0);
+ Must(swap_dirn < Config.cacheSwap.n_configured);
+ if (swapoutFailed()) {
+ Must(EBIT_TEST(flags, RELEASE_REQUEST));
+ } else {
+ Must(swappingOut() || swappedOut());
+ }
+ }
+ } catch (...) {
+ debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " <<
+ *this << "; problem: " << CurrentException);
+ throw;
+ }
+}
+
/*
* return true if the entry is in a state where
* it can accept more data (ie with write() method)
return true;
}
+const char *
+StoreEntry::describeTimestamps() const
+{
+ LOCAL_ARRAY(char, buf, 256);
+ snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
+ static_cast<int>(timestamp),
+ static_cast<int>(lastref),
+ static_cast<int>(lastModified_),
+ static_cast<int>(expires));
+ return buf;
+}
+
+void
+StoreEntry::setCollapsingRequirement(const bool required)
+{
+ if (required)
+ EBIT_SET(flags, ENTRY_REQUIRES_COLLAPSING);
+ else
+ EBIT_CLR(flags, ENTRY_REQUIRES_COLLAPSING);
+}
+
+static std::ostream &
+operator <<(std::ostream &os, const Store::IoStatus &io)
+{
+ switch (io) {
+ case Store::ioUndecided:
+ os << 'u';
+ break;
+ case Store::ioReading:
+ os << 'r';
+ break;
+ case Store::ioWriting:
+ os << 'w';
+ break;
+ case Store::ioDone:
+ os << 'o';
+ break;
+ }
+ return os;
+}
+
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
os << "e:";
- if (e.mem_obj) {
- if (e.mem_obj->xitTable.index > -1)
- os << 't' << e.mem_obj->xitTable.index;
- if (e.mem_obj->memCache.index > -1)
- os << 'm' << e.mem_obj->memCache.index;
+ if (e.hasTransients()) {
+ const auto &xitTable = e.mem_obj->xitTable;
+ os << 't' << xitTable.io << xitTable.index;
+ }
+
+ if (e.hasMemStore()) {
+ const auto &memCache = e.mem_obj->memCache;
+ os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
}
+
+ // Do not use e.hasDisk() here because its checkDisk() call may call us.
if (e.swap_filen > -1 || e.swap_dirn > -1)
os << 'd' << e.swap_filen << '@' << e.swap_dirn;
// print only set flags, using unique letters
if (e.flags) {
if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
- if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
+ if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
- if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
- if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
+ if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
+ if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
+ os << 'I';
+ if (e.shareableWhenPrivate)
+ os << 'H';
+ }
if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
+ if (EBIT_TEST(e.flags, ENTRY_REQUIRES_COLLAPSING)) os << 'C';
}
- if (e.mem_obj && e.mem_obj->smpCollapsed)
- os << 'O';
-
return os << '/' << &e << '*' << e.locks();
}
-/* NullStoreEntry */
-
-NullStoreEntry NullStoreEntry::_instance;
-
-NullStoreEntry *
-NullStoreEntry::getInstance()
-{
- return &_instance;
-}
-
-char const *
-NullStoreEntry::getMD5Text() const
-{
- return "N/A";
-}
-
void
-NullStoreEntry::operator delete(void*)
-{
- fatal ("Attempt to delete NullStoreEntry\n");
-}
-
-char const *
-NullStoreEntry::getSerialisedMetaData()
+Store::EntryGuard::onException() noexcept
{
- return NULL;
+ SWALLOW_EXCEPTIONS({
+ entry_->releaseRequest(false);
+ entry_->unlock(context_);
+ });
}