mem_node.cc \
MemBuf.cc \
MemObject.cc \
+ MemStore.cc \
Notes.h \
Notes.cc \
Parsing.cc \
tests/stub_HttpReply.cc \
tests/stub_HttpRequest.cc \
tests/stub_libcomm.cc \
- tests/stub_MemStore.cc \
mime.h \
tests/stub_mime.cc \
tests/stub_Port.cc \
tests/stub_libeui.cc \
tests/stub_libicmp.cc \
tests/stub_liblog.cc \
- tests/stub_MemStore.cc \
tests/stub_neighbors.cc \
tests/stub_pconn.cc \
tests/stub_Port.cc \
MasterXaction.cc \
MasterXaction.h \
MemObject.cc \
+ MemStore.cc \
Notes.h \
Notes.cc \
StoreSwapLogData.cc \
MasterXaction.h \
MemBuf.cc \
MemObject.cc \
+ MemStore.cc \
mem_node.cc \
Notes.h \
Notes.cc \
tests/stub_liblog.cc \
tests/stub_libmgr.cc \
tests/stub_libsecurity.cc \
- tests/stub_MemStore.cc \
mime.h \
tests/stub_mime.cc \
tests/stub_neighbors.cc \
{
const int64_t entryLimit = EntryLimit();
if (entryLimit <= 0)
- return; // no memory cache configured or a misconfiguration
+ return; // no shared memory cache configured or a misconfiguration
// check compatibility with the disk cache, if any
if (Config.cacheSwap.n_configured > 0) {
}
}
+bool
+MemStore::Requested()
+{
+ return Config.memShared && Config.memMaxSize > 0;
+}
+
/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
- if (!Config.memShared || !Config.memMaxSize)
- return 0; // no memory cache configured
+ if (!Requested())
+ return 0;
const int64_t minEntrySize = Ipc::Mem::PageSize();
const int64_t entryLimit = Config.memMaxSize / minEntrySize;
debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
" a single worker is running");
}
+
+ if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
+ debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
+ (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
+ (Ipc::Mem::PageSize() / 1024.0) << " KB");
+ }
}
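For concreteness, here is the entry-limit arithmetic behind the warning above as a standalone sketch; the 32 KB page size mirrors Squid's default Ipc::Mem::PageSize() and the cache_mem values are illustrative assumptions:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        const int64_t pageSize = 32 * 1024; // assumed Ipc::Mem::PageSize()

        // mirrors MemStore::EntryLimit(): at least one page per entry
        const int64_t memMaxSize = 256 * 1024 * 1024; // cache_mem 256 MB
        std::cout << (memMaxSize / pageSize) << '\n'; // 8192 entries

        // a cache_mem below one page yields a zero limit: the shared cache
        // stays disabled and the "too small" warning above fires
        std::cout << (int64_t(16 * 1024) / pageSize) << '\n'; // 0
    }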
void
MemStoreRr::create()
{
- if (!Config.memShared)
+ if (!MemStore::Enabled())
return;
const int64_t entryLimit = MemStore::EntryLimit();
- if (entryLimit <= 0) {
- if (Config.memMaxSize > 0) {
- debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
- << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
- (Ipc::Mem::PageSize() / 1024.0) << " KB");
- }
- return; // no memory cache configured or a misconfiguration
- }
-
+ assert(entryLimit > 0);
Must(!spaceOwner);
spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
entryLimit, 0);
virtual bool updateAnchored(StoreEntry &) override;
virtual void evictCached(StoreEntry &) override;
virtual void evictIfFound(const cache_key *) override;
- virtual bool smpAware() const override { return true; }
+ /// whether Squid is correctly configured to use a shared memory cache
+ static bool Enabled() { return EntryLimit() > 0; }
static int64_t EntryLimit();
+ /// whether Squid is configured to use a shared memory cache
+ /// (it may still be disabled due to the implicit minimum entry size limit)
+ static bool Requested();
protected:
friend ShmWriter;
#endif
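To spell out how the two new predicates relate, an illustrative sketch (not part of the patch; assumes MemStore.h and <cassert> are included):

    // Requested(): the admin asked for a shared memory cache
    //              (memory_cache_shared on, non-zero cache_mem).
    // Enabled():   Requested() and the configured size is actually usable,
    //              i.e. it yields a positive EntryLimit().
    assert(!MemStore::Enabled() || MemStore::Requested()); // Enabled implies Requested
    assert(MemStore::Enabled() == (MemStore::EntryLimit() > 0));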
} onoff;
- int64_t collapsed_forwarding_shared_entries_limit;
+ int64_t shared_transient_entries_limit;
int pipeline_max_prefetch;
void
Transients::init()
{
+ assert(Enabled());
const int64_t entryLimit = EntryLimit();
- if (entryLimit <= 0)
- return; // no SMP support or a misconfiguration
+ assert(entryLimit > 0);
Must(!map);
map = new TransientsMap(MapLabel);
int64_t
Transients::EntryLimit()
{
- // TODO: we should also check whether any SMP-aware caching is configured
- if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
- return 0; // no SMP collapsed forwarding possible or needed
-
- return Config.collapsed_forwarding_shared_entries_limit;
+ return (UsingSmp() && Store::Controller::SmpAware()) ?
+ Config.shared_transient_entries_limit : 0;
}
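This resolves the removed TODO: the transients table is now sized whenever workers share any cache, even with collapsed_forwarding off. A minimal model of the new decision, with invented names for illustration:

    #include <cstdint>

    int64_t transientsEntryLimit(const bool usingSmp, const bool anySmpAwareCache,
                                 const int64_t configuredLimit)
    {
        // no SMP workers, or no cache shared among them: no table needed
        if (!usingSmp || !anySmpAwareCache)
            return 0;
        return configuredLimit; // shared_transient_entries_limit (default 16384)
    }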
bool
void
TransientsRr::create()
{
- if (!Config.onoff.collapsed_forwarding)
- return;
-
const int64_t entryLimit = Transients::EntryLimit();
if (entryLimit <= 0)
return; // no SMP configured or a misconfiguration
virtual void evictCached(StoreEntry &) override;
virtual void evictIfFound(const cache_key *) override;
virtual void maintain() override;
- virtual bool smpAware() const override { return true; }
/// Whether an entry with the given public key exists but was marked for
/// removal some time ago; get(key) returns nil in such cases.
static int64_t EntryLimit();
+ /// Can we create and initialize Transients?
+ static bool Enabled() { return EntryLimit() > 0; }
+
protected:
void addEntry(StoreEntry*, const cache_key *, const Store::IoStatus);
See also: collapsed_forwarding.
DOC_END
-NAME: collapsed_forwarding_shared_entries_limit
+NAME: shared_transient_entries_limit collapsed_forwarding_shared_entries_limit
COMMENT: (number of entries)
TYPE: int64_t
-LOC: Config.collapsed_forwarding_shared_entries_limit
+LOC: Config.shared_transient_entries_limit
DEFAULT: 16384
DOC_START
- This limits the size of a table used for sharing information
- about collapsible entries among SMP workers. Limiting sharing
- too much results in cache content duplication and missed
- collapsing opportunities. Using excessively large values
- wastes shared memory.
+ This directive limits the size of a table used for sharing current
+ transaction information among SMP workers. A table entry stores meta
+ information about a single cache entry being delivered to Squid
+ client(s) by one or more SMP workers. A single table entry consumes
+ less than 128 shared memory bytes.
- The limit should be significantly larger then the number of
- concurrent collapsible entries one wants to share. For a cache
- that handles less than 5000 concurrent requests, the default
+ The limit should be significantly larger than the number of
+ concurrent non-collapsed cachable responses leaving Squid. For a
+ cache that handles less than 5000 concurrent requests, the default
setting of 16384 should be plenty.
- If the limit is set to zero, it disables sharing of collapsed
- forwarding between SMP workers.
+ Using excessively large values wastes shared memory. Limiting the
+ table size too much results in hash collisions, leading to lower hit
+ ratio and missed SMP request collapsing opportunities: Transactions
+ left without a table entry cannot cache their responses and are
+ invisible to other concurrent requests for the same resource.
+
+ A zero limit is allowed but unsupported. A small positive limit
+ lowers the hit ratio, but a zero limit disables a lot of essential
+ synchronization among SMP workers, leading to HTTP violations (e.g.,
+ stale hit responses). It also disables shared collapsed forwarding:
+ A worker becomes unable to collapse its requests on transactions in
+ other workers, resulting in more trips to the origin server and more
+ cache thrashing.
DOC_END
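For example, an SMP deployment expecting many concurrent cachable responses might raise the limit; the squid.conf values below are illustrative, not recommendations:

    # squid.conf (illustrative)
    workers 4
    collapsed_forwarding on
    shared_transient_entries_limit 65536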
COMMENT_START
// TODO: Consider also allowing regular (non-collapsed) revalidation hits.
// TODO: support collapsed revalidation for Vary-controlled entries
bool collapsingAllowed = Config.onoff.collapsed_forwarding &&
- !Store::Root().smpAware() &&
+ !Store::Controller::SmpAware() &&
http->request->vary_headers.isEmpty();
StoreEntry *entry = nullptr;
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
+#include "MemStore.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
return;
// are we using a shared memory cache?
- if (Config.memShared && IamWorkerProcess()) {
+ if (MemStore::Enabled()) {
// This method was designed to update replacement policy, not to
// actually purge something from the memory cache (TODO: rename?).
// Shared memory cache does not have a policy that needs updates.
Store::Controller::Controller() :
swapDir(new Disks),
- memStore(NULL),
+ sharedMemStore(nullptr),
+ localMemStore(false),
transients(NULL)
{
assert(!store_table);
Store::Controller::~Controller()
{
- delete memStore;
+ delete sharedMemStore;
delete transients;
delete swapDir;
void
Store::Controller::init()
{
- if (Config.memShared && IamWorkerProcess()) {
- memStore = new MemStore;
- memStore->init();
+ if (IamWorkerProcess()) {
+ if (MemStore::Enabled()) {
+ sharedMemStore = new MemStore;
+ sharedMemStore->init();
+ } else if (Config.memMaxSize > 0) {
+ localMemStore = true;
+ }
}
swapDir->init();
- if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding &&
- smpAware()) {
+ if (Transients::Enabled() && IamWorkerProcess()) {
transients = new Transients;
transients->init();
}
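After this init() change there are three distinct memory-caching modes. A standalone sketch of the selection, with an invented enum for illustration:

    #include <cstdint>

    enum class MemCacheMode { Shared, Local, None };

    MemCacheMode memCacheMode(const bool memStoreEnabled, const int64_t memMaxSize)
    {
        if (memStoreEnabled)
            return MemCacheMode::Shared; // MemStore::Enabled(): one cache, all workers
        if (memMaxSize > 0)
            return MemCacheMode::Local; // per-process cache kept in store_table
        return MemCacheMode::None; // cache_mem is zero
    }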
void
Store::Controller::getStats(StoreInfoStats &stats) const
{
- if (memStore)
- memStore->getStats(stats);
+ if (sharedMemStore)
+ sharedMemStore->getStats(stats);
else {
// move this code to a non-shared memory cache class when we have it
stats.mem.shared = false;
stats.mem.capacity = Config.memMaxSize;
stats.mem.size = mem_node::StoreMemSize();
- stats.mem.count = hot_obj_count;
+ if (localMemStore) {
+ // XXX: also count internal/in-transit objects
+ stats.mem.count = hot_obj_count;
+ } else {
+ // XXX: count internal/in-transit objects instead
+ stats.mem.count = hot_obj_count;
+ }
}
swapDir->getStats(stats);
Math::doublePercent(currentSize(), maxSize()),
Math::doublePercent((maxSize() - currentSize()), maxSize()));
- if (memStore)
- memStore->stat(output);
+ if (sharedMemStore)
+ sharedMemStore->stat(output);
/* now the swapDir */
swapDir->stat(output);
void
Store::Controller::sync(void)
{
- if (memStore)
- memStore->sync();
+ if (sharedMemStore)
+ sharedMemStore->sync();
swapDir->sync();
}
swapDir->reference(e);
// Notify the memory cache that we're referencing this object again
- if (memStore && e.mem_status == IN_MEMORY)
- memStore->reference(e);
+ if (sharedMemStore && e.mem_status == IN_MEMORY)
+ sharedMemStore->reference(e);
// TODO: move this code to a non-shared memory cache class when we have it
if (e.mem_obj) {
keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;
// Notify the memory cache that we're not referencing this object any more
- if (memStore && e.mem_status == IN_MEMORY)
- keepInStoreTable = memStore->dereference(e) || keepInStoreTable;
+ if (sharedMemStore && e.mem_status == IN_MEMORY)
+ keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;
// TODO: move this code to a non-shared memory cache class when we have it
if (e.mem_obj) {
if (mem_policy->Dereferenced)
mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
// non-shared memory cache relies on store_table
- if (!memStore)
+ if (localMemStore)
keepInStoreTable = wantsLocalMemory || keepInStoreTable;
}
}
}
- if (memStore) {
- if (StoreEntry *e = memStore->get(key)) {
+ if (sharedMemStore) {
+ if (StoreEntry *e = sharedMemStore->get(key)) {
debugs(20, 3, HERE << "got mem-cached entry: " << *e);
return e;
}
return;
}
- if (memStore)
- memStore->evictIfFound(key);
+ if (sharedMemStore)
+ sharedMemStore->evictIfFound(key);
if (swapDir)
swapDir->evictIfFound(key);
if (transients)
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
bool keepInLocalMemory = false;
- if (memStore)
- memStore->write(e); // leave keepInLocalMemory false
- else
+ if (sharedMemStore)
+ sharedMemStore->write(e); // leave keepInLocalMemory false
+ else if (localMemStore)
keepInLocalMemory = keepForLocalMemoryCache(e);
debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
Store::Controller::memoryEvictCached(StoreEntry &e)
{
// TODO: Untangle memory caching from mem_obj.
- if (memStore)
- memStore->evictCached(e);
+ if (sharedMemStore)
+ sharedMemStore->evictCached(e);
else // TODO: move into [non-shared] memory cache class when we have one
if (!e.locked())
e.destroyMemObject();
void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
- if (memStore)
- memStore->disconnect(e);
+ if (sharedMemStore)
+ sharedMemStore->disconnect(e);
// else nothing to do for non-shared memory cache
}
// have a dedicated storage for them (that would not purge them).
// They are not managed [well] by any specific Store handled below.
keepInLocalMemory = true;
- } else if (memStore) {
- // leave keepInLocalMemory false; memStore maintains its own cache
- } else {
+ } else if (sharedMemStore) {
+ // leave keepInLocalMemory false; sharedMemStore maintains its own cache
+ } else if (localMemStore) {
keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
// the local memory cache is not overflowing
memoryCacheHasSpaceFor(memoryPagesDebt_);
/* update stored image of the old entry */
- if (memStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
- memStore->updateHeaders(old);
+ if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
+ sharedMemStore->updateHeaders(old);
if (old->hasDisk())
swapDir->updateHeaders(old);
bool found = false;
bool inSync = false;
- if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
+ if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
found = true;
inSync = true;
debugs(20, 7, "fully mem-loaded " << *collapsed);
- } else if (memStore && collapsed->hasMemStore()) {
+ } else if (sharedMemStore && collapsed->hasMemStore()) {
found = true;
- inSync = memStore->updateAnchored(*collapsed);
+ inSync = sharedMemStore->updateAnchored(*collapsed);
// TODO: handle entries attached to both memory and disk
} else if (swapDir && collapsed->hasDisk()) {
found = true;
debugs(20, 7, "anchoring " << entry);
bool found = false;
- if (memStore)
- found = memStore->anchorToCache(entry, inSync);
+ if (sharedMemStore)
+ found = sharedMemStore->anchorToCache(entry, inSync);
if (!found && swapDir)
found = swapDir->anchorToCache(entry, inSync);
}
bool
-Store::Controller::smpAware() const
+Store::Controller::SmpAware()
{
- return memStore || (swapDir && swapDir->smpAware());
+ return MemStore::Enabled() || Disks::SmpAware();
}
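Turning the virtual smpAware() into a static SmpAware() lets configuration-time code ask the question before any Controller instance exists; Transients::EntryLimit() above relies on that, composing only static predicates (restated for illustration):

    // every call here is static; no Store::Root() instance is required
    const bool anySmpAwareStore = MemStore::Enabled() || Store::Disks::SmpAware();
    const int64_t limit = (UsingSmp() && anySmpAwareStore) ?
                          Config.shared_transient_entries_limit : 0;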
void
virtual void evictCached(StoreEntry &) override;
virtual void evictIfFound(const cache_key *) override;
virtual int callback() override;
- virtual bool smpAware() const override;
/// \returns a locally indexed and SMP-tracked matching StoreEntry (or nil)
/// Slower than peek() but does not restrict StoreEntry use and storage.
/// \returns an iterator for all Store entries
StoreSearch *search();
+ /// whether there are any SMP-aware storages
+ static bool SmpAware();
+
/// the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
static int store_dirs_rebuilding;
void checkTransients(const StoreEntry &) const;
Disks *swapDir; ///< summary view of all disk caches
- Memory *memStore; ///< memory cache
+ Memory *sharedMemStore; ///< memory cache that multiple workers can use
+ bool localMemStore; ///< whether local (non-shared) memory cache is enabled
/// A shared table of public store entries that do not know whether they
/// will belong to a memory cache, a disk cache, or will be uncachable
virtual void reference(StoreEntry &e) override;
virtual bool dereference(StoreEntry &e) override;
virtual void maintain() override;
- virtual bool smpAware() const override { return false; }
+ /// whether this disk storage is capable of serving multiple workers
+ virtual bool smpAware() const = 0;
/// the size of the smallest entry this cache_dir can store
int64_t minObjectSize() const;
}
SwapDir &
-Store::Disks::dir(const int i) const
+Store::Disks::Dir(const int i)
{
SwapDir *sd = INDEXSD(i);
assert(sd);
}
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).active())
+ if (Dir(i).active())
store(i)->create();
}
}
* above
* Step 3: have the hash index walk the searches itself.
*/
- if (dir(i).active())
+ if (Dir(i).active())
store(i)->init();
}
uint64_t result = 0;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).doReportStat())
+ if (Dir(i).doReportStat())
result += store(i)->maxSize();
}
uint64_t result = 0;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).doReportStat())
+ if (Dir(i).doReportStat())
result += store(i)->minSize();
}
uint64_t result = 0;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).doReportStat())
+ if (Dir(i).doReportStat())
result += store(i)->currentSize();
}
uint64_t result = 0;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).doReportStat())
+ if (Dir(i).doReportStat())
result += store(i)->currentCount();
}
secondLargestMaximumObjectSize = -1;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- const auto &disk = dir(i);
+ const auto &disk = Dir(i);
if (!disk.active())
continue;
Store::Disks::evictIfFound(const cache_key *key)
{
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).active())
- dir(i).evictIfFound(key);
+ if (Dir(i).active())
+ Dir(i).evictIfFound(key);
}
}
static int idx = 0;
for (int n = 0; n < cacheDirs; ++n) {
idx = (idx + 1) % cacheDirs;
- SwapDir &sd = dir(idx);
+ SwapDir &sd = Dir(idx);
if (!sd.active())
continue;
Store::Disks::updateAnchored(StoreEntry &entry)
{
return entry.hasDisk() &&
- dir(entry.swap_dirn).updateAnchored(entry);
+ Dir(entry.swap_dirn).updateAnchored(entry);
}
bool
-Store::Disks::smpAware() const
+Store::Disks::SmpAware()
{
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
// A mix is not supported, but we conservatively check every
// dir because features like collapsed revalidation should
// currently be disabled if any dir is SMP-aware
- if (dir(i).smpAware())
+ if (Dir(i).smpAware())
return true;
}
return false;
Store::Disks::hasReadableEntry(const StoreEntry &e) const
{
for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
- if (dir(i).active() && dir(i).hasReadableEntry(e))
+ if (Dir(i).active() && Dir(i).hasReadableEntry(e))
return true;
return false;
}
/// Additional unknown-size entry bytes required by disks in order to
/// reduce the risk of selecting the wrong disk cache for the growing entry.
int64_t accumulateMore(const StoreEntry&) const;
- virtual bool smpAware() const override;
+ /// whether any disk cache is SMP-aware
+ static bool SmpAware();
/// whether any of disk caches has entry with e.key
bool hasReadableEntry(const StoreEntry &) const;
private:
/* migration logic */
SwapDir *store(int const x) const;
- SwapDir &dir(int const idx) const;
+ static SwapDir &Dir(int const idx);
int64_t largestMinimumObjectSize; ///< maximum of all Disk::minObjectSize()s
int64_t largestMaximumObjectSize; ///< maximum of all Disk::maxObjectSize()s
/// prepare for shutdown
virtual void sync() {}
-
- /// whether this storage is capable of serving multiple workers;
- /// a true result does not imply [lack of] non-SMP support because
- /// [only] some SMP-aware storages also support non-SMP configss
- virtual bool smpAware() const = 0;
};
} // namespace Store
virtual void evictCached(StoreEntry &) override {}
virtual void evictIfFound(const cache_key *) override {}
virtual bool hasReadableEntry(const StoreEntry &) const override { return false; }
+ virtual bool smpAware() const override { return false; }
};
typedef RefCount<TestSwapDir> TestSwapDirPointer;
void MemStore::evictIfFound(const cache_key *) STUB
bool MemStore::anchorToCache(StoreEntry&, bool&) STUB_RETVAL(false)
bool MemStore::updateAnchored(StoreEntry&) STUB_RETVAL(false)
+int64_t MemStore::EntryLimit() STUB_RETVAL(0)
void Store::Controller::maintain() STUB
bool Store::Controller::markedForDeletion(const cache_key *) const STUB_RETVAL(false)
void Store::Controller::freeMemorySpace(const int) STUB
+bool Store::Controller::SmpAware() STUB_RETVAL(false)
std::ostream &operator <<(std::ostream &os, const StoreEntry &)
{