From: Alex Rousskov
Date: Sun, 6 Apr 2014 21:02:18 +0000 (-0600)
Subject: Lifted 16777216 slot limit from rock cache_dirs and shared memory caches.
X-Git-Tag: SQUID_3_5_0_1~170^2~11
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=36c84e199a995f639a720362195bda49368c6a53;p=thirdparty%2Fsquid.git

Lifted 16777216 slot limit from rock cache_dirs and shared memory caches.

Also fixed the rock disk space waste warning.

Rock store and shared memory cache code used the entry limit and the slot
limit interchangeably, as if the two were the same. In large caches, the
number of entry slots may exceed the number of entries supported by Squid
(16777216 entries per store). This is especially likely for large caches
storing large entries (many slots per entry while at the maximum number
of entries) or for large caches using small slot sizes.

AFAICT, the bug left the "tail" of cache storage unused and caused cache
entries to be purged even though there was still space to store them.
Squid was also allocating smaller shared memory tables (and temporary
cache index build tables) than actually needed for the configured cache
sizes.

Note that this change does not remove the per-store 16777216 entry limit.

The old [small] rock warning about wasted disk space assumed that a cache
entry occupies exactly one slot. The updated warnings do not.

Also fixed and simplified the db scanning condition during the cache
index build.
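For readers reviewing the sizing logic, here is a minimal standalone
sketch of the two limits this change separates and of the arithmetic
behind the updated waste warning. It is illustration only, not code from
this patch; all names and configuration numbers below are hypothetical:

#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
    // assumed example cache_dir configuration (hypothetical values)
    const int64_t diskSize = 512LL * 1024 * 1024 * 1024; // 512 GB rock db
    const int64_t slotSize = 16 * 1024;                  // 16 KB db slots
    const int64_t maxObjectSize = 16 * 1024;             // max entry size
    const int64_t entryLimitAbsolute = 16777216;         // per-store sfileno cap

    // the db area holds this many slots; nothing caps slots at 16777216
    const int64_t slotLimit = diskSize / slotSize;       // 33554432 slots

    // entries remain capped; each entry occupies one or more slots
    const int64_t entryLimit = std::min(slotLimit, entryLimitAbsolute);

    std::cout << "slot limit:  " << slotLimit << "\n"
              << "entry limit: " << entryLimit << "\n";

    // Sizing slot-indexed tables by entryLimit (the old behavior) would
    // leave the last slotLimit - entryLimit slots unused: here, half of
    // the configured disk space.

    // In the spirit of the updated warning: even at the entry cap,
    // entries of at most maxObjectSize bytes cannot fill this db.
    const double entriesMayOccupy =
        entryLimitAbsolute * static_cast<double>(maxObjectSize);
    if (entriesMayOccupy < diskSize) {
        const int64_t wasted = diskSize - static_cast<int64_t>(entriesMayOccupy);
        std::cout << "disk space wasted: " << wasted << " bytes\n";
    }
    return 0;
}

With these numbers, the sketch reports 33554432 slots but only 16777216
entries, and 274877906944 wasted bytes: the scenario the commit message
describes and the new warnings report.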
---

diff --git a/src/MemStore.cc b/src/MemStore.cc
index f327bf1e50..981907f30b 100644
--- a/src/MemStore.cc
+++ b/src/MemStore.cc
@@ -91,21 +91,25 @@ MemStore::stat(StoreEntry &e) const
                       Math::doublePercent(currentSize(), maxSize()));

     if (map) {
-        const int limit = map->entryLimit();
-        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
-        if (limit > 0) {
+        const int entryLimit = map->entryLimit();
+        const int slotLimit = map->sliceLimit();
+        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
+        if (entryLimit > 0) {
             storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
-                              currentCount(), (100.0 * currentCount() / limit));
+                              currentCount(), (100.0 * currentCount() / entryLimit));
+        }
+
+        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
+        if (slotLimit > 0) {
             const unsigned int slotsFree =
                 Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
-            if (slotsFree <= static_cast<const unsigned int>(limit)) {
-                const int usedSlots = limit - static_cast<const int>(slotsFree);
+            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
+                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                 storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
-                                  usedSlots, (100.0 * usedSlots / limit));
+                                  usedSlots, (100.0 * usedSlots / slotLimit));
             }
-            if (limit < 100) { // XXX: otherwise too expensive to count
+
+            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                 Ipc::ReadWriteLockStats stats;
                 map->updateStats(stats);
                 stats.dump(e);
@@ -619,7 +623,7 @@ MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
 }

 void
-MemStore::noteFreeMapSlice(const sfileno sliceId)
+MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
 {
     Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
     debugs(20, 9, "slice " << sliceId << " freed " << pageId);
diff --git a/src/MemStore.h b/src/MemStore.h
index e67cc4a506..e57e776537 100644
--- a/src/MemStore.h
+++ b/src/MemStore.h
@@ -72,7 +72,7 @@ protected:
     sfileno reserveSapForWriting(Ipc::Mem::PageId &page);

     // Ipc::StoreMapCleaner API
-    virtual void noteFreeMapSlice(const sfileno sliceId);
+    virtual void noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId);

 private:
     // TODO: move freeSlots into map
diff --git a/src/Transients.cc b/src/Transients.cc
index a592528881..a4f1dbd89f 100644
--- a/src/Transients.cc
+++ b/src/Transients.cc
@@ -288,7 +288,7 @@ Transients::copyToShm(const StoreEntry &e, const sfileno index,
 }

 void
-Transients::noteFreeMapSlice(const sfileno sliceId)
+Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
 {
     // TODO: we should probably find the entry being deleted and abort it
 }
diff --git a/src/Transients.h b/src/Transients.h
index eeffd99de6..a0ebf2063e 100644
--- a/src/Transients.h
+++ b/src/Transients.h
@@ -73,7 +73,7 @@ protected:
     bool abandonedAt(const sfileno index) const;

     // Ipc::StoreMapCleaner API
-    virtual void noteFreeMapSlice(const sfileno sliceId);
+    virtual void noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId);

 private:
     /// shared packed info indexed by Store keys, for creating new StoreEntries
diff --git a/src/fs/rock/RockRebuild.cc b/src/fs/rock/RockRebuild.cc
index e230bdf687..610a57e50b 100644
--- a/src/fs/rock/RockRebuild.cc
+++ b/src/fs/rock/RockRebuild.cc
@@ -82,13 +82,13 @@ public:
     /* store entry-level information indexed by sfileno */
     uint64_t size; ///< payload seen so far
     uint32_t version; ///< DbCellHeader::version to distinguish same-URL chains
-    uint32_t state:3;  ///< current entry state (one of the State values)
-    uint32_t anchored:1;  ///< whether we loaded the inode slot for this entry
+    uint8_t state:3;  ///< current entry state (one of the State values)
+    uint8_t anchored:1;  ///< whether we loaded the inode slot for this entry

     /* db slot-level information indexed by slotId, starting with firstSlot */
-    uint32_t mapped:1;  ///< whether this slot was added to a mapped entry
-    uint32_t freed:1;  ///< whether this slot was marked as free
-    sfileno more:25; ///< another slot in some entry chain (unordered)
+    uint8_t mapped:1;  ///< whether this slot was added to a mapped entry
+    uint8_t freed:1;  ///< whether this slot was marked as free
+    Ipc::StoreMapSliceId more; ///< another slot in some entry chain (unordered)
     bool used() const { return freed || mapped || more != -1; }

     /// possible entry states
@@ -101,7 +101,8 @@ Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
     sd(dir),
     entries(NULL),
     dbSize(0),
-    dbEntrySize(0),
+    dbSlotSize(0),
+    dbSlotLimit(0),
     dbEntryLimit(0),
     fd(-1),
     dbOffset(0),
@@ -111,8 +112,10 @@ Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
     assert(sd);
     memset(&counts, 0, sizeof(counts));
     dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
-    dbEntrySize = sd->slotSize;
-    dbEntryLimit = sd->entryLimit();
+    dbSlotSize = sd->slotSize;
+    dbEntryLimit = sd->entryLimitActual();
+    dbSlotLimit = sd->slotLimitActual();
+    assert(dbEntryLimit <= dbSlotLimit);
 }

 Rock::Rebuild::~Rebuild()
@@ -150,9 +153,8 @@ Rock::Rebuild::start()
     buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);

     dbOffset = SwapDir::HeaderSize;
-    loadingPos = 0;

-    entries = new LoadingEntry[dbEntryLimit];
+    entries = new LoadingEntry[dbSlotLimit];

     checkpoint();
 }
@@ -168,7 +170,7 @@ Rock::Rebuild::checkpoint()
 bool
 Rock::Rebuild::doneAll() const
 {
-    return dbOffset >= dbSize && validationPos >= dbEntryLimit &&
+    return loadingPos >= dbSlotLimit && validationPos >= dbSlotLimit &&
            AsyncJob::doneAll();
 }

@@ -182,7 +184,7 @@ Rock::Rebuild::Steps(void *data)
 void
 Rock::Rebuild::steps()
 {
-    if (dbOffset < dbSize)
+    if (loadingPos < dbSlotLimit)
         loadingSteps();
     else
         validationSteps();
@@ -203,14 +205,14 @@ Rock::Rebuild::loadingSteps()
     const timeval loopStart = current_time;

     int loaded = 0;
-    while (loaded < dbEntryLimit && dbOffset < dbSize) {
+    while (loadingPos < dbSlotLimit) {
         loadOneSlot();
-        dbOffset += dbEntrySize;
+        dbOffset += dbSlotSize;
         ++loadingPos;
         ++loaded;

         if (counts.scancount % 1000 == 0)
-            storeRebuildProgress(sd->index, dbEntryLimit, counts.scancount);
+            storeRebuildProgress(sd->index, dbSlotLimit, counts.scancount);

         if (opt_foreground_rebuild)
             continue; // skip "few entries at a time" check below
@@ -257,7 +259,7 @@ Rock::Rebuild::loadOneSlot()
         freeSlotIfIdle(slotId, false);
         return;
     }
-    if (!header.sane(dbEntrySize, dbEntryLimit)) {
+    if (!header.sane(dbSlotSize, dbSlotLimit)) {
         debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
                "Ignoring malformed cache entry meta data at " << dbOffset);
         freeSlotIfIdle(slotId, true);
@@ -303,7 +305,7 @@ Rock::Rebuild::validationSteps()
     const timeval loopStart = current_time;

     int validated = 0;
-    while (validationPos < dbEntryLimit) {
+    while (validationPos < dbSlotLimit) {
         validateOneEntry();
         ++validationPos;
         ++validated;
diff --git a/src/fs/rock/RockRebuild.h b/src/fs/rock/RockRebuild.h
index 8bb0291aee..4a58f89b3e 100644
--- a/src/fs/rock/RockRebuild.h
+++ b/src/fs/rock/RockRebuild.h
@@ -55,8 +55,9 @@ private:
     LoadingEntry *entries; ///< store entries being loaded from disk

     int64_t dbSize;
-    int dbEntrySize;
-    int dbEntryLimit;
+    int dbSlotSize; ///< the size of a db cell, including the cell header
+    int dbSlotLimit; ///< total number of db cells
+    int dbEntryLimit; ///< maximum number of entries that can be stored in db

     int fd; // store db file descriptor
     int64_t dbOffset;
diff --git a/src/fs/rock/RockSwapDir.cc b/src/fs/rock/RockSwapDir.cc
index 91f98c475a..2fed69c6ea 100644
--- a/src/fs/rock/RockSwapDir.cc
+++ b/src/fs/rock/RockSwapDir.cc
@@ -24,6 +24,7 @@

 #include <cstdlib>
 #include <iomanip>
+#include <limits>

 #if HAVE_SYS_STAT_H
 #include <sys/stat.h>
@@ -209,11 +210,27 @@ Rock::SwapDir::swappedOut(const StoreEntry &)
 }

 int64_t
-Rock::SwapDir::entryLimitAllowed() const
+Rock::SwapDir::slotLimitAbsolute() const
 {
-    const int64_t eLimitLo = map ? map->entryLimit() : 0; // dynamic shrinking unsupported
-    const int64_t eWanted = (maxSize() - HeaderSize)/slotSize;
-    return min(max(eLimitLo, eWanted), entryLimitHigh());
+    // the max value is an invalid one; all values must be below the limit
+    assert(std::numeric_limits<Ipc::StoreMapSliceId>::max() ==
+           std::numeric_limits<SlotId>::max());
+    return std::numeric_limits<SlotId>::max();
+}
+
+int64_t
+Rock::SwapDir::slotLimitActual() const
+{
+    const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
+    const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
+    const int64_t sLimitHi = slotLimitAbsolute();
+    return min(max(sLimitLo, sWanted), sLimitHi);
+}
+
+int64_t
+Rock::SwapDir::entryLimitActual() const
+{
+    return min(slotLimitActual(), entryLimitAbsolute());
 }

 // TODO: encapsulate as a tool
@@ -542,20 +559,35 @@ Rock::SwapDir::validateOptions()
     const int64_t slotSizeRoundingWaste = slotSize;
     const int64_t maxRoundingWaste = max(maxSizeRoundingWaste,
                                          slotSizeRoundingWaste);
-    const int64_t usableDiskSize = diskOffset(entryLimitAllowed());
-    const int64_t diskWasteSize = maxSize() - usableDiskSize;
-    Must(diskWasteSize >= 0);
-
-    // warn if maximum db size is not reachable due to sfileno limit
-    if (entryLimitAllowed() == entryLimitHigh() &&
-            diskWasteSize >= maxRoundingWaste) {
-        debugs(47, DBG_CRITICAL, "Rock store cache_dir[" << index << "] '" << path << "':");
-        debugs(47, DBG_CRITICAL, "\tmaximum number of entries: " << entryLimitAllowed());
-        debugs(47, DBG_CRITICAL, "\tdb slot size: " << slotSize << " Bytes");
-        debugs(47, DBG_CRITICAL, "\tmaximum db size: " << maxSize() << " Bytes");
-        debugs(47, DBG_CRITICAL, "\tusable db size: " << usableDiskSize << " Bytes");
-        debugs(47, DBG_CRITICAL, "\tdisk space waste: " << diskWasteSize << " Bytes");
-        debugs(47, DBG_CRITICAL, "WARNING: Rock store config wastes space.");
+
+    // an entry consumes at least one slot; round up to reduce false warnings
+    const int64_t blockSize = static_cast<int64_t>(slotSize);
+    const int64_t maxObjSize = max(blockSize,
+                                   ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);
+
+    // Does the "sfileno*max-size" limit match configured db capacity?
+    const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
+    if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
+        const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
+        debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
+               "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
+               "\n\tconfigured db slot size: " << slotSize << " bytes" <<
+               "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
+               "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
+               "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
+               "\n\tdisk space wasted: " << diskWasteSize << " bytes");
+    }
+
+    // Does the "absolute slot count" limit match configured db capacity?
+    const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
+    if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
+        const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(slotsMayOccupy);
+        debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
+               "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
+               "\n\tconfigured db slot size: " << slotSize << " bytes" <<
+               "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
+               "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
+               "\n\tdisk space wasted: " << diskWasteSize << " bytes");
     }
 }

@@ -634,10 +666,10 @@ Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreI
 }

 int64_t
-Rock::SwapDir::diskOffset(int filen) const
+Rock::SwapDir::diskOffset(const SlotId sid) const
 {
-    assert(filen >= 0);
-    return HeaderSize + slotSize*filen;
+    assert(sid >= 0);
+    return HeaderSize + slotSize*sid;
 }

 int64_t
@@ -651,19 +683,7 @@ int64_t
 Rock::SwapDir::diskOffsetLimit() const
 {
     assert(map);
-    return diskOffset(map->entryLimit());
-}
-
-int
-Rock::SwapDir::entryMaxPayloadSize() const
-{
-    return slotSize - sizeof(DbCellHeader);
-}
-
-int
-Rock::SwapDir::entriesNeeded(const int64_t objSize) const
-{
-    return (objSize + entryMaxPayloadSize() - 1) / entryMaxPayloadSize();
+    return diskOffset(map->sliceLimit());
 }

 bool
@@ -693,11 +713,11 @@ Rock::SwapDir::useFreeSlot(Ipc::Mem::PageId &pageId)
 bool
 Rock::SwapDir::validSlotId(const SlotId slotId) const
 {
-    return 0 <= slotId && slotId < entryLimitAllowed();
+    return 0 <= slotId && slotId < slotLimitActual();
 }

 void
-Rock::SwapDir::noteFreeMapSlice(const sfileno sliceId)
+Rock::SwapDir::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
 {
     Ipc::Mem::PageId pageId;
     pageId.pool = index+1;
@@ -770,8 +790,9 @@ Rock::SwapDir::ioCompletedNotification()
                xstrerror());

     debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
-           std::setw(12) << maxSize() << " disk bytes and " <<
-           std::setw(7) << map->entryLimit() << " entries");
+           std::setw(12) << maxSize() << " disk bytes, " <<
+           std::setw(7) << map->entryLimit() << " entries, and " <<
+           std::setw(7) << map->sliceLimit() << " slots");

     rebuild();
 }
@@ -947,26 +968,28 @@ Rock::SwapDir::statfs(StoreEntry &e) const
                       currentSize() / 1024.0,
                       Math::doublePercent(currentSize(), maxSize()));

-    if (map) {
-        const int limit = map->entryLimit();
-        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
-        if (limit > 0) {
+    const int entryLimit = entryLimitActual();
+    const int slotLimit = slotLimitActual();
+    storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
+    if (map && entryLimit > 0) {
             const int entryCount = map->entryCount();
             storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
-                              entryCount, (100.0 * entryCount / limit));
+                              entryCount, (100.0 * entryCount / entryLimit));
+    }
+
+    storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
+    if (map && slotLimit > 0) {
             const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
-            if (slotsFree <= static_cast<const unsigned int>(limit)) {
-                const int usedSlots = limit - static_cast<const int>(slotsFree);
+            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
+                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                 storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
-                                  usedSlots, (100.0 * usedSlots / limit));
+                                  usedSlots, (100.0 * usedSlots / slotLimit));
             }
-            if (limit < 100) { // XXX: otherwise too expensive to count
+
+            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                 Ipc::ReadWriteLockStats stats;
                 map->updateStats(stats);
                 stats.dump(e);
             }
-        }
     }

     storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
@@ -1012,7 +1035,7 @@ void Rock::SwapDirRr::create()
     Must(mapOwners.empty() && freeSlotsOwners.empty());
     for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
         if (const Rock::SwapDir *const sd = dynamic_cast<const Rock::SwapDir *>(INDEXSD(i))) {
-            const int64_t capacity = sd->entryLimitAllowed();
+            const int64_t capacity = sd->slotLimitActual();

             SwapDir::DirMap::Owner *const mapOwner =
                 SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
diff --git a/src/fs/rock/RockSwapDir.h b/src/fs/rock/RockSwapDir.h
index dc90fd001c..f9abde75fa 100644
--- a/src/fs/rock/RockSwapDir.h
+++ b/src/fs/rock/RockSwapDir.h
@@ -46,8 +46,10 @@ public:
     // temporary path to the shared memory stack of free slots
     const char *freeSlotsPath() const;

-    int64_t entryLimitHigh() const { return SwapFilenMax; } ///< Core limit
-    int64_t entryLimitAllowed() const;
+    int64_t entryLimitAbsolute() const { return SwapFilenMax+1; } ///< Core limit
+    int64_t entryLimitActual() const; ///< max number of possible entries in db
+    int64_t slotLimitAbsolute() const; ///< Rock store implementation limit
+    int64_t slotLimitActual() const; ///< total number of slots in this db

     /// removes a slot from a list of free slots or returns false
     bool useFreeSlot(Ipc::Mem::PageId &pageId);
@@ -61,7 +63,7 @@ public:
     void writeError(StoreEntry &e);

     /* StoreMapCleaner API */
-    virtual void noteFreeMapSlice(const sfileno fileno);
+    virtual void noteFreeMapSlice(const Ipc::StoreMapSliceId fileno);

     uint64_t slotSize; ///< all db slots are of this size

@@ -108,9 +110,6 @@ protected:
     void ignoreReferences(StoreEntry &e); ///< delete from repl policy scope

     int64_t diskOffsetLimit() const;
-    int entryLimit() const { return map->entryLimit(); }
-    int entryMaxPayloadSize() const;
-    int entriesNeeded(const int64_t objSize) const;

     void anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor);
     bool updateCollapsedWith(StoreEntry &collapsed, const Ipc::StoreMapAnchor &anchor);
diff --git a/src/ipc/StoreMap.cc b/src/ipc/StoreMap.cc
index 73e6c706f1..cda2a396fe 100644
--- a/src/ipc/StoreMap.cc
+++ b/src/ipc/StoreMap.cc
@@ -9,32 +9,32 @@
 #include "tools.h"

 Ipc::StoreMap::Owner *
-Ipc::StoreMap::Init(const char *const path, const int limit, const size_t extrasSize)
+Ipc::StoreMap::Init(const char *const path, const int slotLimit, const size_t extrasSize)
 {
-    assert(limit > 0); // we should not be created otherwise
-    Owner *const owner = shm_new(Shared)(path, limit, extrasSize);
-    debugs(54, 5, HERE << "new map [" << path << "] created: " << limit);
+    assert(slotLimit > 0); // we should not be created otherwise
+    Owner *const owner = shm_new(Shared)(path, slotLimit, extrasSize);
+    debugs(54, 5, HERE << "new map [" << path << "] created: " << slotLimit);
     return owner;
 }

 Ipc::StoreMap::Owner *
-Ipc::StoreMap::Init(const char *const path, const int limit)
+Ipc::StoreMap::Init(const char *const path, const int slotLimit)
 {
-    return Init(path, limit, 0);
+    return Init(path, slotLimit, 0);
 }

 Ipc::StoreMap::StoreMap(const char *const aPath): cleaner(NULL), path(aPath),
     shared(shm_old(Shared)(aPath))
 {
-    assert(shared->limit > 0); // we should not be created otherwise
+    assert(shared->slotLimit > 0); // we should not be created otherwise
     debugs(54, 5, HERE << "attached map [" << path << "] created: " <<
-           shared->limit);
+           shared->slotLimit);
 }

 int
 Ipc::StoreMap::compareVersions(const sfileno fileno, time_t newVersion) const
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &inode = shared->slots[fileno].anchor;

     // note: we do not lock, so comparison may be inacurate
@@ -51,7 +51,7 @@ Ipc::StoreMap::compareVersions(const sfileno fileno, time_t newVersion) const
 void
 Ipc::StoreMap::forgetWritingEntry(sfileno fileno)
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &inode = shared->slots[fileno].anchor;

     assert(inode.writing());
@@ -64,7 +64,7 @@ Ipc::StoreMap::forgetWritingEntry(sfileno fileno)
     inode.rewind();

     inode.lock.unlockExclusive();
-    --shared->count;
+    --shared->entryCount;

     debugs(54, 8, "closed entry " << fileno << " for writing " << path);
 }
@@ -107,7 +107,7 @@ Ipc::StoreMap::openForWritingAt(const sfileno fileno, bool overwriteExisting)
         assert(s.empty());
         s.start = -1; // we have not allocated any slices yet

-        ++shared->count;
+        ++shared->entryCount;

         //s.setKey(key); // XXX: the caller should do that
         debugs(54, 5, "opened entry " << fileno << " for writing " << path);
@@ -122,7 +122,7 @@ Ipc::StoreMap::openForWritingAt(const sfileno fileno, bool overwriteExisting)
 void
 Ipc::StoreMap::startAppending(const sfileno fileno)
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;
     assert(s.writing());
     s.lock.startAppending();
@@ -132,7 +132,7 @@ Ipc::StoreMap::startAppending(const sfileno fileno)
 void
 Ipc::StoreMap::closeForWriting(const sfileno fileno, bool lockForReading)
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;
     assert(s.writing());
     if (lockForReading) {
@@ -150,25 +150,25 @@ Ipc::StoreMap::closeForWriting(const sfileno fileno, bool lockForReading)
 Ipc::StoreMap::Slice &
 Ipc::StoreMap::writeableSlice(const AnchorId anchorId, const SliceId sliceId)
 {
-    assert(valid(anchorId));
+    assert(validEntry(anchorId));
     assert(shared->slots[anchorId].anchor.writing());
-    assert(valid(sliceId));
+    assert(validSlice(sliceId));
     return shared->slots[sliceId].slice;
 }

 const Ipc::StoreMap::Slice &
 Ipc::StoreMap::readableSlice(const AnchorId anchorId, const SliceId sliceId) const
 {
-    assert(valid(anchorId));
+    assert(validEntry(anchorId));
     assert(shared->slots[anchorId].anchor.reading());
-    assert(valid(sliceId));
+    assert(validSlice(sliceId));
     return shared->slots[sliceId].slice;
 }

 Ipc::StoreMap::Anchor &
 Ipc::StoreMap::writeableEntry(const AnchorId anchorId)
 {
-    assert(valid(anchorId));
+    assert(validEntry(anchorId));
     assert(shared->slots[anchorId].anchor.writing());
     return shared->slots[anchorId].anchor;
 }
@@ -176,7 +176,7 @@ Ipc::StoreMap::writeableEntry(const AnchorId anchorId)
 const Ipc::StoreMap::Anchor &
 Ipc::StoreMap::readableEntry(const AnchorId anchorId) const
 {
-    assert(valid(anchorId));
+    assert(validEntry(anchorId));
     assert(shared->slots[anchorId].anchor.reading());
     return shared->slots[anchorId].anchor;
 }
@@ -185,7 +185,7 @@ void
 Ipc::StoreMap::abortWriting(const sfileno fileno)
 {
     debugs(54, 5, "aborting entry " << fileno << " for writing " << path);
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;
     assert(s.writing());
     s.lock.appending = false; // locks out any new readers
@@ -202,7 +202,7 @@ Ipc::StoreMap::abortWriting(const sfileno fileno)
 const Ipc::StoreMap::Anchor *
 Ipc::StoreMap::peekAtReader(const sfileno fileno) const
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     const Anchor &s = shared->slots[fileno].anchor;
     if (s.reading())
         return &s; // immediate access by lock holder so no locking
@@ -215,7 +215,7 @@ Ipc::StoreMap::peekAtReader(const sfileno fileno) const
 const Ipc::StoreMap::Anchor &
 Ipc::StoreMap::peekAtEntry(const sfileno fileno) const
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     return shared->slots[fileno].anchor;
 }

@@ -224,7 +224,7 @@ Ipc::StoreMap::freeEntry(const sfileno fileno)
 {
     debugs(54, 5, "marking entry " << fileno << " to be freed in " << path);

-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;

     if (s.lock.lockExclusive())
@@ -282,7 +282,7 @@ Ipc::StoreMap::freeChain(const sfileno fileno, Anchor &inode, const bool keepLoc
     if (!keepLocked)
         inode.lock.unlockExclusive();
-    --shared->count;
+    --shared->entryCount;

     debugs(54, 5, "freed entry " << fileno << " in " << path);
 }
@@ -307,7 +307,7 @@ const Ipc::StoreMap::Anchor *
 Ipc::StoreMap::openForReadingAt(const sfileno fileno)
 {
     debugs(54, 5, "opening entry " << fileno << " for reading " << path);
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;

     if (!s.lock.lockShared()) {
@@ -337,7 +337,7 @@ Ipc::StoreMap::openForReadingAt(const sfileno fileno)
 void
 Ipc::StoreMap::closeForReading(const sfileno fileno)
 {
-    assert(valid(fileno));
+    assert(validEntry(fileno));
     Anchor &s = shared->slots[fileno].anchor;
     assert(s.reading());
     s.lock.unlockShared();
@@ -352,8 +352,8 @@ Ipc::StoreMap::purgeOne()
     const int searchLimit = min(10000, entryLimit());
     int tries = 0;
     for (; tries < searchLimit; ++tries) {
-        const sfileno fileno = static_cast<sfileno>(++shared->victim % shared->limit);
-        assert(valid(fileno));
+        const sfileno fileno = static_cast<sfileno>(++shared->victim % entryLimit());
+        assert(validEntry(fileno));
         Anchor &s = shared->slots[fileno].anchor;
         if (s.lock.lockExclusive()) {
             // the caller wants a free slice; empty anchor is not enough
@@ -377,41 +377,53 @@ Ipc::StoreMap::importSlice(const SliceId sliceId, const Slice &slice)
     // "get free slice" API. This is not something we can double check
     // reliably because the anchor for the imported slice may not have been
     // imported yet.
-    assert(valid(sliceId));
+    assert(validSlice(sliceId));
     shared->slots[sliceId].slice = slice;
 }

 int
 Ipc::StoreMap::entryLimit() const
 {
-    return shared->limit;
+    return min(sliceLimit(), static_cast<int>(SwapFilenMax+1));
 }

 int
 Ipc::StoreMap::entryCount() const
 {
-    return shared->count;
+    return shared->entryCount;
+}
+
+int
+Ipc::StoreMap::sliceLimit() const
+{
+    return shared->slotLimit;
 }

 void
 Ipc::StoreMap::updateStats(ReadWriteLockStats &stats) const
 {
-    for (int i = 0; i < shared->limit; ++i)
+    for (int i = 0; i < shared->slotLimit; ++i)
         shared->slots[i].anchor.lock.updateStats(stats);
 }

 bool
-Ipc::StoreMap::valid(const int pos) const
+Ipc::StoreMap::validEntry(const int pos) const
 {
     return 0 <= pos && pos < entryLimit();
 }

+bool
+Ipc::StoreMap::validSlice(const int pos) const
+{
+    return 0 <= pos && pos < sliceLimit();
+}
+
 sfileno
 Ipc::StoreMap::anchorIndexByKey(const cache_key *const key) const
 {
     const uint64_t *const k = reinterpret_cast<const uint64_t *>(key);
     // TODO: use a better hash function
-    return (k[0] + k[1]) % shared->limit;
+    return (k[0] + k[1]) % entryLimit();
 }

 Ipc::StoreMap::Anchor &
@@ -468,21 +480,22 @@ Ipc::StoreMapAnchor::rewind()

 /* Ipc::StoreMap::Shared */

-Ipc::StoreMap::Shared::Shared(const int aLimit, const size_t anExtrasSize):
-    limit(aLimit), extrasSize(anExtrasSize), count(0), victim(0),
-    slots(aLimit)
+Ipc::StoreMap::Shared::Shared(const int aSlotLimit, const size_t anExtrasSize):
+    slotLimit(aSlotLimit), extrasSize(anExtrasSize), entryCount(0),
+    victim(0),
+    slots(aSlotLimit)
 {
 }

 size_t
 Ipc::StoreMap::Shared::sharedMemorySize() const
 {
-    return SharedMemorySize(limit, extrasSize);
+    return SharedMemorySize(slotLimit, extrasSize);
 }

 size_t
-Ipc::StoreMap::Shared::SharedMemorySize(const int limit, const size_t extrasSize)
+Ipc::StoreMap::Shared::SharedMemorySize(const int slotLimit, const size_t extrasSize)
 {
-    return sizeof(Shared) + limit * (sizeof(StoreMapSlot) + extrasSize);
+    return sizeof(Shared) + slotLimit * (sizeof(StoreMapSlot) + extrasSize);
 }
diff --git a/src/ipc/StoreMap.h b/src/ipc/StoreMap.h
index b0d3cbcf1c..e99521146b 100644
--- a/src/ipc/StoreMap.h
+++ b/src/ipc/StoreMap.h
@@ -108,13 +108,13 @@ public:
     class Shared
     {
     public:
-        Shared(const int aLimit, const size_t anExtrasSize);
+        Shared(const int aSlotLimit, const size_t anExtrasSize);
         size_t sharedMemorySize() const;
-        static size_t SharedMemorySize(const int limit, const size_t anExtrasSize);
+        static size_t SharedMemorySize(const int aSlotLimit, const size_t anExtrasSize);

-        const int limit; ///< maximum number of store entries
-        const size_t extrasSize; ///< size of slice extra data
-        Atomic::Word count; ///< current number of entries
+        const int slotLimit; ///< total number of slots
+        const size_t extrasSize; ///< size of extra data in each slot
+        Atomic::Word entryCount; ///< current number of entries
         Atomic::WordT<sfileno> victim; ///< starting point for purge search
         Ipc::Mem::FlexibleArray<StoreMapSlot> slots; ///< storage
     };
@@ -123,7 +123,7 @@ public:
     typedef Mem::Owner<Shared> Owner;

     /// initialize shared memory
-    static Owner *Init(const char *const path, const int limit);
+    static Owner *Init(const char *const path, const int slotLimit);

     StoreMap(const char *const aPath);

@@ -186,9 +186,12 @@ public:
     /// copies slice to its designated position
     void importSlice(const SliceId sliceId, const Slice &slice);

-    bool valid(const int n) const; ///< whether n is a valid slice coordinate
+    /* SwapFilenMax limits the number of entries, but not slices or slots */
+    bool validEntry(const int n) const; ///< whether n is a valid entry coordinate
+    bool validSlice(const int n) const; ///< whether n is a valid slice coordinate
     int entryCount() const; ///< number of writeable and readable entries
     int entryLimit() const; ///< maximum entryCount() possible
+    int sliceLimit() const; ///< maximum number of slices possible

     /// adds approximate current stats to the supplied ones
     void updateStats(ReadWriteLockStats &stats) const;
@@ -224,9 +227,9 @@ public:
     StoreMapWithExtras(const char *const path);

     /// write access to the extras; call openForWriting() first!
-    ExtrasT &extras(const sfileno fileno);
+    ExtrasT &extras(const StoreMapSliceId sid);

     /// read-only access to the extras; call openForReading() first!
-    const ExtrasT &extras(const sfileno fileno) const;
+    const ExtrasT &extras(const StoreMapSliceId sid) const;

 protected:

@@ -240,7 +243,7 @@ public:
     virtual ~StoreMapCleaner() {}

     /// adjust slice-linked state before a locked Readable slice is erased
-    virtual void noteFreeMapSlice(const sfileno sliceId) = 0;
+    virtual void noteFreeMapSlice(const StoreMapSliceId sliceId) = 0;
 };

 // StoreMapWithExtras implementation

@@ -257,24 +260,24 @@ StoreMapWithExtras<ExtrasT>::StoreMapWithExtras(const char *const aPath):
     StoreMap(aPath)
 {
     const size_t sharedSizeWithoutExtras =
-        Shared::SharedMemorySize(entryLimit(), 0);
+        Shared::SharedMemorySize(sliceLimit(), 0);
     sharedExtras = reinterpret_cast<ExtrasT *>(reinterpret_cast<char *>(shared.getRaw()) + sharedSizeWithoutExtras);
 }

 template <class ExtrasT>
 ExtrasT &
-StoreMapWithExtras<ExtrasT>::extras(const sfileno fileno)
+StoreMapWithExtras<ExtrasT>::extras(const StoreMapSliceId sid)
 {
-    return const_cast<ExtrasT &>(const_cast<const StoreMapWithExtras *>(this)->extras(fileno));
+    return const_cast<ExtrasT &>(const_cast<const StoreMapWithExtras *>(this)->extras(sid));
 }

 template <class ExtrasT>
 const ExtrasT &
-StoreMapWithExtras<ExtrasT>::extras(const sfileno fileno) const
+StoreMapWithExtras<ExtrasT>::extras(const StoreMapSliceId sid) const
 {
     assert(sharedExtras);
-    assert(valid(fileno));
-    return sharedExtras[fileno];
+    assert(validSlice(sid));
+    return sharedExtras[sid];
 }

 } // namespace Ipc
diff --git a/src/tests/stub_MemStore.cc b/src/tests/stub_MemStore.cc
index e5d74c3d43..94750d8233 100644
--- a/src/tests/stub_MemStore.cc
+++ b/src/tests/stub_MemStore.cc
@@ -18,7 +18,7 @@ void MemStore::unlink(StoreEntry &e) STUB
 void MemStore::disconnect(StoreEntry &e) STUB
 void MemStore::reference(StoreEntry &) STUB
 void MemStore::maintain() STUB
-void MemStore::noteFreeMapSlice(const sfileno) STUB
+void MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId) STUB
 void MemStore::get(String const, STOREGETCLIENT, void *) STUB
 void MemStore::init() STUB
 void MemStore::getStats(StoreInfoStats&) const STUB