From 6d68a23059926e33ebb9a690614ab0e224a96e92 Mon Sep 17 00:00:00 2001
From: Alex Rousskov
Date: Sun, 20 Jan 2013 11:54:42 -0700
Subject: [PATCH] Polished names, debugging, comments, whitespaces, style.

---
 src/MemStore.cc            |  5 ++-
 src/MemStore.h             |  2 +-
 src/fs/rock/RockRebuild.cc | 17 +++++-----
 src/fs/rock/RockRebuild.h  |  7 ++---
 src/fs/rock/RockSwapDir.cc |  4 +--
 src/fs/rock/RockSwapDir.h  |  4 +--
 src/ipc/StoreMap.cc        | 63 ++++++++++++++++++--------------------
 src/ipc/StoreMap.h         | 10 +++---
 src/ipc/mem/Pointer.h      |  1 +
 src/store.cc               |  1 -
 10 files changed, 54 insertions(+), 60 deletions(-)

diff --git a/src/MemStore.cc b/src/MemStore.cc
index 014a946cc2..ea4ee2d3fc 100644
--- a/src/MemStore.cc
+++ b/src/MemStore.cc
@@ -546,7 +546,6 @@ MemStore::EntryLimit()
     return entryLimit;
 }
 
-
 /// reports our needs for shared memory pages to Ipc::Mem::Pages
 class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
 {
@@ -604,8 +603,8 @@ protected:
     virtual void create(const RunnerRegistry &);
 
 private:
-    Ipc::Mem::Owner *spaceOwner;
-    MemStoreMap::Owner *mapOwner;
+    Ipc::Mem::Owner *spaceOwner; ///< free slices Owner
+    MemStoreMap::Owner *mapOwner; ///< primary map Owner
 };
 
 RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);
diff --git a/src/MemStore.h b/src/MemStore.h
index 8825707175..02fc2188c8 100644
--- a/src/MemStore.h
+++ b/src/MemStore.h
@@ -8,7 +8,7 @@
 
 // StoreEntry restoration info not already stored by Ipc::StoreMap
 struct MemStoreMapExtras {
-    Ipc::Mem::PageId page; ///< shared memory page with the slice content
+    Ipc::Mem::PageId page; ///< shared memory page with entry slice content
 };
 
 typedef Ipc::StoreMapWithExtras MemStoreMap;
diff --git a/src/fs/rock/RockRebuild.cc b/src/fs/rock/RockRebuild.cc
index 077b2ab36d..61a7dc6161 100644
--- a/src/fs/rock/RockRebuild.cc
+++ b/src/fs/rock/RockRebuild.cc
@@ -49,7 +49,7 @@ public:
 } /* namespace Rock */
 
 /**
- Several layers of information is manipualted during the rebuild:
+ Several layers of information are manipulated during the rebuild:
 
  Store Entry: Response message plus all the metainformation associated with
  it. Identified by store key.
 At any given time, from Squid point
 
@@ -100,10 +100,9 @@ Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
     dbSize(0),
     dbEntrySize(0),
     dbEntryLimit(0),
-    dbSlot(0),
     fd(-1),
     dbOffset(0),
-    slotPos(0),
+    loadingPos(0),
     validationPos(0)
 {
     assert(sd);
@@ -148,7 +147,7 @@ Rock::Rebuild::start()
 
     buf.init(SM_PAGE_SIZE, SM_PAGE_SIZE);
 
     dbOffset = SwapDir::HeaderSize;
-    slotPos = 0;
+    loadingPos = 0;
 
     entries = new LoadingEntry[dbEntryLimit];
@@ -191,7 +190,7 @@ Rock::Rebuild::steps()
 void
 Rock::Rebuild::loadingSteps()
 {
-    debugs(47,5, HERE << sd->index << " slot " << slotPos << " at " <<
+    debugs(47,5, HERE << sd->index << " slot " << loadingPos << " at " <<
           dbOffset << " <= " << dbSize);
 
     // Balance our desire to maximize the number of entries processed at once
@@ -204,7 +203,7 @@ Rock::Rebuild::loadingSteps()
     while (loaded < dbEntryLimit && dbOffset < dbSize) {
         loadOneSlot();
         dbOffset += dbEntrySize;
-        ++slotPos;
+        ++loadingPos;
         ++loaded;
 
         if (counts.scancount % 1000 == 0)
@@ -226,7 +225,7 @@ Rock::Rebuild::loadingSteps()
 void
 Rock::Rebuild::loadOneSlot()
 {
-    debugs(47,5, HERE << sd->index << " slot " << slotPos << " at " <<
+    debugs(47,5, HERE << sd->index << " slot " << loadingPos << " at " <<
           dbOffset << " <= " << dbSize);
 
     ++counts.scancount;
@@ -239,7 +238,7 @@ Rock::Rebuild::loadOneSlot()
     if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
         return;
 
-    const SlotId slotId = slotPos;
+    const SlotId slotId = loadingPos;
 
     // get our header
     DbCellHeader header;
@@ -386,7 +385,7 @@ Rock::Rebuild::swanSong()
 void
 Rock::Rebuild::failure(const char *msg, int errNo)
 {
-    debugs(47,5, HERE << sd->index << " slot " << slotPos << " at " <<
+    debugs(47,5, HERE << sd->index << " slot " << loadingPos << " at " <<
          dbOffset << " <= " << dbSize);
 
     if (errNo)
diff --git a/src/fs/rock/RockRebuild.h b/src/fs/rock/RockRebuild.h
index d56182fc4f..ef60af80bf 100644
--- a/src/fs/rock/RockRebuild.h
+++ b/src/fs/rock/RockRebuild.h
@@ -58,13 +58,12 @@ private:
     int64_t dbSize;
     int dbEntrySize;
     int dbEntryLimit;
-    int dbSlot;
 
     int fd; // store db file descriptor
     int64_t dbOffset;
 
-    sfileno slotPos;
-    sfileno validationPos;
-    MemBuf buf;
+    sfileno loadingPos; ///< index of the db slot being loaded from disk now
+    sfileno validationPos; ///< index of the loaded db slot being validated now
+    MemBuf buf; ///< space to load current db slot (and entry metadata) into
 
     StoreRebuildData counts;
diff --git a/src/fs/rock/RockSwapDir.cc b/src/fs/rock/RockSwapDir.cc
index db102acba2..3ede34bc87 100644
--- a/src/fs/rock/RockSwapDir.cc
+++ b/src/fs/rock/RockSwapDir.cc
@@ -558,8 +558,8 @@ Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STFNCB *cbFile, StoreI
 int64_t
 Rock::SwapDir::diskOffset(int filen) const
 {
-    assert(filen >= 0);
-    return HeaderSize + slotSize*filen;
+    assert(filen >= 0);
+    return HeaderSize + slotSize*filen;
 }
 
 int64_t
diff --git a/src/fs/rock/RockSwapDir.h b/src/fs/rock/RockSwapDir.h
index 6866d1b536..301d0f410e 100644
--- a/src/fs/rock/RockSwapDir.h
+++ b/src/fs/rock/RockSwapDir.h
@@ -117,8 +117,8 @@ private:
 
     DiskIOStrategy *io;
     RefCount theFile; ///< cache storage for this cache_dir
-    Ipc::Mem::Pointer freeSlots; ///< free slots
-    Ipc::Mem::PageId *waitingForPage; ///< one-page cache for a "hot" free slot
+    Ipc::Mem::Pointer freeSlots; ///< all unused slots
+    Ipc::Mem::PageId *waitingForPage; ///< one-page cache for a "hot" free slot
 
     /* configurable options */
     DiskFile::Config fileConfig; ///< file-level configuration options
diff --git a/src/ipc/StoreMap.cc b/src/ipc/StoreMap.cc
index ac7eb27698..4f992977e8 100644
--- a/src/ipc/StoreMap.cc
+++ b/src/ipc/StoreMap.cc
@@ -73,7 +73,7 @@
 Ipc::StoreMap::Anchor *
 Ipc::StoreMap::openForWriting(const cache_key *const key, sfileno &fileno)
 {
     debugs(54, 5, "opening entry with key " << storeKeyText(key)
-           << " for writing in map [" << path << ']');
+           << " for writing " << path);
     const int idx = anchorIndexByKey(key);
     if (Anchor *anchor = openForWritingAt(idx)) {
@@ -96,8 +96,8 @@ Ipc::StoreMap::openForWritingAt(const sfileno fileno, bool overwriteExisting)
 
     // bail if we cannot empty this position
     if (!s.waitingToBeFreed && s.state == Anchor::Readable && !overwriteExisting) {
         lock.unlockExclusive();
-        debugs(54, 5, "cannot open existing entry at " << fileno <<
-               " for writing in map [" << path << ']');
+        debugs(54, 5, "cannot open existing entry " << fileno <<
+               " for writing " << path);
         return NULL;
     }
@@ -112,10 +112,10 @@ Ipc::StoreMap::openForWritingAt(const sfileno fileno, bool overwriteExisting)
 
         //s.setKey(key); // XXX: the caller should do that
         debugs(54, 5, "opened entry " << fileno << " for writing " << path);
         return &s; // and keep the entry locked
-    }
+    }
 
-    debugs(54, 5, "cannot open busy entry at " << fileno <<
-           " for writing in map [" << path << ']');
+    debugs(54, 5, "cannot open busy entry " << fileno <<
+           " for writing " << path);
     return NULL;
 }
@@ -128,8 +128,8 @@ Ipc::StoreMap::closeForWriting(const sfileno fileno, bool lockForReading)
     s.state = Anchor::Readable;
     if (lockForReading) {
         s.lock.switchExclusiveToShared();
-        debugs(54, 5, "switched entry at " << fileno <<
-               " from writing to reading in map [" << path << ']');
+        debugs(54, 5, "switched entry " << fileno <<
+               " from writing to reading " << path);
     } else {
         s.lock.unlockExclusive();
         debugs(54, 5, "closed entry " << fileno << " for writing " << path);
@@ -142,7 +142,7 @@ Ipc::StoreMap::writeableSlice(const AnchorId anchorId, const SliceId sliceId)
     assert(valid(anchorId));
     assert(shared->slots[anchorId].anchor.state == Anchor::Writeable);
     assert(valid(sliceId));
-    return shared->slots[sliceId].slice;
+    return shared->slots[sliceId].slice;
 }
 
 const Ipc::StoreMap::Slice &
@@ -151,7 +151,7 @@ Ipc::StoreMap::readableSlice(const AnchorId anchorId, const SliceId sliceId) con
     assert(valid(anchorId));
     assert(shared->slots[anchorId].anchor.state == Anchor::Readable);
     assert(valid(sliceId));
-    return shared->slots[sliceId].slice;
+    return shared->slots[sliceId].slice;
 }
 
 Ipc::StoreMap::Anchor &
@@ -166,8 +166,7 @@ Ipc::StoreMap::writeableEntry(const AnchorId anchorId)
 void
 Ipc::StoreMap::abortWriting(const sfileno fileno)
 {
-    debugs(54, 5, "abort entry at " << fileno <<
-           " for writing in map [" << path << ']');
+    debugs(54, 5, "aborting entry " << fileno << " for writing " << path);
     assert(valid(fileno));
     Anchor &s = shared->slots[fileno].anchor;
     assert(s.state == Anchor::Writeable);
@@ -178,8 +177,8 @@ void
 Ipc::StoreMap::abortIo(const sfileno fileno)
 {
-    debugs(54, 5, "abort entry at " << fileno <<
-           " for I/O in map [" << path << ']');
+    debugs(54, 5, "aborting entry " << fileno << " for I/O in " << path);
     assert(valid(fileno));
 
     Anchor &s = shared->slots[fileno].anchor;
@@ -211,8 +209,7 @@ Ipc::StoreMap::peekAtReader(const sfileno fileno) const
 void
 Ipc::StoreMap::freeEntry(const sfileno fileno)
 {
-    debugs(54, 5, HERE << " marking entry at " << fileno << " to be freed in"
-           " map [" << path << ']');
+    debugs(54, 5, "marking entry " << fileno << " to be freed in " << path);
     assert(valid(fileno));
     Anchor &s = shared->slots[fileno].anchor;
@@ -228,10 +225,10 @@ void
 Ipc::StoreMap::freeChain(const sfileno fileno, Anchor &inode, const bool keepLocked)
 {
     debugs(54, 7, "freeing " << inode.state << " entry " << fileno <<
-           " in map [" << path << ']');
+           " in " << path);
     if (inode.state != Anchor::Empty) {
         sfileno sliceId = inode.start;
-        debugs(54, 7, "first slice " << sliceId);
+        debugs(54, 8, "first slice " << sliceId);
         while (sliceId >= 0) {
             Slice &slice = shared->slots[sliceId].slice;
             const sfileno nextId = slice.next;
@@ -248,15 +245,14 @@ Ipc::StoreMap::freeChain(const sfileno fileno, Anchor &inode, const bool keepLoc
     if (!keepLocked)
         inode.lock.unlockExclusive();
     --shared->count;
-    debugs(54, 5, "freed entry " << fileno <<
-           " in map [" << path << ']');
+    debugs(54, 5, "freed entry " << fileno << " in " << path);
 }
 
 const Ipc::StoreMap::Anchor *
 Ipc::StoreMap::openForReading(const cache_key *const key, sfileno &fileno)
 {
     debugs(54, 5, "opening entry with key " << storeKeyText(key)
-           << " for reading in map [" << path << ']');
+           << " for reading " << path);
     const int idx = anchorIndexByKey(key);
     if (const Anchor *slot = openForReadingAt(idx)) {
         if (slot->sameKey(key)) {
@@ -272,28 +268,27 @@ Ipc::StoreMap::openForReading(const cache_key *const key, sfileno &fileno)
 
 const Ipc::StoreMap::Anchor *
 Ipc::StoreMap::openForReadingAt(const sfileno fileno)
 {
-    debugs(54, 5, "opening entry at " << fileno <<
-           " for reading in map [" << path << ']');
+    debugs(54, 5, "opening entry " << fileno << " for reading " << path);
     assert(valid(fileno));
     Anchor &s = shared->slots[fileno].anchor;
 
     if (!s.lock.lockShared()) {
-        debugs(54, 5, "cannot open busy entry at " << fileno <<
-               "for reading in map [" << path << ']');
+        debugs(54, 5, "cannot open busy entry " << fileno <<
+               " for reading " << path);
         return NULL;
     }
 
     if (s.state == Anchor::Empty) {
         s.lock.unlockShared();
-        debugs(54, 7, "cannot open empty entry at " << fileno <<
-               " for reading in map [" << path << ']');
+        debugs(54, 7, "cannot open empty entry " << fileno <<
+               " for reading " << path);
         return NULL;
     }
 
     if (s.waitingToBeFreed) {
         s.lock.unlockShared();
-        debugs(54, 7, HERE << "cannot open marked entry at " << fileno <<
-               " for reading in map [" << path << ']');
+        debugs(54, 7, HERE << "cannot open marked entry " << fileno <<
+               " for reading " << path);
         return NULL;
     }
@@ -321,20 +316,20 @@ Ipc::StoreMap::purgeOne()
 
     const int searchLimit = min(10000, entryLimit());
     int tries = 0;
     for (; tries < searchLimit; ++tries) {
-        const sfileno fileno = shared->victim++ % shared->limit;
+        const sfileno fileno = shared->victim++ % shared->limit;
         assert(valid(fileno));
         Anchor &s = shared->slots[fileno].anchor;
         if (s.lock.lockExclusive()) {
             if (s.state == Anchor::Readable) { // skip empties
                 // this entry may be marked for deletion, and that is OK
                 freeChain(fileno, s, false);
-                debugs(54, 5, "purged entry at " << fileno);
+                debugs(54, 5, "purged entry " << fileno << " from " << path);
                 return true;
-            }
+            }
             s.lock.unlockExclusive();
         }
-    }
-    debugs(54, 5, "found no entries to purge; tried: " << tries);
+    }
+    debugs(54, 5, "no entries to purge from " << path << "; tried: " << tries);
     return false;
 }
diff --git a/src/ipc/StoreMap.h b/src/ipc/StoreMap.h
index 19c6990761..7acdd0ba07 100644
--- a/src/ipc/StoreMap.h
+++ b/src/ipc/StoreMap.h
@@ -1,10 +1,10 @@
 #ifndef SQUID_IPC_STORE_MAP_H
 #define SQUID_IPC_STORE_MAP_H
 
-#include "typedefs.h"
 #include "ipc/ReadWriteLock.h"
 #include "ipc/mem/FlexibleArray.h"
 #include "ipc/mem/Pointer.h"
+#include "typedefs.h"
 
 namespace Ipc
 {
@@ -68,11 +68,13 @@ public:
     State state; ///< current state
 };
 
-/// XXX: a hack to allocate one shared array for both anchors and slices
+/// A hack to allocate one shared array for both anchors and slices.
+/// Anchors are indexed by store entry ID and are independent from each other.
+/// Slices are indexed by slice IDs and form entry chains using slice.next.
 class StoreMapSlot {
 public:
-    StoreMapAnchor anchor;
-    StoreMapSlice slice;
+    StoreMapAnchor anchor; ///< information about store entry as a whole
+    StoreMapSlice slice; ///< information about one stored entry piece
 };
 
 class StoreMapCleaner;
diff --git a/src/ipc/mem/Pointer.h b/src/ipc/mem/Pointer.h
index f46092d08c..33f4693f66 100644
--- a/src/ipc/mem/Pointer.h
+++ b/src/ipc/mem/Pointer.h
@@ -32,6 +32,7 @@ public:
 
     ~Owner();
 
+    /// Raw access; handy to finalize initialization, but avoid if possible.
     Class *object() { return theObject; }
 
 private:
diff --git a/src/store.cc b/src/store.cc
index a922cae909..30d0e589c6 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -559,7 +559,6 @@ StoreEntry::releaseRequest()
 
 int
 StoreEntry::unlock(const char *context)
 {
-
     debugs(20, 3, (context ? context : "somebody") << " unlocking key " << getMD5Text() << ' ' << *this);
     --lock_count;
-- 
2.47.2
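
A note on the StoreMapSlot layout touched by this patch: the new comment says that one shared array holds two kinds of records, an anchor per store entry and a slice per cell, with an entry's slices chained through slice.next. The sketch below is a minimal, self-contained C++ illustration of that idea only; it is not Squid code, and every name in it (DemoSlice, DemoAnchor, DemoSlot, entryBytes) is hypothetical.

// Illustrative sketch only; simplified stand-ins for the Ipc::StoreMap
// records described in the patch, not the real types.
#include <cstdint>
#include <iostream>
#include <vector>

struct DemoSlice {
    int32_t next = -1;  // ID of the next slice in the entry chain; -1 ends the chain
    uint32_t size = 0;  // bytes of entry content held by this slice
};

struct DemoAnchor {
    bool readable = false; // roughly the Readable state of an anchor
    int32_t start = -1;    // ID of the first slice in the entry chain
};

// One cell of the shared array: anchor[i] describes store entry i as a whole,
// while slice[i] describes slice i, which may belong to any entry's chain.
struct DemoSlot {
    DemoAnchor anchor;
    DemoSlice slice;
};

// Walk an entry chain from its anchor, summing the stored bytes.
static uint64_t entryBytes(const std::vector<DemoSlot> &slots, const int32_t entryId)
{
    uint64_t total = 0;
    for (int32_t sliceId = slots[entryId].anchor.start; sliceId >= 0;
            sliceId = slots[sliceId].slice.next)
        total += slots[sliceId].slice.size;
    return total;
}

int main()
{
    std::vector<DemoSlot> slots(4);

    // entry 0 is stored in slices 2 and 1; the order comes from slice.next
    slots[0].anchor.readable = true;
    slots[0].anchor.start = 2;
    slots[2].slice.next = 1;
    slots[2].slice.size = 4096;
    slots[1].slice.size = 100;

    std::cout << "entry 0 stores " << entryBytes(slots, 0) << " bytes\n";
    return 0;
}

Sharing a single array keeps the anchor index range and the slice index range identical, which is why the new comment still calls the arrangement a hack rather than a design goal.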