Before commit
4310f8b, anchorToCache() (known then as anchorCollapsed())
was only called for not-yet-anchored entries. For anchored entries, we
called updateCollapsed(). In that commit, a new anchorToCache() caller
(i.e. allowSharing() after peek()) started calling anchorToCache() for
all entries, but anchorToCache() code was not updated to skip anchoring
of anchored entries, leading to an extra read lock placed on a peek()ed
hit entry. The extra lock prevented the entry slot from being reused for
other entries when the entry was later purged (i.e. marked for removal),
drastically decreasing hit ratio in some environments.
Fixed: anchorToCache() no longer anchors already-anchored entries. To make sure
we covered all callers/cases, especially in patches, we changed the call
parameters, addressing an old code simplification TODO.
}
bool
-MemStore::anchorToCache(StoreEntry &entry, bool &inSync)
+MemStore::anchorToCache(StoreEntry &entry)
{
+ Assure(!entry.hasMemStore());
+ Assure(entry.mem().memCache.io != MemObject::ioDone);
+
if (!map)
return false;
return false;
anchorEntry(entry, index, *slot);
- inSync = updateAnchoredWith(entry, index, *slot);
- return true; // even if inSync is false
+ if (!updateAnchoredWith(entry, index, *slot))
+ throw TextException("updateAnchoredWith() failure", Here());
+ return true;
}
bool
virtual bool dereference(StoreEntry &e) override;
virtual void updateHeaders(StoreEntry *e) override;
virtual void maintain() override;
- virtual bool anchorToCache(StoreEntry &e, bool &inSync) override;
+ virtual bool anchorToCache(StoreEntry &) override;
virtual bool updateAnchored(StoreEntry &) override;
virtual void evictCached(StoreEntry &) override;
virtual void evictIfFound(const cache_key *) override;
}
bool
-Rock::SwapDir::anchorToCache(StoreEntry &entry, bool &inSync)
+Rock::SwapDir::anchorToCache(StoreEntry &entry)
{
+ Assure(!entry.hasDisk());
+
if (!map || !theFile || !theFile->canRead())
return false;
return false;
anchorEntry(entry, filen, *slot);
- inSync = updateAnchoredWith(entry, *slot);
- return true; // even if inSync is false
+ return true;
}
bool
assert(entry.hasDisk(index));
- const Ipc::StoreMapAnchor &s = map->readableEntry(entry.swap_filen);
- return updateAnchoredWith(entry, s);
-}
-
-bool
-Rock::SwapDir::updateAnchoredWith(StoreEntry &entry, const Ipc::StoreMapAnchor &anchor)
-{
+ const auto &anchor = map->readableEntry(entry.swap_filen);
entry.swap_file_sz = anchor.basics.swap_file_sz;
return true;
}
protected:
/* Store API */
- virtual bool anchorToCache(StoreEntry &entry, bool &inSync);
+ virtual bool anchorToCache(StoreEntry &);
virtual bool updateAnchored(StoreEntry &);
/* protected ::SwapDir API */
StoreIOState::Pointer createUpdateIO(const Ipc::StoreMapUpdate &update, StoreIOState::STFNCB *, StoreIOState::STIOCB *, void *);
void anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor);
- bool updateAnchoredWith(StoreEntry &, const Ipc::StoreMapAnchor &);
friend class Rebuild;
friend class IoState;
/// make stored metadata and HTTP headers the same as in the given entry
virtual void updateHeaders(StoreEntry *) {}
- /// If Transients entry cannot be attached to this storage, return false.
- /// If the entry is not found, return false. Otherwise, return true after
- /// tying the entry to this cache and setting inSync to updateAnchored().
- virtual bool anchorToCache(StoreEntry &, bool &/*inSync*/) { return false; }
+ /// tie StoreEntry to this storage if this storage has a matching entry
+ /// \retval true if this storage has a matching entry
+ virtual bool anchorToCache(StoreEntry &) { return false; }
/// Update a local Transients entry with fresh info from this cache (if any).
/// Return true iff the cache supports Transients entries and
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
- // TODO: refactor to throw on anchorToCache() inSync errors!
-
// anchorToCache() below and many find() callers expect a registered entry
addReading(&entry, key);
if (entry.hasTransients()) {
// store hadWriter before computing `found`; \see Transients::get()
const auto hadWriter = transients->hasWriter(entry);
- bool inSync = false;
- const bool found = anchorToCache(entry, inSync);
- if (found && !inSync)
- throw TexcHere("cannot sync");
+ const auto found = anchorToCache(entry);
if (!found) {
// !found should imply hittingRequiresCollapsing() regardless of writer presence
if (!entry.hittingRequiresCollapsing()) {
if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
found = true;
inSync = true;
- debugs(20, 7, "fully mem-loaded " << *collapsed);
+ debugs(20, 7, "already handled by memory store: " << *collapsed);
} else if (sharedMemStore && collapsed->hasMemStore()) {
found = true;
inSync = sharedMemStore->updateAnchored(*collapsed);
found = true;
inSync = swapDir->updateAnchored(*collapsed);
} else {
- found = anchorToCache(*collapsed, inSync);
+ try {
+ found = anchorToCache(*collapsed);
+ inSync = found;
+ } catch (...) {
+ // TODO: Write an exception handler for the entire method.
+ debugs(20, 3, "anchorToCache() failed for " << *collapsed << ": " << CurrentException);
+ collapsed->abort();
+ return;
+ }
}
if (entryStatus.waitingToBeFreed && !found) {
collapsed->setCollapsingRequirement(true);
}
-/// Called for Transients entries that are not yet anchored to a cache.
+/// If possible and has not been done, associates the entry with its store(s).
/// \returns false for not-yet-cached entries that we may attach later
/// \returns true for other entries after synchronizing them with their store
-/// and setting inSync to reflect that synchronization outcome.
bool
-Store::Controller::anchorToCache(StoreEntry &entry, bool &inSync)
+Store::Controller::anchorToCache(StoreEntry &entry)
{
assert(entry.hasTransients());
assert(transientsReader(entry));
+ // TODO: Attach entries to both memory and disk
+
+ // TODO: Reduce code duplication with syncCollapsed()
+ if (sharedMemStore && entry.mem().memCache.io == MemObject::ioDone) {
+ debugs(20, 5, "already handled by memory store: " << entry);
+ return true;
+ } else if (sharedMemStore && entry.hasMemStore()) {
+ debugs(20, 5, "already anchored to memory store: " << entry);
+ return true;
+ } else if (swapDir && entry.hasDisk()) {
+ debugs(20, 5, "already anchored to disk: " << entry);
+ return true;
+ }
+
debugs(20, 7, "anchoring " << entry);
Transients::EntryStatus entryStatus;
transients->status(entry, entryStatus);
- inSync = false;
bool found = false;
if (sharedMemStore)
- found = sharedMemStore->anchorToCache(entry, inSync);
+ found = sharedMemStore->anchorToCache(entry);
if (!found && swapDir)
- found = swapDir->anchorToCache(entry, inSync);
+ found = swapDir->anchorToCache(entry);
- if (inSync) {
+ if (found) {
debugs(20, 7, "anchored " << entry);
- assert(found);
entry.setCollapsingRequirement(false);
return true;
}
- if (found) {
- debugs(20, 5, "failed to anchor " << entry);
- return true;
- }
+ if (entryStatus.waitingToBeFreed)
+ throw TextException("will never be able to anchor to an already marked entry", Here());
- if (entryStatus.waitingToBeFreed) {
- debugs(20, 5, "failed on marked unattached " << entry);
- return true;
- }
-
- if (!entryStatus.hasWriter) {
- debugs(20, 5, "failed on abandoned-by-writer " << entry);
- return true;
- }
+ if (!entryStatus.hasWriter)
+ throw TextException("will never be able to anchor to an abandoned-by-writer entry", Here());
debugs(20, 7, "skipping not yet cached " << entry);
entry.setCollapsingRequirement(true);
void memoryEvictCached(StoreEntry &);
void transientsUnlinkByKeyIfFound(const cache_key *);
bool keepForLocalMemoryCache(StoreEntry &e) const;
- bool anchorToCache(StoreEntry &e, bool &inSync);
+ bool anchorToCache(StoreEntry &);
void checkTransients(const StoreEntry &) const;
void checkFoundCandidate(const StoreEntry &) const;
}
bool
-Store::Disks::anchorToCache(StoreEntry &entry, bool &inSync)
+Store::Disks::anchorToCache(StoreEntry &entry)
{
+ if (entry.hasDisk())
+ return true; // already anchored
+
if (const int cacheDirs = Config.cacheSwap.n_configured) {
// ask each cache_dir until the entry is found; use static starting
// point to avoid asking the same subset of disks more often
if (!sd.active())
continue;
- if (sd.anchorToCache(entry, inSync)) {
+ if (sd.anchorToCache(entry)) {
debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
return true;
}
virtual bool dereference(StoreEntry &e) override;
virtual void updateHeaders(StoreEntry *) override;
virtual void maintain() override;
- virtual bool anchorToCache(StoreEntry &e, bool &inSync) override;
+ virtual bool anchorToCache(StoreEntry &) override;
virtual bool updateAnchored(StoreEntry &) override;
virtual void evictCached(StoreEntry &) override;
virtual void evictIfFound(const cache_key *) override;
bool MemStore::dereference(StoreEntry &) STUB_RETVAL(false)
void MemStore::evictCached(StoreEntry&) STUB
void MemStore::evictIfFound(const cache_key *) STUB
-bool MemStore::anchorToCache(StoreEntry&, bool&) STUB_RETVAL(false)
+bool MemStore::anchorToCache(StoreEntry&) STUB_RETVAL(false)
bool MemStore::updateAnchored(StoreEntry&) STUB_RETVAL(false)
int64_t MemStore::EntryLimit() STUB_RETVAL(0)
bool Disks::dereference(StoreEntry &) STUB_RETVAL(false)
void Disks::updateHeaders(StoreEntry *) STUB
void Disks::maintain() STUB
-bool Disks::anchorToCache(StoreEntry &, bool &) STUB_RETVAL(false)
+bool Disks::anchorToCache(StoreEntry &) STUB_RETVAL(false)
bool Disks::updateAnchored(StoreEntry &) STUB_RETVAL(false)
void Disks::evictCached(StoreEntry &) STUB
void Disks::evictIfFound(const cache_key *) STUB