From: Dmitry Kurochkin
Date: Tue, 17 Jul 2012 11:41:40 +0000 (-0600)
Subject: Restore memory caching ability lost since r11969.
X-Git-Tag: SQUID_3_2_0_19~26
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=82ad8fe650403a9a72d7574c8263187fa0552092;p=thirdparty%2Fsquid.git

Restore memory caching ability lost since r11969.

Honor maximum_object_size_in_memory for the non-shared memory cache.

Since r11969, Squid calls trimMemory() for all entries to release unused
MemObject memory. Unfortunately, that revision also started trimming objects
that could have been stored in the memory cache later. Trimmed objects are no
longer eligible for memory caching.

It is possible that the IN_MEMORY check in StoreEntry::trimMemory() was
preventing excessive trimming in the past, but after the SMP changes the
IN_MEMORY flag is set only after the object is committed to a memory cache in
StoreController::handleIdleEntry(), so we cannot rely on it. For example:

    clientReplyContext::removeStoreReference()
      storeUnregister()
        StoreEntry::swapOut()
          StoreEntry::trimMemory()
        StoreEntry::releaseRequest()
      StoreEntry::unlock()
        StoreController::handleIdleEntry() // never get here to set IN_MEMORY status
                                           // because the entry is already marked for release

This change adds StoreController::keepForLocalMemoryCache() and
MemStore::keepInLocalMemory() methods to check whether an entry could be
stored in a memory cache (and, hence, should not be trimmed). The
store-related part of the trimming logic has been moved from
StoreEntry::trimMemory() to StoreController::maybeTrimMemory(). StoreEntry
code now focuses on checks that are not specific to any memory cache settings
or admission policies.

These changes may have also resulted in Squid honoring
maximum_object_size_in_memory for the non-shared memory cache. We may have
been ignoring that option for non-shared memory caches since the SMP changes
because the check for it was specific to the shared memory cache.
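The core of the change is a single size test, shared by the shared-cache path
(MemStore::keepInLocalMemory()) and the non-shared path
(StoreController::keepForLocalMemoryCache()), plus a dispatcher that trims only
entries failing that test. Below is a minimal, self-contained C++ sketch of that
logic; the EntrySizes struct, the limit values, and the free functions are
illustrative stand-ins, not Squid's real types or configuration, and the real
methods also consult StoreEntry::memoryCachable() and, for the shared cache,
the page-size check in willFit().

    // Illustrative stand-ins only: Squid's real code operates on StoreEntry,
    // MemObject, and SquidConfig rather than these simplified types.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    struct EntrySizes {
        int64_t loadedSize;   // bytes already accumulated in RAM (mem_obj->endOffset())
        int64_t expectedSize; // expected reply size; may be < 0 when unknown
    };

    // Size test shared by keepInLocalMemory() and keepForLocalMemoryCache():
    // keep the entry only if the larger of its loaded and expected sizes fits
    // under both the per-object and the total in-memory cache limits.
    bool keepForMemoryCache(const EntrySizes &e, int64_t maxInMemObjSize, int64_t memMaxSize)
    {
        const int64_t ramSize = std::max(e.loadedSize, e.expectedSize);
        const int64_t ramLimit = std::min(memMaxSize, maxInMemObjSize);
        return ramSize <= ramLimit;
    }

    // Dispatch modeled on StoreController::maybeTrimMemory(): trim in-RAM data
    // only for entries that have no chance of being memory-cached later.
    void maybeTrimMemory(const EntrySizes &e, int64_t maxInMemObjSize, int64_t memMaxSize)
    {
        if (keepForMemoryCache(e, maxInMemObjSize, memMaxSize))
            std::cout << "keep RAM copy for possible memory caching\n";
        else
            std::cout << "trim RAM copy\n";
    }

    int main()
    {
        const int64_t maxInMemObjSize = 512 * 1024;   // e.g. maximum_object_size_in_memory
        const int64_t memMaxSize = 256 * 1024 * 1024; // e.g. cache_mem

        maybeTrimMemory({64 * 1024, 100 * 1024}, maxInMemObjSize, memMaxSize);       // kept
        maybeTrimMemory({64 * 1024, 10 * 1024 * 1024}, maxInMemObjSize, memMaxSize); // trimmed
    }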
---

diff --git a/src/MemStore.cc b/src/MemStore.cc
index 1e055f3c53..eb5d155bec 100644
--- a/src/MemStore.cc
+++ b/src/MemStore.cc
@@ -251,14 +251,40 @@ MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
     return true;
 }
 
-void
-MemStore::considerKeeping(StoreEntry &e)
+bool
+MemStore::keepInLocalMemory(const StoreEntry &e) const
 {
     if (!e.memoryCachable()) {
         debugs(20, 7, HERE << "Not memory cachable: " << e);
-        return; // cannot keep due to entry state or properties
+        return false; // will not cache due to entry state or properties
+    }
+
+    assert(e.mem_obj);
+    const int64_t loadedSize = e.mem_obj->endOffset();
+    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
+    const int64_t ramSize = max(loadedSize, expectedSize);
+
+    if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
+        debugs(20, 5, HERE << "Too big max(" <<
+               loadedSize << ", " << expectedSize << "): " << e);
+        return false; // will not cache due to cachable entry size limits
+    }
+
+    if (!willFit(ramSize)) {
+        debugs(20, 5, HERE << "Wont fit max(" <<
+               loadedSize << ", " << expectedSize << "): " << e);
+        return false; // will not cache due to memory cache slot limit
     }
 
+    return true;
+}
+
+void
+MemStore::considerKeeping(StoreEntry &e)
+{
+    if (!keepInLocalMemory(e))
+        return;
+
     // since we copy everything at once, we can only keep complete entries
     if (e.store_status != STORE_OK) {
         debugs(20, 7, HERE << "Incomplete: " << e);
@@ -270,6 +296,12 @@ MemStore::considerKeeping(StoreEntry &e)
     const int64_t loadedSize = e.mem_obj->endOffset();
     const int64_t expectedSize = e.mem_obj->expectedReplySize();
 
+    // objects of unknown size are not allowed into memory cache, for now
+    if (expectedSize < 0) {
+        debugs(20, 5, HERE << "Unknown expected size: " << e);
+        return;
+    }
+
     // since we copy everything at once, we can only keep fully loaded entries
     if (loadedSize != expectedSize) {
         debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
@@ -277,18 +309,12 @@ MemStore::considerKeeping(StoreEntry &e)
         return;
     }
 
-    if (!willFit(expectedSize)) {
-        debugs(20, 5, HERE << "No mem-cache space for " << e);
-        return; // failed to free enough space
-    }
-
     keep(e); // may still fail
 }
 
 bool
-MemStore::willFit(int64_t need)
+MemStore::willFit(int64_t need) const
 {
-    // TODO: obey configured maximum entry size (with page-based rounding)
     return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
 }
 
diff --git a/src/MemStore.h b/src/MemStore.h
index 17b2e0e752..b4a03ac8e1 100644
--- a/src/MemStore.h
+++ b/src/MemStore.h
@@ -27,6 +27,9 @@ public:
     /// cache the entry or forget about it until the next considerKeeping call
     void considerKeeping(StoreEntry &e);
 
+    /// whether e should be kept in local RAM for possible future caching
+    bool keepInLocalMemory(const StoreEntry &e) const;
+
     /* Store API */
     virtual int callback();
     virtual StoreEntry * get(const cache_key *);
@@ -47,7 +50,7 @@ public:
     static int64_t EntryLimit();
 
 protected:
-    bool willFit(int64_t needed);
+    bool willFit(int64_t needed) const;
     void keep(StoreEntry &e);
 
     bool copyToShm(StoreEntry &e, MemStoreMap::Extras &extras);
diff --git a/src/Store.h b/src/Store.h
index afc9eb5e66..968d1337d1 100644
--- a/src/Store.h
+++ b/src/Store.h
@@ -351,6 +351,11 @@ public:
     /// called when the entry is no longer needed by any transaction
     virtual void handleIdleEntry(StoreEntry &e) {}
 
+    // XXX: This method belongs to Store::Root/StoreController, but it is here
+    // because test cases use non-StoreController derivatives as Root
+    /// called to get rid of no longer needed entry data in RAM, if any
+    virtual void maybeTrimMemory(StoreEntry &e, const bool preserveSwappable) {}
+
 private:
     static RefCount<Store> CurrentRoot;
 };
diff --git a/src/SwapDir.h b/src/SwapDir.h
index 8247143775..e1d7030adc 100644
--- a/src/SwapDir.h
+++ b/src/SwapDir.h
@@ -60,6 +60,7 @@ public:
 
     /* Store parent API */
     virtual void handleIdleEntry(StoreEntry &e);
+    virtual void maybeTrimMemory(StoreEntry &e, const bool preserveSwappable);
 
     virtual void init();
 
@@ -91,6 +92,7 @@ public:
 
 private:
     void createOneStore(Store &aStore);
+    bool keepForLocalMemoryCache(const StoreEntry &e) const;
 
     StorePointer swapDir; ///< summary view of all disk caches
     MemStore *memStore; ///< memory cache
diff --git a/src/store.cc b/src/store.cc
index f7ad5839e8..c65857421b 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -1446,14 +1446,6 @@ StoreEntry::memoryCachable() const
     if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
         return 0;
 
-    if (Config.memShared && IamWorkerProcess()) {
-        const int64_t expectedSize = mem_obj->expectedReplySize();
-        // objects of unknown size are not allowed into memory cache, for now
-        if (expectedSize < 0 ||
-                expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
-            return 0;
-    }
-
     return 1;
 }
 
diff --git a/src/store_dir.cc b/src/store_dir.cc
index 0b27b76ea8..dc6f87e2ff 100644
--- a/src/store_dir.cc
+++ b/src/store_dir.cc
@@ -781,6 +781,40 @@ StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallback
     fatal("not implemented");
 }
 
+// move this into [non-shared] memory cache class when we have one
+/// whether e should be kept in local RAM for possible future caching
+bool
+StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
+{
+    if (!e.memoryCachable())
+        return false;
+
+    // does the current and expected size obey memory caching limits?
+    assert(e.mem_obj);
+    const int64_t loadedSize = e.mem_obj->endOffset();
+    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
+    const int64_t ramSize = max(loadedSize, expectedSize);
+    const int64_t ramLimit = min(
+                                 static_cast<int64_t>(Config.memMaxSize),
+                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
+    return ramSize <= ramLimit;
+}
+
+void
+StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
+{
+    bool keepInLocalMemory = false;
+    if (memStore)
+        keepInLocalMemory = memStore->keepInLocalMemory(e);
+    else
+        keepInLocalMemory = keepForLocalMemoryCache(e);
+
+    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
+
+    if (!keepInLocalMemory)
+        e.trimMemory(preserveSwappable);
+}
+
 void
 StoreController::handleIdleEntry(StoreEntry &e)
 {
@@ -795,7 +829,7 @@ StoreController::handleIdleEntry(StoreEntry &e)
         memStore->considerKeeping(e);
         // leave keepInLocalMemory false; memStore maintains its own cache
     } else {
-        keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
+        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                             // the local memory cache is not overflowing
                             (mem_node::InUseCount() <= store_pages_max);
     }
diff --git a/src/store_swapout.cc b/src/store_swapout.cc
index 832a543d36..a8d7bcdbb0 100644
--- a/src/store_swapout.cc
+++ b/src/store_swapout.cc
@@ -198,7 +198,7 @@ StoreEntry::swapOut()
 
     const bool weAreOrMayBeSwappingOut = swappingOut() || mayStartSwapOut();
 
-    trimMemory(weAreOrMayBeSwappingOut);
+    Store::Root().maybeTrimMemory(*this, weAreOrMayBeSwappingOut);
 
     if (!weAreOrMayBeSwappingOut)
         return; // nothing else to do
diff --git a/src/tests/stub_MemStore.cc b/src/tests/stub_MemStore.cc
index 206dbe4bf3..08a1b7c16e 100644
--- a/src/tests/stub_MemStore.cc
+++ b/src/tests/stub_MemStore.cc
@@ -13,6 +13,7 @@ MemStore::MemStore() STUB
 MemStore::~MemStore() STUB
+bool MemStore::keepInLocalMemory(const StoreEntry &) const STUB_RETVAL(false)
 void MemStore::considerKeeping(StoreEntry &) STUB
 void MemStore::reference(StoreEntry &) STUB
 void MemStore::maintain() STUB
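For the shared memory cache, admission involves more than the size test:
MemStore::considerKeeping() still copies only complete, fully loaded entries of
known size, and willFit() still requires the entry to fit into a single shared
memory page. The sketch below condenses that chain into stand-alone C++; the
Entry struct, the 32 KB page size, and the 512 KB object limit are hypothetical
stand-ins (the real values come from Ipc::Mem::PageSize() and
maximum_object_size_in_memory).

    // Stand-alone sketch of the shared-cache admission chain after this patch;
    // the Entry type and the constants are hypothetical, not Squid's real API.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    static const int64_t PageSize = 32 * 1024;          // stand-in for Ipc::Mem::PageSize()
    static const int64_t MaxInMemObjSize = 512 * 1024;  // maximum_object_size_in_memory

    struct Entry {
        bool memoryCachable;   // StoreEntry::memoryCachable()
        bool storeOk;          // store_status == STORE_OK
        int64_t loadedSize;    // mem_obj->endOffset()
        int64_t expectedSize;  // mem_obj->expectedReplySize(); may be < 0
    };

    // willFit(): the shared cache keeps one page per entry, for now.
    static bool willFit(int64_t need) { return need <= PageSize; }

    // keepInLocalMemory(): is the entry worth keeping in RAM at all?
    static bool keepInLocalMemory(const Entry &e)
    {
        if (!e.memoryCachable)
            return false;
        const int64_t ramSize = std::max(e.loadedSize, e.expectedSize);
        return ramSize <= MaxInMemObjSize && willFit(ramSize);
    }

    // considerKeeping(): only complete, fully loaded entries of known size
    // are actually copied into the shared memory cache.
    static bool considerKeeping(const Entry &e)
    {
        if (!keepInLocalMemory(e))
            return false;
        if (!e.storeOk)
            return false;                       // incomplete
        if (e.expectedSize < 0)
            return false;                       // unknown size
        return e.loadedSize == e.expectedSize;  // otherwise partially loaded
    }

    int main()
    {
        const Entry small = {true, true, 8 * 1024, 8 * 1024};
        const Entry big = {true, true, 64 * 1024, 64 * 1024};
        std::cout << considerKeeping(small) << ' ' << considerKeeping(big) << '\n'; // 1 0
    }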