Honor maximum_object_size_in_memory for non-shared memory cache.
Since r11969, Squid calls trimMemory() for all entries to release unused
MemObjects memory. Unfortunately, that revision also started trimming objects
that could have been stored in memory cache later. Trimmed objects are no
longer eligible for memory caching.
It is possible that the IN_MEMORY check in StoreEntry::trimMemory() was preventing
excessive trimming in the past, but after the SMP changes, the IN_MEMORY flag is set
only after the object is committed to a memory cache in
StoreController::handleIdleEntry(), so we cannot rely on it. For example:
clientReplyContext::removeStoreReference()
storeUnregister()
StoreEntry::swapOut()
StoreEntry::trimMemory()
StoreEntry::releaseRequest()
StoreEntry::unlock()
StoreController::handleIdleEntry() // never gets here because the entry is
set IN_MEMORY status // already marked for release
This change adds StoreController::keepForLocalMemoryCache() and
MemStore::keepInLocalMemory() methods to check if an entry could be stored in
a memory cache (and, hence, should not be trimmed). The store-related part of
the trimming logic has been moved from StoreEntry::trimMemory() to
StoreController::maybeTrimMemory(). StoreEntry code now focuses on checks that
are not specific to any memory cache settings or admission policies.
These changes may have resulted in Squid honoring maximum_object_size_in_memory for
the non-shared memory cache. We may have been ignoring that option for non-shared
memory caches since the SMP changes because the check for it was specific to the
shared memory cache.
return true;
}
-void
-MemStore::considerKeeping(StoreEntry &e)
+bool
+MemStore::keepInLocalMemory(const StoreEntry &e) const
{
if (!e.memoryCachable()) {
debugs(20, 7, HERE << "Not memory cachable: " << e);
- return; // cannot keep due to entry state or properties
+ return false; // will not cache due to entry state or properties
+ }
+
+ assert(e.mem_obj);
+ const int64_t loadedSize = e.mem_obj->endOffset();
+ const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
+ const int64_t ramSize = max(loadedSize, expectedSize);
+
+ if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
+ debugs(20, 5, HERE << "Too big max(" <<
+ loadedSize << ", " << expectedSize << "): " << e);
+ return false; // will not cache due to cachable entry size limits
+ }
+
+ if (!willFit(ramSize)) {
+ debugs(20, 5, HERE << "Wont fit max(" <<
+ loadedSize << ", " << expectedSize << "): " << e);
+ return false; // will not cache due to memory cache slot limit
}
+ return true;
+}
+
+void
+MemStore::considerKeeping(StoreEntry &e)
+{
+ if (!keepInLocalMemory(e))
+ return;
+
// since we copy everything at once, we can only keep complete entries
if (e.store_status != STORE_OK) {
debugs(20, 7, HERE << "Incomplete: " << e);
const int64_t loadedSize = e.mem_obj->endOffset();
const int64_t expectedSize = e.mem_obj->expectedReplySize();
+ // objects of unknown size are not allowed into memory cache, for now
+ if (expectedSize < 0) {
+ debugs(20, 5, HERE << "Unknown expected size: " << e);
+ return;
+ }
+
// since we copy everything at once, we can only keep fully loaded entries
if (loadedSize != expectedSize) {
debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
return;
}
- if (!willFit(expectedSize)) {
- debugs(20, 5, HERE << "No mem-cache space for " << e);
- return; // failed to free enough space
- }
-
keep(e); // may still fail
}
bool
-MemStore::willFit(int64_t need)
+MemStore::willFit(int64_t need) const
{
- // TODO: obey configured maximum entry size (with page-based rounding)
return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
}
/// cache the entry or forget about it until the next considerKeeping call
void considerKeeping(StoreEntry &e);
+ /// whether e should be kept in local RAM for possible future caching
+ bool keepInLocalMemory(const StoreEntry &e) const;
+
/* Store API */
virtual int callback();
virtual StoreEntry * get(const cache_key *);
static int64_t EntryLimit();
protected:
- bool willFit(int64_t needed);
+ bool willFit(int64_t needed) const;
void keep(StoreEntry &e);
bool copyToShm(StoreEntry &e, MemStoreMap::Extras &extras);
/// called when the entry is no longer needed by any transaction
virtual void handleIdleEntry(StoreEntry &e) {}
+ // XXX: This method belongs to Store::Root/StoreController, but it is here
+ // because test cases use non-StoreController derivatives as Root
+ /// called to get rid of no longer needed entry data in RAM, if any
+ virtual void maybeTrimMemory(StoreEntry &e, const bool preserveSwappable) {}
+
private:
static RefCount<Store> CurrentRoot;
};
/* Store parent API */
virtual void handleIdleEntry(StoreEntry &e);
+ virtual void maybeTrimMemory(StoreEntry &e, const bool preserveSwappable);
virtual void init();
private:
void createOneStore(Store &aStore);
+ bool keepForLocalMemoryCache(const StoreEntry &e) const;
StorePointer swapDir; ///< summary view of all disk caches
MemStore *memStore; ///< memory cache
if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
return 0;
- if (Config.memShared && IamWorkerProcess()) {
- const int64_t expectedSize = mem_obj->expectedReplySize();
- // objects of unknown size are not allowed into memory cache, for now
- if (expectedSize < 0 ||
- expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
- return 0;
- }
-
return 1;
}
fatal("not implemented");
}
+// move this into [non-shared] memory cache class when we have one
+/// whether e should be kept in local RAM for possible future caching
+bool
+StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
+{
+ if (!e.memoryCachable())
+ return false;
+
+ // does the current and expected size obey memory caching limits?
+ assert(e.mem_obj);
+ const int64_t loadedSize = e.mem_obj->endOffset();
+ const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
+ const int64_t ramSize = max(loadedSize, expectedSize);
+ const int64_t ramLimit = min(
+ static_cast<int64_t>(Config.memMaxSize),
+ static_cast<int64_t>(Config.Store.maxInMemObjSize));
+ return ramSize <= ramLimit;
+}
+
+void
+StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
+{
+ bool keepInLocalMemory = false;
+ if (memStore)
+ keepInLocalMemory = memStore->keepInLocalMemory(e);
+ else
+ keepInLocalMemory = keepForLocalMemoryCache(e);
+
+ debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
+
+ if (!keepInLocalMemory)
+ e.trimMemory(preserveSwappable);
+}
+
void
StoreController::handleIdleEntry(StoreEntry &e)
{
memStore->considerKeeping(e);
// leave keepInLocalMemory false; memStore maintains its own cache
} else {
- keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
+ keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
// the local memory cache is not overflowing
(mem_node::InUseCount() <= store_pages_max);
}
const bool weAreOrMayBeSwappingOut = swappingOut() || mayStartSwapOut();
- trimMemory(weAreOrMayBeSwappingOut);
+ Store::Root().maybeTrimMemory(*this, weAreOrMayBeSwappingOut);
if (!weAreOrMayBeSwappingOut)
return; // nothing else to do
MemStore::MemStore() STUB
MemStore::~MemStore() STUB
+bool MemStore::keepInLocalMemory(const StoreEntry &) const STUB_RETVAL(false)
void MemStore::considerKeeping(StoreEntry &) STUB
void MemStore::reference(StoreEntry &) STUB
void MemStore::maintain() STUB