// LruCacheShared -- Implements a thread-safe unordered map where the
// least-recently-used (LRU) entries are removed once a fixed size is hit.
#include <atomic>
#include <cassert>
#include <list>
#include <memory>
#include <mutex>
size_t max_size; // Once max_size elements are in the cache, start to
// remove the least-recently-used elements.
- size_t current_size; // Number of entries currently in the cache.
+ std::atomic<size_t> current_size;// Number of entries currently in the cache.
std::mutex cache_mutex;
LruList list; // Contains key/data pairs. Maintains LRU order with
struct LruCacheSharedStats stats;
// The reason for these functions is to allow derived classes to do their
// size bookkeeping differently (e.g. host_cache). This effectively
// decouples the current_size variable from the actual size in memory,
// so these functions should only be called when something is actually
// added to or removed from memory (e.g. in find_else_insert, remove, etc).
virtual void increase_size()
{
current_size++;
size_t mem_size() override
{
- std::lock_guard<std::mutex> cache_lock(cache_mutex);
return current_size;
}
// In concrete terms, never have a standalone HostTracker object outside
// the host cache add entries to or remove entries from itself, as that
// would incorrectly change the current_size of the cache.
void update(int size) override
{
- // Same idea as in LruCacheShared::remove(), use shared pointers
- // to hold the pruned data until after the cache is unlocked.
- // Do not change the order of data and cache_lock, as the data must
- // self destruct after cache_lock.
- std::list<Data> data;
-
- std::lock_guard<std::mutex> cache_lock(cache_mutex);
-
- if (size < 0)
- assert( current_size >= (size_t) -size );
- current_size += size;
- if (current_size > max_size)
+ if ( size < 0 )
+ {
+ assert( current_size >= (size_t) -size);
+ }
+ if ( (current_size += size) > max_size )
+ {
+ // Same idea as in LruCacheShared::remove(), use shared pointers
+ // to hold the pruned data until after the cache is unlocked.
+ // Do not change the order of data and cache_lock, as the data must
+ // self destruct after cache_lock.
+ std::list<Data> data;
+ std::lock_guard<std::mutex> cache_lock(cache_mutex);
LruBase::prune(data);
+ }
}
- // These get called only from within the LRU and assume the LRU is locked.
void increase_size() override
{
current_size += mem_chunk;