return list.begin()->second;
}
+ if ( is_new )
+ *is_new = false;
stats.cache_hits++;
list.splice(list.begin(), list, it->second);
return list.begin()->second;
// Remove entry associated with Key.
// Returns true if entry existed, false otherwise.
- virtual bool remove(const Key&);
+ // If new_size is non-null, it is set to the number of entries left in the
+ // cache, whether or not an entry was removed.
+ virtual bool remove(const Key&, size_t* new_size = nullptr);
// Remove entry associated with key and return removed data.
// Returns true and copy of data if entry existed. Returns false if
// entry did not exist.
- virtual bool remove(const Key&, Data&);
+ // If new_size is non-null, it is set to the number of entries left in the
+ // cache, whether or not an entry was removed.
+ virtual bool remove(const Key&, Data&, size_t* new_size = nullptr);
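+ //
+ // Usage sketch (illustrative only; 'cache', 'key' and 'update_size_gauge' are
+ // hypothetical caller-side names, not part of this change):
+ //   size_t entries_left = 0;
+ //   if (cache.remove(key, &entries_left))
+ //       update_size_gauge(entries_left);  // entry count right after the removal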
const PegInfo* get_pegs() const
{ return lru_cache_shared_peg_names; }
auto map_iter = map.find(key);
if (map_iter != map.end())
{
+ if ( new_data )
+ *new_data = false;
// coverity[missing_lock]
stats.find_hits++;
list.splice(list.begin(), list, map_iter->second); // update LRU
}
template<typename Key, typename Value, typename Hash, typename Eq, typename Purgatory>
-bool LruCacheShared<Key, Value, Hash, Eq, Purgatory>::remove(const Key& key)
+bool LruCacheShared<Key, Value, Hash, Eq, Purgatory>::remove(const Key& key, size_t* new_size)
{
// There is a potential race condition here, when the destructor of
// the object being removed needs to call back into the cache and lock
auto map_iter = map.find(key);
if (map_iter == map.end())
+ {
+ if (new_size)
+ *new_size = list.size();
+
return false; // Key is not in LruCache.
+ }
data = map_iter->second->second;
map.erase(map_iter);
stats.removes++;
+ if (new_size)
+ *new_size = list.size();
+
assert( data.use_count() > 0 );
// Now, data can go out of scope and if it needs to lock again while
}
template<typename Key, typename Value, typename Hash, typename Eq, typename Purgatory>
-bool LruCacheShared<Key, Value, Hash, Eq, Purgatory>::remove(const Key& key, Data& data)
+bool LruCacheShared<Key, Value, Hash, Eq, Purgatory>::remove(const Key& key, Data& data, size_t* new_size)
{
std::lock_guard<std::mutex> cache_lock(cache_mutex);
auto map_iter = map.find(key);
if (map_iter == map.end())
+ {
+ if (new_size)
+ *new_size = list.size();
+
return false; // Key is not in LruCache.
+ }
data = map_iter->second->second;
map.erase(map_iter);
stats.removes++;
+ if (new_size)
+ *new_size = list.size();
+
assert( data.use_count() > 0 );
return true;
return (*segments[segment_idx])[key];
}
- bool remove(const Key& key)
+ bool remove(const Key& key, size_t* new_size = nullptr)
{
std::size_t segment_idx = get_segment_idx(key);
- return segments[segment_idx]->remove(key);
+ return segments[segment_idx]->remove(key, new_size);
}
- bool remove(const Key& key, Data& data)
+ bool remove(const Key& key, Data& data, size_t* new_size = nullptr)
{
std::size_t idx = get_segment_idx(key);
- return segments[idx]->remove(key, data);
+ return segments[idx]->remove(key, data, new_size);
}
Data find_else_create(const Key& key, bool* new_data)
CHECK(vec[0].first == 3 and vec[0].second == 300);
CHECK(vec[1].first == 2 and vec[1].second == 200);
+ bool is_new = false;
+
// Check non-existent entry; find_else_create() would return a new entry
- auto& entry = lru_cache.find_else_create(4);
+ auto& entry = lru_cache.find_else_create(4, &is_new);
+ CHECK_EQUAL(true, is_new);
entry = 400;
+
// Subsequent calls would return the same one
- CHECK(lru_cache.find_else_create(4) == 400);
+ CHECK(lru_cache.find_else_create(4, &is_new) == 400);
+ CHECK_EQUAL(false, is_new);
entry = 4000;
- CHECK(lru_cache.find_else_create(4) == 4000);
+
+ is_new = true;
+ CHECK(lru_cache.find_else_create(4, &is_new) == 4000);
+ CHECK_EQUAL(false, is_new);
// The cache is changed after the above calls
vec.clear();
TEST(lru_cache_shared, remove_test)
{
std::string data;
+ size_t cache_size = 0;
LruCacheShared<int, std::string, std::hash<int> > lru_cache(5);
for (int i = 0; i < 5; i++)
std::shared_ptr<std::string> data_ptr;
lru_cache[1]->assign("one");
CHECK(1 == lru_cache.size());
- CHECK(true == lru_cache.remove(1, data_ptr));
+ CHECK(true == lru_cache.remove(1, data_ptr, &cache_size));
+ CHECK_EQUAL(0, cache_size);
CHECK(*data_ptr == "one");
CHECK(0 == lru_cache.size());
// Verify that removing an item that does not exist does not affect
// cache.
- CHECK(false == lru_cache.remove(3));
- CHECK(false == lru_cache.remove(4, data_ptr));
+ cache_size = 0;
+ CHECK(false == lru_cache.remove(3, &cache_size));
+ CHECK_EQUAL(2, cache_size);
+ cache_size = 0;
+ CHECK(false == lru_cache.remove(4, data_ptr, &cache_size));
+ CHECK_EQUAL(2, cache_size);
CHECK(2 == lru_cache.size());
auto vec = lru_cache.get_all_data();
CHECK(2 == lru_cache.size());
}
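+
+// Note: LruCacheShared::find_else_create() hands out a shared_ptr (Data), so the
+// handle obtained below stays valid independently of the cache, unlike the plain
+// LruCache variant exercised earlier, which returns a reference to the stored value.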
+TEST(lru_cache_shared, find_else_create)
+{
+ LruCacheShared<int, int, std::hash<int>> lru_cache(3);
+ bool is_new = false;
+
+ auto entry = lru_cache.find_else_create(4, &is_new);
+ CHECK_EQUAL(true, is_new);
+ *entry = 400;
+
+ // Subsequent calls would return the same one
+ entry = lru_cache.find_else_create(4, &is_new);
+ CHECK_EQUAL(400, *entry);
+ CHECK_EQUAL(false, is_new);
+ CHECK_EQUAL(1, lru_cache.size());
+}
+
// Test statistics counters.
TEST(lru_cache_shared, stats_test)
{
+ size_t cache_size = 0;
LruCacheShared<int, std::string, std::hash<int> > lru_cache(5);
for (int i = 0; i < 10; i++)
CHECK(lru_cache.set_max_size(3) == true); // change size prunes; in addition to previous 5
- lru_cache.remove(7); // Removes - hit
- lru_cache.remove(10); // Removes - miss
+ CHECK_EQUAL(false, lru_cache.remove(10, &cache_size)); // Removes - miss
+ CHECK_EQUAL(3, cache_size);
+
+ CHECK_EQUAL(true, lru_cache.remove(7, &cache_size)); // Removes - hit
+ CHECK_EQUAL(2, cache_size);
const PegCount* stats = lru_cache.get_counts();
std::shared_ptr<std::string> data(new std::string("one"));
lru_cache.find_else_insert(1,data);
CHECK(1 == lru_cache.size());
+
+ // Try to remove an entry that doesn't exist from the same segment.
+ size_t cache_size = 0;
+ CHECK(false == lru_cache.remove(5, data_ptr, &cache_size));
+ CHECK_EQUAL(1, cache_size);
+
+ // Remove an entry that does exist.
CHECK(true == lru_cache.remove(1, data_ptr));
CHECK(*data_ptr == "one");
CHECK(0 == lru_cache.size());
}
// Test 8 segments
-TEST (segmented_lru_cache, segements_8)
+TEST (segmented_lru_cache, segments_8)
{
SegmentedLruCache<int, std::string> lru_cache(1024,8);
std::shared_ptr<std::string> data(new std::string("12345"));
CHECK(lru_cache.set_max_size(16) == true);
- lru_cache.remove(7);
- lru_cache.remove(8);
- lru_cache.remove(11); // not in cache
+ size_t cache_size = 0;
+ lru_cache.remove(7, &cache_size);
+ CHECK_EQUAL(1, cache_size);
+
+ cache_size = 0;
+ lru_cache.remove(8, &cache_size);
+ CHECK_EQUAL(1, cache_size);
+
+ cache_size = 0;
+ lru_cache.remove(11, &cache_size); // not in cache
+ CHECK_EQUAL(1, cache_size);
const PegCount* stats = lru_cache.get_counts();
public:
HostCacheIp(const size_t initial_size) : HostCacheIpSpec(initial_size) { }
- bool remove(const KeyType& key) override
+ bool remove(const KeyType& key, size_t* new_size = nullptr) override
{
LruBase::Data data;
- return remove(key, data);
+ return remove(key, data, new_size);
}
- bool remove(const KeyType& key, LruBase::Data& data) override
+ bool remove(const KeyType& key, LruBase::Data& data, size_t* new_size = nullptr) override
{
- bool out = LruBase::remove(key, data);
+ bool out = LruBase::remove(key, data, new_size);
data->remove_flows();
return out;
}
std::shared_ptr<Value> find_else_create(const Key& key, bool* new_data);
std::vector<std::pair<Key, std::shared_ptr<Value>>> get_all_data();
bool find_else_insert(const Key& key, std::shared_ptr<Value>& value);
- bool remove(const Key& key);
+ bool remove(const Key& key, size_t* new_size = nullptr);
bool remove(const Key& key, typename LruCacheSharedMemcap
- <snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data);
+ <snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data, size_t* new_size = nullptr);
size_t mem_size();
std::vector<HostCacheIp*> seg_list;
}
template<typename Key, typename Value>
-bool HostCacheSegmented<Key, Value>::remove(const Key& key)
+bool HostCacheSegmented<Key, Value>::remove(const Key& key, size_t* new_size)
{
uint8_t idx = get_segment_idx(key);
- return seg_list[idx]->remove(key);
+ return seg_list[idx]->remove(key, new_size);
}
template<typename Key, typename Value>
-bool HostCacheSegmented<Key, Value>::remove(const Key& key, typename LruCacheSharedMemcap<snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data)
+bool HostCacheSegmented<Key, Value>::remove(const Key& key, typename LruCacheSharedMemcap<snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data, size_t* new_size)
{
uint8_t idx = get_segment_idx(key);
- return seg_list[idx]->remove(key, data);
+ return seg_list[idx]->remove(key, data, new_size);
}
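+
+// Usage sketch (illustrative only; 'host_cache', 'ip' and 'log_entries' are
+// caller-side names used here for illustration). Since each segment is its own
+// LRU cache, new_size reports the entry count of the segment that owned the key:
+//   size_t seg_entries = 0;
+//   if (host_cache.remove(ip, &seg_entries))
+//       log_entries(seg_entries);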
/*
// any such reference will inherently break the deadlock. We want to see
// that this mechanism is built into the remove function.
- bool res = host_cache.remove(ip);
+ size_t cache_size = 0;
+ bool res = host_cache.remove(ip, &cache_size);
CHECK(res == true);
+ CHECK_EQUAL(3, cache_size);
// And, finally, a remove with a reference too:
i--;
ip.set(hk);
HostCacheIp::Data ht_ptr;
- res = host_cache.remove(ip, ht_ptr);
+ res = host_cache.remove(ip, ht_ptr, &cache_size);
CHECK(res == true);
CHECK(ht_ptr != nullptr);
+ CHECK_EQUAL(2, cache_size);
}
int main(int argc, char** argv)
// cache, because sum_stats resets the pegs.
module.sum_stats(true);
- // add 3 entries to segment 3
- SfIp ip1, ip2, ip3;
+ // add 4 entries to segment 3 and remove 1
+ SfIp ip1, ip2, ip3, ip4;
ip1.set("1.1.1.1");
ip2.set("2.2.2.2");
ip3.set("3.3.3.3");
+ ip4.set("4.4.4.4");
+
+ bool is_new = false;
+ host_cache.find_else_create(ip1, &is_new);
+ CHECK(is_new);
+ host_cache.find_else_create(ip2, &is_new);
+ CHECK(is_new);
+ host_cache.find_else_create(ip3, &is_new);
+ CHECK(is_new);
+ host_cache.find_else_create(ip4, &is_new);
+ CHECK(is_new);
+
+ size_t cache_size = 0;
+ CHECK_EQUAL(true, host_cache.remove(ip4, &cache_size));
+ CHECK_EQUAL(3, cache_size);
- host_cache.find_else_create(ip1, nullptr);
- host_cache.find_else_create(ip2, nullptr);
- host_cache.find_else_create(ip3, nullptr);
module.sum_stats(true); // does not call reset
- CHECK(ht_stats[0] == 3);
- CHECK(ht_stats[2] == 3*mc); // bytes_in_use
- CHECK(ht_stats[3] == 3); // items_in_use
+ CHECK_EQUAL(4, ht_stats[0]); // adds
+ CHECK_EQUAL(3*mc, ht_stats[2]); // bytes_in_use
+ CHECK_EQUAL(3, ht_stats[3]); // items_in_use
// no pruning needed for resizing higher than current size in segment 3
CHECK(host_cache.seg_list[2]->reload_resize(host_cache.get_mem_chunk() * 10 ) == false);
CHECK(ht_stats[3] == 1); // one left
// alloc_prune 1 entry
- host_cache.find_else_create(ip1, nullptr);
+ is_new = false;
+ host_cache.find_else_create(ip1, &is_new);
+ CHECK_EQUAL(true, is_new);
// 1 hit, 1 remove
- host_cache.find_else_create(ip1, nullptr);
- host_cache.remove(ip1);
+ host_cache.find_else_create(ip1, &is_new);
+ CHECK_EQUAL(false, is_new);
+
+ host_cache.remove(ip1, &cache_size);
+ CHECK_EQUAL(0, cache_size);
module.sum_stats(true);
- CHECK(ht_stats[0] == 4); // 4 adds
+ CHECK(ht_stats[0] == 5); // 5 adds
CHECK(ht_stats[1] == 1); // 1 alloc_prunes
CHECK(ht_stats[2] == 0); // 0 bytes_in_use
CHECK(ht_stats[3] == 0); // 0 items_in_use
CHECK(ht_stats[4] == 1); // 1 hit
- CHECK(ht_stats[5] == 4); // 4 misses
+ CHECK(ht_stats[5] == 5); // 5 misses
CHECK(ht_stats[6] == 2); // 2 reload_prunes
- CHECK(ht_stats[7] == 1); // 1 remove
+ CHECK(ht_stats[7] == 2); // 2 removes
ht_stats = module.get_counts();
- CHECK(ht_stats[0] == 4);
+ CHECK(ht_stats[0] == 5); // adds
}