const PegInfo lru_cache_shared_peg_names[] =
{
- { CountType::SUM, "lru_cache_adds", "lru cache added new entry" },
- { CountType::SUM, "lru_cache_prunes", "lru cache pruned entry to make space for new entry" },
- { CountType::SUM, "lru_cache_find_hits", "lru cache found entry in cache" },
- { CountType::SUM, "lru_cache_find_misses", "lru cache did not find entry in cache" },
- { CountType::SUM, "lru_cache_removes", "lru cache found entry and removed it" },
+ // Peg order must stay in sync with the field order of LruCacheSharedStats:
+ // the counts are exposed as a flat PegCount array and read positionally
+ // (stats[0], stats[1], ...) by callers and unit tests.
+ { CountType::SUM, "adds", "lru cache added new entry" },
+ { CountType::SUM, "alloc_prunes", "lru cache pruned entry to make space for new entry" },
+ { CountType::SUM, "find_hits", "lru cache found entry in cache" },
+ { CountType::SUM, "find_misses", "lru cache did not find entry in cache" },
+ { CountType::SUM, "reload_prunes", "lru cache pruned entry for lower memcap during reload" },
+ { CountType::SUM, "removes", "lru cache found entry and removed it" },
{ CountType::END, nullptr, nullptr },
};
struct LruCacheSharedStats
{
- PegCount adds = 0; // An insert that added new entry.
- PegCount prunes = 0; // When an old entry is removed to make
- // room for a new entry.
- PegCount find_hits = 0; // Found entry in cache.
- PegCount find_misses = 0; // Did not find entry in cache.
- PegCount removes = 0; // Found entry and removed it.
+ // Field order must match lru_cache_shared_peg_names: these counters are
+ // read as a flat PegCount array and indexed positionally.
+ PegCount adds = 0; // an insert that added new entry
+ PegCount alloc_prunes = 0; // when an old entry is removed to make room for a new entry
+ PegCount find_hits = 0; // found entry in cache
+ PegCount find_misses = 0; // did not find entry in cache
+ PegCount reload_prunes = 0; // when an old entry is removed due to lower memcap during reload
+ PegCount removes = 0; // found entry and removed it
};
template<typename Key, typename Value, typename Hash>
return max_size;
}
- // Modify the maximum number of entries allowed in the cache.
- // If the size is reduced, the oldest entries are removed.
+ // Modify the maximum number of entries allowed in the cache. If the size is reduced,
+ // the oldest entries are removed. This pruning doesn't utilize reload resource tuner.
bool set_max_size(size_t newsize);
// Remove entry associated with Key.
current_size--;
}
- // Caller must lock and unlock.
- void prune(std::list<Data>& data)
+ // Caller must lock and unlock. Don't use this during snort reload for which
+ // we need gradual pruning and size reduction via reload resource tuner.
+ // Evicted entries are handed back via 'data' so the caller can let them
+ // self-destruct after releasing the cache lock.
+ void prune(std::vector<Data>& data)
{
LruListIter list_iter;
assert(data.empty());
while (current_size > max_size && !list.empty())
{
list_iter = --list.end();
- data.push_back(list_iter->second); // increase reference count
+ data.emplace_back(list_iter->second); // increase reference count
decrease_size();
map.erase(list_iter->first);
list.erase(list_iter);
- stats.prunes++;
+ ++stats.alloc_prunes;
}
}
};
return false; // Not allowed to set size to zero.
// Like with remove(), we need local temporary references to data being
- // deleted, to avoid race condition. This data list needs to self-destruct
+ // deleted, to avoid race condition. This data needs to self-destruct
// after the cache_lock does.
- std::list<Data> data;
+ std::vector<Data> data;
std::lock_guard<std::mutex> cache_lock(cache_mutex);
// unlocking the cache_mutex, because the cache must be locked when we
// return the data pointer (below), or else, some other thread might
// delete it before we got a chance to return it.
- std::list<Data> tmp_data;
+ std::vector<Data> tmp_data;
std::lock_guard<std::mutex> cache_lock(cache_mutex);
{
LruMapIter map_iter;
- std::list<Data> tmp_data;
+ std::vector<Data> tmp_data;
std::lock_guard<std::mutex> cache_lock(cache_mutex);
map_iter = map.find(key);
PegCount* stats = lru_cache.get_counts();
CHECK(stats[0] == 10); // adds
- CHECK(stats[1] == 7); // prunes
+ CHECK(stats[1] == 7); // alloc prunes
CHECK(stats[2] == 3); // find hits
CHECK(stats[3] == 12); // find misses
- CHECK(stats[4] == 1); // removes
+ CHECK(stats[4] == 0); // reload prunes
+ CHECK(stats[5] == 1); // removes
// Check statistics names.
const PegInfo* pegs = lru_cache.get_pegs();
- CHECK(!strcmp(pegs[0].name, "lru_cache_adds"));
- CHECK(!strcmp(pegs[1].name, "lru_cache_prunes"));
- CHECK(!strcmp(pegs[2].name, "lru_cache_find_hits"));
- CHECK(!strcmp(pegs[3].name, "lru_cache_find_misses"));
- CHECK(!strcmp(pegs[4].name, "lru_cache_removes"));
+ CHECK(!strcmp(pegs[0].name, "adds"));
+ CHECK(!strcmp(pegs[1].name, "alloc_prunes"));
+ CHECK(!strcmp(pegs[2].name, "find_hits"));
+ CHECK(!strcmp(pegs[3].name, "find_misses"));
+ CHECK(!strcmp(pegs[4].name, "reload_prunes"));
+ CHECK(!strcmp(pegs[5].name, "removes"));
}
int main(int argc, char** argv)
{
public:
using LruBase = LruCacheShared<Key, Value, Hash>;
+ using LruBase::cache_mutex;
+ using LruBase::current_size;
+ using LruBase::list;
+ using LruBase::map;
+ using LruBase::max_size;
+ using LruBase::mem_chunk;
+ using LruBase::stats;
+ using Data = typename LruBase::Data;
+ using LruListIter = typename LruBase::LruListIter;
+ using ValueType = typename LruBase::ValueType;
LruCacheSharedMemcap() = delete;
LruCacheSharedMemcap(const LruCacheSharedMemcap& arg) = delete;
}
- using Data = typename LruBase::Data;
- using ValueType = typename LruBase::ValueType;
+ // If the new memcap causes pruning, don't modify max_size and return true signifying that
+ // a gradual pruning/resizing is needed. Otherwise, modify max_size and return false.
+ bool reload_resize(size_t new_size)
+ {
+ // NOTE(review): current_size is read here without cache_mutex held --
+ // confirm a stale read is benign (worst case appears to be one extra
+ // prune round or a deferred resize).
+ if ( current_size > new_size )
+ return true;
- using LruBase::current_size;
- using LruBase::max_size;
- using LruBase::mem_chunk;
- using LruBase::cache_mutex;
+ std::lock_guard<std::mutex> cache_lock(cache_mutex);
+ max_size = new_size;
+ return false;
+ }
+
+ // Prune a few entries at each call and set the max_size to the current watermark.
+ // Return true when the desired memcap is reached.
+ bool reload_prune(size_t new_size, unsigned max_prune)
+ {
+ std::unique_lock<std::mutex> reload_lock(reload_mutex, std::try_to_lock);
+ if ( !reload_lock.owns_lock() )
+ return false; // some other thread wins this round
+
+ // Since decrease_size() does not account associated objects in host_tracker,
+ // we may over-prune if we remove max_prune entries in a single attempt. Instead,
+ // we acquire lock, hold data, release lock, and delete data in each iteration.
+ while ( max_prune-- > 0 )
+ {
+ // Get a local temporary reference of data being deleted (as if a trash can).
+ // To avoid race condition, data needs to self-destruct after the cache_lock does.
+ Data data;
+ std::lock_guard<std::mutex> cache_lock(cache_mutex);
+
+ if ( !list.empty() )
+ {
+ max_size = current_size;
+ if ( max_size > new_size )
+ {
+ LruListIter list_iter = --list.end();
+ data = list_iter->second; // increase reference count
+ decrease_size();
+ max_size -= mem_chunk; // in sync with current_size
+ map.erase(list_iter->first);
+ list.erase(list_iter);
+ ++stats.reload_prunes;
+ }
+ }
+
+ if ( max_size <= new_size or list.empty() )
+ {
+ max_size = new_size;
+ return true;
+ }
+ }
+
+ return false;
+ }
template <class T>
friend class HostCacheAllocIp;
// to hold the pruned data until after the cache is unlocked.
// Do not change the order of data and cache_lock, as the data must
// self destruct after cache_lock.
- std::list<Data> data;
+ std::vector<Data> data;
std::lock_guard<std::mutex> cache_lock(cache_mutex);
LruBase::prune(data);
}
current_size -= mem_chunk;
}
+ std::mutex reload_mutex;
+ friend class TEST_host_cache_module_misc_Test; // for unit test
};
typedef LruCacheSharedMemcap<snort::SfIp, snort::HostTracker, HashIp> HostCacheIp;
#include "managers/module_manager.h"
#include "utils/util.h"
-#include "host_cache.h"
-
using namespace snort;
using namespace std;
bool HostCacheModule::set(const char*, Value& v, SnortConfig*)
{
if ( v.is("dump_file") )
+ {
+ if ( dump_file )
+ snort_free((void*)dump_file);
dump_file = snort_strdup(v.get_string());
+ }
else if ( v.is("memcap") )
- host_cache_size = v.get_uint32();
+ hc_rrt.memcap = v.get_uint32();
else
return false;
bool HostCacheModule::begin(const char*, int, SnortConfig*)
{
- host_cache_size = 0;
+ hc_rrt.memcap = 0; // reset staged memcap; end() only acts on a non-zero value
return true;
}
-bool HostCacheModule::end(const char* fqn, int, SnortConfig*)
+bool HostCacheModule::end(const char* fqn, int, SnortConfig* sc)
{
- if ( host_cache_size && !strcmp(fqn, HOST_CACHE_NAME) )
+ if ( hc_rrt.memcap && !strcmp(fqn, HOST_CACHE_NAME) )
{
- host_cache.set_max_size(host_cache_size);
+ if ( Snort::is_reloading() )
+ sc->register_reload_resource_tuner(hc_rrt);
+ else
+ host_cache.set_max_size(hc_rrt.memcap);
}
return true;
string str;
SfIpString ip_str;
const auto&& lru_data = host_cache.get_all_data();
+ // The current size may not exactly correspond to the number of trackers seen here
+ // as packet threads may continue to update cache, except when dumping upon exit or pause
+ out_stream << "Current host cache size: " << host_cache.mem_size() << " bytes, "
+ << lru_data.size() << " trackers" << endl << endl;
for ( const auto& elem : lru_data )
{
str = "IP: ";
out_stream.close();
if ( verbose )
- LogMessage("Dumped host cache of size = %lu to %s\n", lru_data.size(), file_name);
+ LogMessage("Dumped host cache to %s\n", file_name);
}
const PegInfo* HostCacheModule::get_pegs() const
// Loads host cache configuration data.
#include "framework/module.h"
+#include "main/snort.h"
+#include "main/snort_config.h"
+
+#include "host_cache.h"
#define HOST_CACHE_NAME "host_cache"
#define HOST_CACHE_HELP "global LRU cache of host_tracker data about hosts"
+// Glue between the host cache and snort's reload framework: tinit() attempts
+// an immediate resize; if gradual pruning is needed, the tune_*_context()
+// callbacks prune a few entries per call until the new memcap is reached.
+class HostCacheReloadTuner : public snort::ReloadResourceTuner
+{
+public:
+    // true means gradual pruning is still required (see reload_resize)
+    bool tinit() override
+    { return host_cache.reload_resize(memcap); }
+
+    bool tune_idle_context() override
+    { return host_cache.reload_prune(memcap, max_work_idle); }
+
+    bool tune_packet_context() override
+    { return host_cache.reload_prune(memcap, max_work); }
+
+    // target cache size in bytes, staged from config before registration;
+    // default-initialized so a tuner never carries an indeterminate memcap
+    size_t memcap = 0;
+};
+
class HostCacheModule : public snort::Module
{
public:
private:
const char* dump_file = nullptr;
- uint32_t host_cache_size;
+ HostCacheReloadTuner hc_rrt; // staged memcap from config; applied directly or registered as a reload tuner in end()
};
#endif
logged_message[LOG_MAX] = '\0';
}
time_t packet_time() { return 0; }
-}
+bool Snort::is_reloading() { return false; }
+} // end of namespace snort
extern "C"
{
void show_stats(PegCount*, const PegInfo*, unsigned, const char*) { }
void show_stats(PegCount*, const PegInfo*, const IndexVec&, const char*, FILE*) { }
+// Bind the allocator to the global host_cache so allocations are accounted
+// against its memcap.
+template <class T>
+HostCacheAllocIp<T>::HostCacheAllocIp()
+{
+ lru = &host_cache;
+}
+
TEST_GROUP(host_cache_module)
{
void setup() override
}
};
-// Test that HostCacheModule sets up host_cache size based on config.
-TEST(host_cache_module, host_cache_module_test_values)
+// Test stats when HostCacheModule sets/changes host_cache size.
+// This method is a friend of LruCacheSharedMemcap class.
+TEST(host_cache_module, misc)
{
Value size_val((double)2112);
Parameter size_param = { "size", Parameter::PT_INT, nullptr, nullptr, "cache size" };
const PegInfo* ht_pegs = module.get_pegs();
const PegCount* ht_stats = module.get_counts();
- CHECK(!strcmp(ht_pegs[0].name, "lru_cache_adds"));
- CHECK(!strcmp(ht_pegs[1].name, "lru_cache_prunes"));
- CHECK(!strcmp(ht_pegs[2].name, "lru_cache_find_hits"));
- CHECK(!strcmp(ht_pegs[3].name, "lru_cache_find_misses"));
- CHECK(!strcmp(ht_pegs[4].name, "lru_cache_removes"));
- CHECK(!ht_pegs[5].name);
-
- CHECK(ht_stats[0] == 0);
- CHECK(ht_stats[1] == 0);
- CHECK(ht_stats[2] == 0);
- CHECK(ht_stats[3] == 0);
- CHECK(ht_stats[4] == 0);
+ CHECK(!strcmp(ht_pegs[0].name, "adds"));
+ CHECK(!strcmp(ht_pegs[1].name, "alloc_prunes"));
+ CHECK(!strcmp(ht_pegs[2].name, "find_hits"));
+ CHECK(!strcmp(ht_pegs[3].name, "find_misses"));
+ CHECK(!strcmp(ht_pegs[4].name, "reload_prunes"));
+ CHECK(!strcmp(ht_pegs[5].name, "removes"));
+ CHECK(!ht_pegs[6].name);
+
+ // add 3 entries
+ SfIp ip1, ip2, ip3;
+ ip1.set("1.1.1.1");
+ ip2.set("2.2.2.2");
+ ip3.set("3.3.3.3");
+ host_cache.find_else_create(ip1, nullptr);
+ host_cache.find_else_create(ip2, nullptr);
+ host_cache.find_else_create(ip3, nullptr);
+ CHECK(ht_stats[0] == 3);
+
+ // no pruning needed for resizing higher than current size
+ CHECK(host_cache.reload_resize(2048) == false);
+
+ // pruning needed for resizing lower than current size
+ CHECK(host_cache.reload_resize(256) == true);
+
+ // pruning is not done when reload_mutex is already locked
+ host_cache.reload_mutex.lock();
+ CHECK(host_cache.reload_prune(256, 2) == false);
+ host_cache.reload_mutex.unlock();
+
+ // prune 2 entries when reload_mutex is not locked
+ CHECK(host_cache.reload_prune(256, 2) == true);
+
+ // alloc_prune 1 entry
+ host_cache.find_else_create(ip1, nullptr);
+
+ // 1 hit, 1 remove
+ host_cache.find_else_create(ip1, nullptr);
+ host_cache.remove(ip1);
+
+ CHECK(ht_stats[0] == 4); // 4 adds
+ CHECK(ht_stats[1] == 1); // 1 alloc_prunes
+ CHECK(ht_stats[2] == 1); // 1 hit
+ CHECK(ht_stats[3] == 4); // 4 misses
+ CHECK(ht_stats[4] == 2); // 2 reload_prunes
+ CHECK(ht_stats[5] == 1); // 1 remove
size_val.set(&size_param);
module.end("host_cache", 0, nullptr);
ht_stats = module.get_counts();
- CHECK(ht_stats[0] == 0);
+ CHECK(ht_stats[0] == 4);
}
TEST(host_cache_module, log_host_cache_messages)
STRCMP_EQUAL(logged_message, "Couldn't open nowhere/host_cache.dump to write!\n");
module.log_host_cache("host_cache.dump", true);
- STRCMP_EQUAL(logged_message, "Dumped host cache of size = 0 to host_cache.dump\n");
+ STRCMP_EQUAL(logged_message, "Dumped host cache to host_cache.dump\n");
module.log_host_cache("host_cache.dump", true);
STRCMP_EQUAL(logged_message, "File host_cache.dump already exists!\n");
// request foo
//-------------------------------------------------------------------------
-Request::Request(int f)
-{
- fd = f;
- bytes_read = 0;
-}
-
bool Request::read(const int& f)
{
bool newline_found = false;
#ifdef SHELL
void Request::send_queued_response()
{
- if ( queued_response )
+ // NOTE(review): snapshotting here and then unconditionally clearing below
+ // can drop a response queued by another thread in between; if concurrent
+ // queuing is possible, queued_response.exchange(nullptr) would claim the
+ // pointer atomically -- confirm the threading model.
+ const char* qr = queued_response;
+ if ( qr )
{
- write_response(queued_response);
+ write_response(qr);
queued_response = nullptr;
}
}
#ifndef REQUEST_H
#define REQUEST_H
+#include <atomic>
+
#include "main/snort_types.h"
class Request
{
public:
- Request(int f = -1);
+ Request(int f = -1) : fd(f), bytes_read(0), queued_response(nullptr) { }
bool read(const int&);
const char* get() { return read_buf; }
int fd;
char read_buf[1024];
size_t bytes_read;
- const char* queued_response = nullptr;
+ std::atomic<const char*> queued_response; // NOTE(review): presumably queued and consumed on different threads, hence atomic -- confirm
};
#endif
bool StreamReloadResourceManager::tinit()
{
- bool must_tune = false;
-
- max_flows_change =
+ // signed delta: negative when the reload lowers max_flows
+ int max_flows_change =
config.flow_cache_cfg.max_flows - flow_con->get_flow_cache_config().max_flows;
if ( max_flows_change )
{
stream_base_stats.reload_total_adds += max_flows_change;
flow_con->set_flow_cache_config(config.flow_cache_cfg);
- must_tune = true;
+ return true; // cache config changed -- tuning required (was 'must_tune')
}
- return must_tune;
+ return false; // max_flows unchanged, nothing to tune
}
bool StreamReloadResourceManager::tune_packet_context()
private:
StreamModuleConfig config;
- int max_flows_change = 0;
};
class StreamModule : public snort::Module