From: Mike Stepanek (mstepane) Date: Fri, 7 Feb 2020 17:53:42 +0000 (+0000) Subject: Merge pull request #1979 in SNORT/snort3 from ~MASHASAN/snort3:reload_host_cache... X-Git-Tag: 3.0.0-268~18 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6d90d33d3b19e50991ad9ba8af521550bc401407;p=thirdparty%2Fsnort3.git Merge pull request #1979 in SNORT/snort3 from ~MASHASAN/snort3:reload_host_cache to master Squashed commit of the following: commit 11e7d21da60752a0281cd6e74485c5ce2cebe140 Author: Masud Hasan Date: Thu Jan 30 14:24:14 2020 -0500 host_tracker: Support host_cache reload with RRT when memcap changes --- diff --git a/src/hash/lru_cache_shared.cc b/src/hash/lru_cache_shared.cc index d7382d2c4..18ecae99e 100644 --- a/src/hash/lru_cache_shared.cc +++ b/src/hash/lru_cache_shared.cc @@ -26,10 +26,11 @@ const PegInfo lru_cache_shared_peg_names[] = { - { CountType::SUM, "lru_cache_adds", "lru cache added new entry" }, - { CountType::SUM, "lru_cache_prunes", "lru cache pruned entry to make space for new entry" }, - { CountType::SUM, "lru_cache_find_hits", "lru cache found entry in cache" }, - { CountType::SUM, "lru_cache_find_misses", "lru cache did not find entry in cache" }, - { CountType::SUM, "lru_cache_removes", "lru cache found entry and removed it" }, + { CountType::SUM, "adds", "lru cache added new entry" }, + { CountType::SUM, "alloc_prunes", "lru cache pruned entry to make space for new entry" }, + { CountType::SUM, "find_hits", "lru cache found entry in cache" }, + { CountType::SUM, "find_misses", "lru cache did not find entry in cache" }, + { CountType::SUM, "reload_prunes", "lru cache pruned entry for lower memcap during reload" }, + { CountType::SUM, "removes", "lru cache found entry and removed it" }, { CountType::END, nullptr, nullptr }, }; diff --git a/src/hash/lru_cache_shared.h b/src/hash/lru_cache_shared.h index 112ef261c..32bd2167a 100644 --- a/src/hash/lru_cache_shared.h +++ b/src/hash/lru_cache_shared.h @@ -39,12 +39,12 @@ 
extern const PegInfo lru_cache_shared_peg_names[]; struct LruCacheSharedStats { - PegCount adds = 0; // An insert that added new entry. - PegCount prunes = 0; // When an old entry is removed to make - // room for a new entry. - PegCount find_hits = 0; // Found entry in cache. - PegCount find_misses = 0; // Did not find entry in cache. - PegCount removes = 0; // Found entry and removed it. + PegCount adds = 0; // an insert that added new entry + PegCount alloc_prunes = 0; // when an old entry is removed to make room for a new entry + PegCount find_hits = 0; // found entry in cache + PegCount find_misses = 0; // did not find entry in cache + PegCount reload_prunes = 0; // when an old entry is removed due to lower memcap during reload + PegCount removes = 0; // found entry and removed it }; template @@ -99,8 +99,8 @@ public: return max_size; } - // Modify the maximum number of entries allowed in the cache. - // If the size is reduced, the oldest entries are removed. + // Modify the maximum number of entries allowed in the cache. If the size is reduced, + // the oldest entries are removed. This pruning doesn't utilize reload resource tuner. bool set_max_size(size_t newsize); // Remove entry associated with Key. @@ -167,19 +167,20 @@ protected: current_size--; } - // Caller must lock and unlock. - void prune(std::list& data) + // Caller must lock and unlock. Don't use this during snort reload for which + // we need gradual pruning and size reduction via reload resource tuner. 
+ void prune(std::vector& data) { LruListIter list_iter; assert(data.empty()); while (current_size > max_size && !list.empty()) { list_iter = --list.end(); - data.push_back(list_iter->second); // increase reference count + data.emplace_back(list_iter->second); // increase reference count decrease_size(); map.erase(list_iter->first); list.erase(list_iter); - stats.prunes++; + ++stats.alloc_prunes; } } }; @@ -191,9 +192,9 @@ bool LruCacheShared::set_max_size(size_t newsize) return false; // Not allowed to set size to zero. // Like with remove(), we need local temporary references to data being - // deleted, to avoid race condition. This data list needs to self-destruct + // deleted, to avoid race condition. This data needs to self-destruct // after the cache_lock does. - std::list data; + std::vector data; std::lock_guard cache_lock(cache_mutex); @@ -242,7 +243,7 @@ find_else_create(const Key& key, bool* new_data) // unlocking the cache_mutex, because the cache must be locked when we // return the data pointer (below), or else, some other thread might // delete it before we got a chance to return it. 
- std::list tmp_data; + std::vector tmp_data; std::lock_guard cache_lock(cache_mutex); @@ -278,7 +279,7 @@ find_else_insert(const Key& key, std::shared_ptr& data) { LruMapIter map_iter; - std::list tmp_data; + std::vector tmp_data; std::lock_guard cache_lock(cache_mutex); map_iter = map.find(key); diff --git a/src/hash/test/lru_cache_shared_test.cc b/src/hash/test/lru_cache_shared_test.cc index a7e3cd843..11c6de1eb 100644 --- a/src/hash/test/lru_cache_shared_test.cc +++ b/src/hash/test/lru_cache_shared_test.cc @@ -179,18 +179,20 @@ TEST(lru_cache_shared, stats_test) PegCount* stats = lru_cache.get_counts(); CHECK(stats[0] == 10); // adds - CHECK(stats[1] == 7); // prunes + CHECK(stats[1] == 7); // alloc prunes CHECK(stats[2] == 3); // find hits CHECK(stats[3] == 12); // find misses - CHECK(stats[4] == 1); // removes + CHECK(stats[4] == 0); // reload prunes + CHECK(stats[5] == 1); // removes // Check statistics names. const PegInfo* pegs = lru_cache.get_pegs(); - CHECK(!strcmp(pegs[0].name, "lru_cache_adds")); - CHECK(!strcmp(pegs[1].name, "lru_cache_prunes")); - CHECK(!strcmp(pegs[2].name, "lru_cache_find_hits")); - CHECK(!strcmp(pegs[3].name, "lru_cache_find_misses")); - CHECK(!strcmp(pegs[4].name, "lru_cache_removes")); + CHECK(!strcmp(pegs[0].name, "adds")); + CHECK(!strcmp(pegs[1].name, "alloc_prunes")); + CHECK(!strcmp(pegs[2].name, "find_hits")); + CHECK(!strcmp(pegs[3].name, "find_misses")); + CHECK(!strcmp(pegs[4].name, "reload_prunes")); + CHECK(!strcmp(pegs[5].name, "removes")); } int main(int argc, char** argv) diff --git a/src/host_tracker/host_cache.h b/src/host_tracker/host_cache.h index 740514f4f..8e0abfa7c 100644 --- a/src/host_tracker/host_cache.h +++ b/src/host_tracker/host_cache.h @@ -51,6 +51,16 @@ class LruCacheSharedMemcap : public LruCacheShared, public Hos { public: using LruBase = LruCacheShared; + using LruBase::cache_mutex; + using LruBase::current_size; + using LruBase::list; + using LruBase::map; + using LruBase::max_size; + using 
LruBase::mem_chunk; + using LruBase::stats; + using Data = typename LruBase::Data; + using LruListIter = typename LruBase::LruListIter; + using ValueType = typename LruBase::ValueType; LruCacheSharedMemcap() = delete; LruCacheSharedMemcap(const LruCacheSharedMemcap& arg) = delete; @@ -75,13 +85,60 @@ public: } - using Data = typename LruBase::Data; - using ValueType = typename LruBase::ValueType; + // If the new memcap causes pruning, don't modify max_size and return true signifying that + // a gradual pruning/resizing is needed. Otherwise, modify max_size and return false. + bool reload_resize(size_t new_size) + { + if ( current_size > new_size ) + return true; - using LruBase::current_size; - using LruBase::max_size; - using LruBase::mem_chunk; - using LruBase::cache_mutex; + std::lock_guard cache_lock(cache_mutex); + max_size = new_size; + return false; + } + + // Prune a few entries at each call and set the max_size to the current watermark. + // Return true when the desired memcap is reached. + bool reload_prune(size_t new_size, unsigned max_prune) + { + std::unique_lock reload_lock(reload_mutex, std::try_to_lock); + if ( !reload_lock.owns_lock() ) + return false; // some other thread wins this round + + // Since decrease_size() does not account associated objects in host_tracker, + // we may over-prune if we remove max_prune entries in a single attempt. Instead, + // we acquire lock, hold data, release lock, and delete data in each iteration. + while ( max_prune-- > 0 ) + { + // Get a local temporary reference of data being deleted (as if a trash can). + // To avoid race condition, data needs to self-destruct after the cache_lock does. 
+ Data data; + std::lock_guard cache_lock(cache_mutex); + + if ( !list.empty() ) + { + max_size = current_size; + if ( max_size > new_size ) + { + LruListIter list_iter = --list.end(); + data = list_iter->second; // increase reference count + decrease_size(); + max_size -= mem_chunk; // in sync with current_size + map.erase(list_iter->first); + list.erase(list_iter); + ++stats.reload_prunes; + } + } + + if ( max_size <= new_size or list.empty() ) + { + max_size = new_size; + return true; + } + } + + return false; + } template friend class HostCacheAllocIp; @@ -117,7 +174,7 @@ private: // to hold the pruned data until after the cache is unlocked. // Do not change the order of data and cache_lock, as the data must // self destruct after cache_lock. - std::list data; + std::vector data; std::lock_guard cache_lock(cache_mutex); LruBase::prune(data); } @@ -134,6 +191,8 @@ private: current_size -= mem_chunk; } + std::mutex reload_mutex; + friend class TEST_host_cache_module_misc_Test; // for unit test }; typedef LruCacheSharedMemcap HostCacheIp; diff --git a/src/host_tracker/host_cache_module.cc b/src/host_tracker/host_cache_module.cc index 54ad42897..9b72fdca3 100644 --- a/src/host_tracker/host_cache_module.cc +++ b/src/host_tracker/host_cache_module.cc @@ -32,8 +32,6 @@ #include "managers/module_manager.h" #include "utils/util.h" -#include "host_cache.h" - using namespace snort; using namespace std; @@ -84,9 +82,13 @@ static const Parameter host_cache_params[] = bool HostCacheModule::set(const char*, Value& v, SnortConfig*) { if ( v.is("dump_file") ) + { + if ( dump_file ) + snort_free((void*)dump_file); dump_file = snort_strdup(v.get_string()); + } else if ( v.is("memcap") ) - host_cache_size = v.get_uint32(); + hc_rrt.memcap = v.get_uint32(); else return false; @@ -95,15 +97,18 @@ bool HostCacheModule::set(const char*, Value& v, SnortConfig*) bool HostCacheModule::begin(const char*, int, SnortConfig*) { - host_cache_size = 0; + hc_rrt.memcap = 0; return true; } -bool 
HostCacheModule::end(const char* fqn, int, SnortConfig*) +bool HostCacheModule::end(const char* fqn, int, SnortConfig* sc) { - if ( host_cache_size && !strcmp(fqn, HOST_CACHE_NAME) ) + if ( hc_rrt.memcap && !strcmp(fqn, HOST_CACHE_NAME) ) { - host_cache.set_max_size(host_cache_size); + if ( Snort::is_reloading() ) + sc->register_reload_resource_tuner(hc_rrt); + else + host_cache.set_max_size(hc_rrt.memcap); } return true; @@ -154,6 +159,10 @@ void HostCacheModule::log_host_cache(const char* file_name, bool verbose) string str; SfIpString ip_str; const auto&& lru_data = host_cache.get_all_data(); + // The current size may not exactly correspond to the number of trackers seen here + // as packet threads may continue to update cache, except when dumping upon exit or pause + out_stream << "Current host cache size: " << host_cache.mem_size() << " bytes, " + << lru_data.size() << " trackers" << endl << endl; for ( const auto& elem : lru_data ) { str = "IP: "; @@ -164,7 +173,7 @@ void HostCacheModule::log_host_cache(const char* file_name, bool verbose) out_stream.close(); if ( verbose ) - LogMessage("Dumped host cache of size = %lu to %s\n", lru_data.size(), file_name); + LogMessage("Dumped host cache to %s\n", file_name); } const PegInfo* HostCacheModule::get_pegs() const diff --git a/src/host_tracker/host_cache_module.h b/src/host_tracker/host_cache_module.h index 8514b4b23..e0f1bc2b3 100644 --- a/src/host_tracker/host_cache_module.h +++ b/src/host_tracker/host_cache_module.h @@ -24,10 +24,29 @@ // Loads host cache configuration data. 
#include "framework/module.h" +#include "main/snort.h" +#include "main/snort_config.h" + +#include "host_cache.h" #define HOST_CACHE_NAME "host_cache" #define HOST_CACHE_HELP "global LRU cache of host_tracker data about hosts" +class HostCacheReloadTuner : public snort::ReloadResourceTuner +{ +public: + bool tinit() override + { return host_cache.reload_resize(memcap); } + + bool tune_idle_context() override + { return host_cache.reload_prune(memcap, max_work_idle); } + + bool tune_packet_context() override + { return host_cache.reload_prune(memcap, max_work); } + + size_t memcap; +}; + class HostCacheModule : public snort::Module { public: @@ -50,7 +69,7 @@ public: private: const char* dump_file = nullptr; - uint32_t host_cache_size; + HostCacheReloadTuner hc_rrt; }; #endif diff --git a/src/host_tracker/test/host_cache_module_test.cc b/src/host_tracker/test/host_cache_module_test.cc index e2d63267a..06972676f 100644 --- a/src/host_tracker/test/host_cache_module_test.cc +++ b/src/host_tracker/test/host_cache_module_test.cc @@ -58,7 +58,8 @@ void LogMessage(const char* format,...) logged_message[LOG_MAX] = '\0'; } time_t packet_time() { return 0; } -} +bool Snort::is_reloading() { return false; } +} // end of namespace snort extern "C" { @@ -68,6 +69,12 @@ const char* luaL_optlstring(lua_State*, int, const char*, size_t*) { return null void show_stats(PegCount*, const PegInfo*, unsigned, const char*) { } void show_stats(PegCount*, const PegInfo*, const IndexVec&, const char*, FILE*) { } +template +HostCacheAllocIp::HostCacheAllocIp() +{ + lru = &host_cache; +} + TEST_GROUP(host_cache_module) { void setup() override @@ -81,26 +88,60 @@ TEST_GROUP(host_cache_module) } }; -// Test that HostCacheModule sets up host_cache size based on config. -TEST(host_cache_module, host_cache_module_test_values) +// Test stats when HostCacheModule sets/changes host_cache size. +// This method is a friend of LruCacheSharedMemcap class. 
+TEST(host_cache_module, misc) { Value size_val((double)2112); Parameter size_param = { "size", Parameter::PT_INT, nullptr, nullptr, "cache size" }; const PegInfo* ht_pegs = module.get_pegs(); const PegCount* ht_stats = module.get_counts(); - CHECK(!strcmp(ht_pegs[0].name, "lru_cache_adds")); - CHECK(!strcmp(ht_pegs[1].name, "lru_cache_prunes")); - CHECK(!strcmp(ht_pegs[2].name, "lru_cache_find_hits")); - CHECK(!strcmp(ht_pegs[3].name, "lru_cache_find_misses")); - CHECK(!strcmp(ht_pegs[4].name, "lru_cache_removes")); - CHECK(!ht_pegs[5].name); - - CHECK(ht_stats[0] == 0); - CHECK(ht_stats[1] == 0); - CHECK(ht_stats[2] == 0); - CHECK(ht_stats[3] == 0); - CHECK(ht_stats[4] == 0); + CHECK(!strcmp(ht_pegs[0].name, "adds")); + CHECK(!strcmp(ht_pegs[1].name, "alloc_prunes")); + CHECK(!strcmp(ht_pegs[2].name, "find_hits")); + CHECK(!strcmp(ht_pegs[3].name, "find_misses")); + CHECK(!strcmp(ht_pegs[4].name, "reload_prunes")); + CHECK(!strcmp(ht_pegs[5].name, "removes")); + CHECK(!ht_pegs[6].name); + + // add 3 entries + SfIp ip1, ip2, ip3; + ip1.set("1.1.1.1"); + ip2.set("2.2.2.2"); + ip3.set("3.3.3.3"); + host_cache.find_else_create(ip1, nullptr); + host_cache.find_else_create(ip2, nullptr); + host_cache.find_else_create(ip3, nullptr); + CHECK(ht_stats[0] == 3); + + // no pruning needed for resizing higher than current size + CHECK(host_cache.reload_resize(2048) == false); + + // pruning needed for resizing lower than current size + CHECK(host_cache.reload_resize(256) == true); + + // pruning is not done when reload_mutex is already locked + host_cache.reload_mutex.lock(); + CHECK(host_cache.reload_prune(256, 2) == false); + host_cache.reload_mutex.unlock(); + + // prune 2 entries when reload_mutex is not locked + CHECK(host_cache.reload_prune(256, 2) == true); + + // alloc_prune 1 entry + host_cache.find_else_create(ip1, nullptr); + + // 1 hit, 1 remove + host_cache.find_else_create(ip1, nullptr); + host_cache.remove(ip1); + + CHECK(ht_stats[0] == 4); // 4 adds + 
CHECK(ht_stats[1] == 1); // 1 alloc_prunes + CHECK(ht_stats[2] == 1); // 1 hit + CHECK(ht_stats[3] == 4); // 4 misses + CHECK(ht_stats[4] == 2); // 2 reload_prunes + CHECK(ht_stats[5] == 1); // 1 remove size_val.set(&size_param); @@ -110,7 +151,7 @@ TEST(host_cache_module, host_cache_module_test_values) module.end("host_cache", 0, nullptr); ht_stats = module.get_counts(); - CHECK(ht_stats[0] == 0); + CHECK(ht_stats[0] == 4); } TEST(host_cache_module, log_host_cache_messages) @@ -122,7 +163,7 @@ TEST(host_cache_module, log_host_cache_messages) STRCMP_EQUAL(logged_message, "Couldn't open nowhere/host_cache.dump to write!\n"); module.log_host_cache("host_cache.dump", true); - STRCMP_EQUAL(logged_message, "Dumped host cache of size = 0 to host_cache.dump\n"); + STRCMP_EQUAL(logged_message, "Dumped host cache to host_cache.dump\n"); module.log_host_cache("host_cache.dump", true); STRCMP_EQUAL(logged_message, "File host_cache.dump already exists!\n"); diff --git a/src/main/request.cc b/src/main/request.cc index d302145df..9eb9be71a 100644 --- a/src/main/request.cc +++ b/src/main/request.cc @@ -32,12 +32,6 @@ using namespace snort; // request foo //------------------------------------------------------------------------- -Request::Request(int f) -{ - fd = f; - bytes_read = 0; -} - bool Request::read(const int& f) { bool newline_found = false; @@ -106,9 +100,10 @@ void Request::respond(const char* s, bool queue_response, bool remote_only) #ifdef SHELL void Request::send_queued_response() { - if ( queued_response ) + const char* qr = queued_response; + if ( qr ) { - write_response(queued_response); + write_response(qr); queued_response = nullptr; } } diff --git a/src/main/request.h b/src/main/request.h index 707048b63..657db6d1c 100644 --- a/src/main/request.h +++ b/src/main/request.h @@ -22,12 +22,14 @@ #ifndef REQUEST_H #define REQUEST_H +#include + #include "main/snort_types.h" class Request { public: - Request(int f = -1); + Request(int f = -1) : fd(f), bytes_read(0), 
queued_response(nullptr) { } bool read(const int&); const char* get() { return read_buf; } @@ -41,6 +43,6 @@ private: int fd; char read_buf[1024]; size_t bytes_read; - const char* queued_response = nullptr; + std::atomic queued_response; }; #endif diff --git a/src/stream/base/stream_module.cc b/src/stream/base/stream_module.cc index a76bb0f5c..c8ba5a7c9 100644 --- a/src/stream/base/stream_module.cc +++ b/src/stream/base/stream_module.cc @@ -218,9 +218,7 @@ bool StreamReloadResourceManager::initialize(const StreamModuleConfig& config_) bool StreamReloadResourceManager::tinit() { - bool must_tune = false; - - max_flows_change = + int max_flows_change = config.flow_cache_cfg.max_flows - flow_con->get_flow_cache_config().max_flows; if ( max_flows_change ) { @@ -230,10 +228,10 @@ bool StreamReloadResourceManager::tinit() stream_base_stats.reload_total_adds += max_flows_change; flow_con->set_flow_cache_config(config.flow_cache_cfg); - must_tune = true; + return true; } - return must_tune; + return false; } bool StreamReloadResourceManager::tune_packet_context() diff --git a/src/stream/base/stream_module.h b/src/stream/base/stream_module.h index c3e79548e..f359673c9 100644 --- a/src/stream/base/stream_module.h +++ b/src/stream/base/stream_module.h @@ -93,7 +93,6 @@ private: private: StreamModuleConfig config; - int max_flows_change = 0; }; class StreamModule : public snort::Module