From: Raza Shafiq (rshafiq) Date: Mon, 12 May 2025 22:48:01 +0000 (+0000) Subject: Pull request #4698: flow: excess flows to allowlist X-Git-Tag: 3.8.1.0~13 X-Git-Url: http://git.ipfire.org/gitweb/gitweb.cgi?a=commitdiff_plain;h=821359e1a2c2efef02fe80fb38b4719761112ef7;p=thirdparty%2Fsnort3.git Pull request #4698: flow: excess flows to allowlist Merge in SNORT/snort3 from ~RSHAFIQ/snort3:flow_limit to master Squashed commit of the following: commit 4caf75c4bc3857e7588f823ef89035f97e518d8f Author: rshafiq Date: Mon Apr 7 18:31:23 2025 -0400 flow: add option to move excess flows to allowlist --- diff --git a/src/flow/flow.h b/src/flow/flow.h index ea789acaa..c65d569c9 100644 --- a/src/flow/flow.h +++ b/src/flow/flow.h @@ -529,6 +529,7 @@ public: // FIXIT-M privatize if possible bool binder_action_allow : 1; bool binder_action_block : 1; bool in_allowlist : 1; // Set if the flow is in the allowlist + bool allowed_on_excess : 1; // Set if the flow is allowed on excess } flags = {}; FlowState flow_state = FlowState::SETUP; diff --git a/src/flow/flow_cache.cc b/src/flow/flow_cache.cc index 53b34ff1f..340a6b7b2 100644 --- a/src/flow/flow_cache.cc +++ b/src/flow/flow_cache.cc @@ -499,7 +499,7 @@ unsigned FlowCache::prune_idle(time_t thetime, const Flow* save_me) } if ( PacketTracer::is_active() and pruned ) - PacketTracer::log("Flow: Pruned %u flows\n", pruned); + PacketTracer::log("Flow: Pruned idle %u flows\n", pruned); return pruned; } @@ -537,7 +537,7 @@ unsigned FlowCache::prune_unis(PktType pkt_type) } if ( PacketTracer::is_active() and pruned ) - PacketTracer::log("Flow: Pruned %u flows\n", pruned); + PacketTracer::log("Flow: Pruned uni %u flows\n", pruned); return pruned; } @@ -550,6 +550,7 @@ unsigned FlowCache::prune_excess(const Flow* save_me) assert(max_cap > 0); unsigned pruned = 0; + unsigned allowed = 0; // Initially skip offloads but if that doesn't work, the hash table is iterated from the // beginning again. Prune offloads at that point. 
@@ -558,11 +559,19 @@ unsigned FlowCache::prune_excess(const Flow* save_me) assert(total_lru_count < 8 * sizeof(checked_lrus_mask)); + uint8_t lru_idx = allowlist_lru_index; + uint8_t last_lru_idx = total_lru_count; + + if ( is_allowlist_on_excess() ) + { + max_cap += hash_table->get_node_count(allowlist_lru_index); + lru_idx = first_proto; + last_lru_idx = max_protocols; + } + { PacketTracerSuspend pt_susp; unsigned blocks = 0; - // EXCESS pruning will start from the allowlist LRU - uint8_t lru_idx = allowlist_lru_index; while ( true ) { @@ -571,7 +580,7 @@ unsigned FlowCache::prune_excess(const Flow* save_me) ignore_offloads == 0 or all_lrus_checked(checked_lrus_mask) ) break; - for (; lru_idx < total_lru_count; ++lru_idx) + for (; lru_idx < last_lru_idx; ++lru_idx) { num_nodes = hash_table->get_num_nodes(); if ( num_nodes <= max_cap or num_nodes <= blocks ) @@ -599,6 +608,12 @@ unsigned FlowCache::prune_excess(const Flow* save_me) // Ensure LRU list remains sorted by time on touch hash_table->lru_touch(lru_idx); } + else if ( allowlist_on_excess(flow) ) + { + pruned++; + max_cap++; + allowed++; + } else { flow->ssn_state.session_flags |= SSNFLAG_PRUNED; @@ -609,7 +624,7 @@ unsigned FlowCache::prune_excess(const Flow* save_me) --ignore_offloads; } - if ( lru_idx >= total_lru_count ) + if ( lru_idx >= last_lru_idx ) lru_idx = first_proto; } @@ -619,9 +634,13 @@ unsigned FlowCache::prune_excess(const Flow* save_me) } } - if ( PacketTracer::is_active() and pruned ) - PacketTracer::log("Flow: Pruned %u flows\n", pruned); - + if ( PacketTracer::is_active() ) + { + if ( allowed ) + PacketTracer::log("Flow: Moved %u flows to allowlist\n", allowed); + else if ( pruned ) + PacketTracer::log("Flow: Pruned excess %u flows\n", pruned); + } return pruned; } @@ -636,13 +655,12 @@ bool FlowCache::prune_one(PruneReason reason, bool do_cleanup, uint8_t type) return false; flow->ssn_state.session_flags |= SSNFLAG_PRUNED; - - if ( type != allowlist_lru_index ) - return release(flow, reason, do_cleanup); - else if ( reason == PruneReason::MEMCAP or reason == PruneReason::EXCESS ) - return release(flow, reason, do_cleanup); - - return false; + + bool flow_handled; + if ( handle_allowlist_pruning(flow, reason, type, flow_handled) ) + return flow_handled; + + return release(flow, reason, do_cleanup); } unsigned FlowCache::prune_multiple(PruneReason reason, bool do_cleanup) @@ -875,6 +893,48 @@ unsigned FlowCache::purge() return retired; } +bool FlowCache::allowlist_on_excess(snort::Flow *f) +{ + if ( is_allowlist_on_excess() ) + { + Stream::disable_reassembly(f); + f->free_flow_data(); + f->trust(); + f->last_verdict = DAQ_VERDICT_WHITELIST; + if ( move_to_allowlist(f) ) + { + excess_to_allowlist_count++; + f->flags.allowed_on_excess = true; + return true; + } + } + else if ( PacketTracer::is_active() and config.move_to_allowlist_on_excess and !config.allowlist_cache ) + PacketTracer::log("Flow: Warning! 
move_to_allowlist_on_excess is enabled with no allowlist cache\n"); + return false; +} + +bool FlowCache::handle_allowlist_pruning(snort::Flow* flow, PruneReason reason, uint8_t type, bool& flow_handled) +{ + flow_handled = true; + + if ( type == allowlist_lru_index ) + { + if ( reason == PruneReason::EXCESS ) + return is_allowlist_on_excess(); + else if ( reason != PruneReason::MEMCAP ) + { + flow_handled = false; + return true; + } + return false; + } + + else if ( reason == PruneReason::EXCESS ) + return allowlist_on_excess(flow); + + return false; +} + std::string FlowCache::timeout_to_str(time_t t) { std::stringstream out; @@ -895,7 +955,6 @@ std::string FlowCache::timeout_to_str(time_t t) return out.str(); } - bool FlowCache::is_ip_match(const SfIp& flow_sfip, const SfIp& filter_sfip, const SfIp& filter_subnet_sfip) const { //if address is empty @@ -1001,7 +1060,12 @@ void FlowCache::output_flow(std::fstream& stream, const Flow& flow, const struct timeout_to_str(abs((int)(flow.expire_time - now.tv_sec))) : timeout_to_str(abs(remaining_time)); out << t; - stream << out.str() << proto.str() << (flow.flags.in_allowlist ? " (allowlist)" : "") << std::endl; + std::string allow_s; + if ( flow.flags.allowed_on_excess ) + allow_s = " (allowlist on excess)"; + else if ( flow.flags.in_allowlist ) + allow_s = " (allowlist)"; + stream << out.str() << proto.str() << allow_s << std::endl; } bool FlowCache::dump_flows(std::fstream& stream, unsigned count, const FilterFlowCriteria& ffc, bool first, uint8_t code) const diff --git a/src/flow/flow_cache.h b/src/flow/flow_cache.h index 9caf05bb9..572d808a9 100644 --- a/src/flow/flow_cache.h +++ b/src/flow/flow_cache.h @@ -188,6 +188,8 @@ public: size_t uni_ip_flows_size() const; size_t flows_size() const; PegCount get_lru_flow_count(uint8_t lru_idx) const; + PegCount get_excess_to_allowlist_count() const + { return excess_to_allowlist_count; } #ifdef UNIT_TEST size_t count_flows_in_lru(uint8_t lru_index) const; #endif @@ -199,10 +201,16 @@ private: void remove(snort::Flow*); void retire(snort::Flow*); unsigned prune_unis(PktType); + bool allowlist_on_excess(snort::Flow*); + bool handle_allowlist_pruning(snort::Flow*, PruneReason, uint8_t, bool&); + unsigned delete_active_flows(unsigned mode, unsigned num_to_delete, unsigned &deleted); static std::string timeout_to_str(time_t); bool is_ip_match(const snort::SfIp& flow_ip, const snort::SfIp& filter_ip, const snort::SfIp& subnet) const; + inline bool is_allowlist_on_excess() const + { return config.allowlist_cache and config.move_to_allowlist_on_excess; } + inline bool is_lru_checked(uint64_t checked_lrus_mask, uint64_t lru_mask) { return (checked_lrus_mask & lru_mask) != 0; } @@ -234,6 +242,7 @@ private: PruneStats prune_stats; FlowDeleteStats delete_stats; uint64_t empty_lru_mask; + PegCount excess_to_allowlist_count = 0; }; #endif diff --git a/src/flow/flow_config.h b/src/flow/flow_config.h index 91c26e4a1..b9d8e5b2f 100644 --- a/src/flow/flow_config.h +++ b/src/flow/flow_config.h @@ -36,6 +36,7 @@ struct FlowCacheConfig FlowTypeConfig proto[to_utype(PktType::MAX)]; unsigned prune_flows = 0; bool allowlist_cache = false; + bool move_to_allowlist_on_excess = false; }; #endif diff --git a/src/flow/flow_control.cc b/src/flow/flow_control.cc index 4de64434e..83cf80152 100644 --- a/src/flow/flow_control.cc +++ b/src/flow/flow_control.cc @@ -131,6 +131,9 @@ bool FlowControl::move_to_allowlist(Flow* f) PegCount FlowControl::get_allowlist_flow_count() const { return 
cache->get_lru_flow_count(allowlist_lru_index); } +PegCount FlowControl::get_excess_to_allowlist_count() const +{ return cache->get_excess_to_allowlist_count(); } + void FlowControl::release_flow(Flow* flow, PruneReason reason) { cache->release(flow, reason); } diff --git a/src/flow/flow_control.h b/src/flow/flow_control.h index 750b6a56c..e541ec434 100644 --- a/src/flow/flow_control.h +++ b/src/flow/flow_control.h @@ -96,6 +96,7 @@ public: { return num_flows; } PegCount get_allowlist_flow_count() const; + PegCount get_excess_to_allowlist_count() const; PegCount get_total_prunes() const; PegCount get_prunes(PruneReason) const; PegCount get_proto_prune_count(PruneReason, PktType) const; diff --git a/src/flow/test/flow_cache_test.cc b/src/flow/test/flow_cache_test.cc index 5e83dc2f4..f4b3af563 100644 --- a/src/flow/test/flow_cache_test.cc +++ b/src/flow/test/flow_cache_test.cc @@ -100,6 +100,7 @@ Flow::~Flow() = default; void Flow::init(PktType) { } void Flow::flush(bool) { } void Flow::reset(bool) { } +void Flow::trust() { } void Flow::free_flow_data() { } void Flow::set_client_initiate(Packet*) { } void Flow::set_direction(Packet*) { } @@ -107,6 +108,8 @@ void Flow::set_mpls_layer_per_dir(Packet*) { } void packet_gettimeofday(struct timeval* ) { } SO_PUBLIC void ts_print(const struct timeval*, char*, bool) { } +void Stream::disable_reassembly(Flow*) { } + time_t packet_time() { return 0; } void trace_vprintf(const char*, TraceLevel, const char*, const Packet*, const char*, va_list) {} @@ -1139,6 +1142,321 @@ TEST(flow_cache_lrus, count_flows_in_lru_test) } +TEST_GROUP(flow_cache_allowlist_pruning) { }; + +TEST(flow_cache_allowlist_pruning, allowlist_on_excess_true) +{ + FlowCacheConfig fcg; + fcg.max_flows = 3; + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = true; + + DummyCache* cache = new DummyCache(fcg); + + // Add flows until we trigger excess pruning + for (int i = 0; i < 4; i++) { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + CHECK_EQUAL(4, cache->get_count()); + CHECK(cache->get_lru_flow_count(allowlist_lru_index) > 0); + CHECK(cache->get_excess_to_allowlist_count() > 0); + + cache->purge(); + delete cache; +} + +TEST(flow_cache_allowlist_pruning, allowlist_on_excess_false_no_allowlist) +{ + FlowCacheConfig fcg; + fcg.max_flows = 3; + fcg.allowlist_cache = false; // Disable allowlist_cache + fcg.move_to_allowlist_on_excess = true; + + DummyCache* cache = new DummyCache(fcg); + + for (int i = 0; i < 4; i++) { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + // Should prune normally, no allowlist flows + CHECK_EQUAL(3, cache->get_count()); + CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index)); + CHECK_EQUAL(0, cache->get_excess_to_allowlist_count()); + + cache->purge(); + delete cache; +} + +// Test allowlist_on_excess behavior when move_to_allowlist_on_excess is disabled +TEST(flow_cache_allowlist_pruning, allowlist_on_excess_false_no_move_on_excess) +{ + FlowCacheConfig fcg; + fcg.max_flows = 3; + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = false; // Disable move_to_allowlist_on_excess + + DummyCache* cache = new DummyCache(fcg); + + // Add flows until we trigger excess pruning + for (int i = 0; i < 4; i++) { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + 
Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + // Should prune normally, no allowlist flows from excess + CHECK_EQUAL(3, cache->get_count()); + CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index)); + CHECK_EQUAL(0, cache->get_excess_to_allowlist_count()); + + cache->purge(); + delete cache; +} + +// Test how prune_one handles allowed flows with EXCESS reason +TEST(flow_cache_allowlist_pruning, prune_one_excess_in_allowlist) +{ + FlowCacheConfig fcg; + fcg.max_flows = 10; + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = true; + + DummyCache* cache = new DummyCache(fcg); + // Create a test flow + FlowKey flow_key; + flow_key.port_l = 1234; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + + // Set the flow as allowed + CHECK(cache->move_to_allowlist(flow)); + CHECK(flow->flags.in_allowlist == 1); + + // move_to_allowlist_on_excess is true, so PruneReason::EXCESS on an allowed flow should not succeed + CHECK(cache->prune_one(PruneReason::EXCESS, true, allowlist_lru_index) == false); + + // cache still has the allowed flow + CHECK_EQUAL(1, cache->get_lru_flow_count(allowlist_lru_index)); + CHECK_EQUAL(1, cache->get_count()); + + cache->purge(); + delete cache; +} + +// Test how prune_one handles allowed flows with timeout reasons +TEST(flow_cache_allowlist_pruning, prune_one_timeout_in_allowlist) +{ + FlowCacheConfig fcg; + fcg.max_flows = 10; + fcg.allowlist_cache = true; + + DummyCache* cache = new DummyCache(fcg); + + FlowKey flow_key; + flow_key.port_l = 1234; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + + CHECK(cache->move_to_allowlist(flow)); + CHECK(flow->flags.in_allowlist == 1); + + CHECK_FALSE(cache->prune_one(PruneReason::IDLE_PROTOCOL_TIMEOUT, true, allowlist_lru_index)); + + CHECK_EQUAL(1, cache->get_count()); + + cache->purge(); + delete cache; +} + +// Test how prune_one handles allowed flows with MEMCAP reason +TEST(flow_cache_allowlist_pruning, prune_one_memcap_in_allowlist) +{ + FlowCacheConfig fcg; + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = true; + fcg.max_flows = 10; + + DummyCache* cache = new DummyCache(fcg); + + for (int i = 0; i < 11; i++) + { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + if (i < 5) + CHECK(cache->move_to_allowlist(flow)); + } + + CHECK_EQUAL(11, cache->get_count()); + CHECK_EQUAL(0, cache->get_excess_to_allowlist_count()); + CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index)); + + for (int i = 0; i < 5; i++) + { + FlowKey flow_key; + flow_key.port_l = 3000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + // max flow cap is increased to 16 = max_flows + allowlist flows + // 6 allowlist flows = 5 allowed + 1 moved on excess + CHECK_EQUAL(16, cache->get_count()); + CHECK_EQUAL(1, cache->get_excess_to_allowlist_count()); + CHECK_EQUAL(6, cache->get_lru_flow_count(allowlist_lru_index)); + // Attempt to prune with MEMCAP reason; it should succeed for allowed flows + CHECK(cache->prune_one(PruneReason::MEMCAP, true, allowlist_lru_index)); + + // one allowlist flow should be gone due to memcap + CHECK_EQUAL(15, cache->get_count()); + CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index)); + + cache->purge(); + delete cache; +} + +// Test prune_one for non-allowed flows with EXCESS reason and allowlist enabled +TEST(flow_cache_allowlist_pruning, 
prune_one_excess_regular_flow_moves_to_allowlist) +{ + FlowCacheConfig fcg; + fcg.max_flows = 10; + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = true; + + DummyCache* cache = new DummyCache(fcg); + + FlowKey flow_key; + flow_key.port_l = 1234; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + + FlowKey flow_key2; + flow_key2.port_l = 5678; + flow_key2.pkt_type = PktType::TCP; + flow = cache->allocate(&flow_key2); + + // Try to prune with EXCESS reason + CHECK(cache->prune_one(PruneReason::EXCESS, true, to_utype(PktType::TCP))); + + // Check no flows were removed from the cache + // and one flow was moved to allowlist + CHECK_EQUAL(2, cache->get_count()); + CHECK_EQUAL(1, cache->get_lru_flow_count(allowlist_lru_index)); + CHECK_EQUAL(1, cache->get_lru_flow_count(to_utype(PktType::TCP))); + + // The first (oldest) flow should now be in the allowlist + flow = cache->find(&flow_key); + CHECK(flow != nullptr); + CHECK(flow->flags.in_allowlist == 1); + + cache->purge(); + delete cache; +} + +TEST(flow_cache_allowlist_pruning, prune_multiple_allowlist_pruning) +{ + FlowCacheConfig fcg; + fcg.max_flows = 10; + fcg.prune_flows = 5; + fcg.allowlist_cache = true; + + DummyCache* cache = new DummyCache(fcg); + + for (int i = 0; i < 5; i++) + { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + for (int i = 0; i < 5; i++) + { + FlowKey flow_key; + flow_key.port_l = 2000 + i; + flow_key.pkt_type = PktType::UDP; + Flow* flow = cache->allocate(&flow_key); + CHECK(cache->move_to_allowlist(flow)); + } + + CHECK_EQUAL(10, cache->get_count()); + CHECK_EQUAL(5, cache->get_lru_flow_count(to_utype(PktType::TCP))); + CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index)); + + // Using MEMCAP reason should first prune from the allowlist LRU + CHECK_EQUAL(5, cache->prune_multiple(PruneReason::MEMCAP, true)); + + // Check that we now have only TCP flows; the allowlist was pruned first + CHECK_EQUAL(5, cache->get_count()); + CHECK_EQUAL(5, cache->get_lru_flow_count(to_utype(PktType::TCP))); + CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index)); + + cache->purge(); + delete cache; +} + +TEST(flow_cache_allowlist_pruning, prune_excess_with_prioritization) +{ + FlowCacheConfig fcg; + fcg.max_flows = 8; // Setting a small max to force pruning + fcg.allowlist_cache = true; + fcg.move_to_allowlist_on_excess = true; + + DummyCache* cache = new DummyCache(fcg); + + for (int i = 0; i < 5; i++) + { + FlowKey flow_key; + flow_key.port_l = 1000 + i; + flow_key.pkt_type = PktType::TCP; + Flow* flow = cache->allocate(&flow_key); + flow->last_data_seen = i; + cache->unlink_uni(flow); + } + + for (int i = 0; i < 5; i++) + { + FlowKey flow_key; + flow_key.port_l = 2000 + i; + flow_key.pkt_type = PktType::UDP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + } + + // move_to_allowlist_on_excess is enabled, so excess flows are moved to the allowlist instead of being pruned + CHECK_EQUAL(10, cache->get_count()); // all 10 flows remain even though max_flows is 8 + + FlowKey flow_key; + flow_key.port_l = 3000; + flow_key.pkt_type = PktType::ICMP; + Flow* flow = cache->allocate(&flow_key); + cache->unlink_uni(flow); + + CHECK_EQUAL(11, cache->get_count()); + + // Check that flows were moved to the allowlist during excess pruning + CHECK(cache->get_lru_flow_count(allowlist_lru_index) == 3); + CHECK(cache->get_excess_to_allowlist_count() == 3); + + cache->purge(); + delete cache; +} + int main(int argc, char** argv) { return 
CommandLineTestRunner::RunAllTests(argc, argv); diff --git a/src/stream/base/stream_base.cc b/src/stream/base/stream_base.cc index c9bfb118e..eb27ffb1a 100644 --- a/src/stream/base/stream_base.cc +++ b/src/stream/base/stream_base.cc @@ -97,9 +97,10 @@ const PegInfo base_pegs[] = { CountType::SUM, "file_memcap_prunes", "number of FILE flows pruned due to memcap" }, { CountType::SUM, "pdu_memcap_prunes", "number of PDU flows pruned due to memcap" }, { CountType::SUM, "allowlist_memcap_prunes", "number of allowlist flows pruned due to memcap" }, + { CountType::SUM, "excess_to_allowlist", "number of flows moved to the allowlist due to excess" }, // Keep the NOW stats at the bottom as it requires special sum_stats logic - { CountType::NOW, "allowlist_flows", "number of flows moved to the allow list" }, + { CountType::NOW, "allowlist_flows", "number of flows moved to the allowlist" }, { CountType::NOW, "current_flows", "current number of flows in cache" }, { CountType::NOW, "uni_flows", "number of uni flows in cache" }, { CountType::NOW, "uni_ip_flows", "number of uni ip flows in cache" }, @@ -142,6 +143,7 @@ void base_prep() stream_base_stats.file_memcap_prunes = flow_con->get_proto_prune_count(PruneReason::MEMCAP, PktType::FILE); stream_base_stats.pdu_memcap_prunes = flow_con->get_proto_prune_count(PruneReason::MEMCAP, PktType::PDU); stream_base_stats.allowlist_memcap_prunes = flow_con->get_proto_prune_count(PruneReason::MEMCAP, static_cast<PktType>(allowlist_lru_index)); + stream_base_stats.excess_to_allowlist = flow_con->get_excess_to_allowlist_count(); stream_base_stats.allowlist_flows = flow_con->get_allowlist_flow_count(); stream_base_stats.current_flows = flow_con->get_num_flows(); diff --git a/src/stream/base/stream_module.cc b/src/stream/base/stream_module.cc index ce3ad5f39..f92aa3372 100644 --- a/src/stream/base/stream_module.cc +++ b/src/stream/base/stream_module.cc @@ -72,6 +72,9 @@ static const Parameter allowlist_cache_params[] = { "enable", Parameter::PT_BOOL, nullptr, "false", "enable allowlist cache" }, + { "move_on_excess", Parameter::PT_BOOL, nullptr, "false", + "move flows to the allowlist instead of removing them when the max flows limit is reached" }, + { nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr } }; @@ -421,6 +424,9 @@ bool StreamModule::set(const char* fqn, Value& v, SnortConfig* c) else if ( !strcmp(fqn, "stream.allowlist_cache.enable") ) config.flow_cache_cfg.allowlist_cache = v.get_bool(); + else if ( !strcmp(fqn, "stream.allowlist_cache.move_on_excess") ) + config.flow_cache_cfg.move_to_allowlist_on_excess = v.get_bool(); + else if ( !strcmp(fqn, "stream.file_cache.idle_timeout") ) config.flow_cache_cfg.proto[to_utype(PktType::FILE)].nominal_timeout = v.get_uint32(); @@ -626,6 +632,7 @@ void StreamModuleConfig::show() const { std::string tmp; tmp += "{ enable = " + (flow_cache_cfg.allowlist_cache ? std::string("true") : std::string("false")); + tmp += ", move_on_excess = " + (flow_cache_cfg.move_to_allowlist_on_excess ? std::string("true") : std::string("false")); tmp += " }"; ConfigLogger::log_value("allowlist_cache", tmp.c_str()); } diff --git a/src/stream/base/stream_module.h b/src/stream/base/stream_module.h index 1a1b78d27..208eda2d4 100644 --- a/src/stream/base/stream_module.h +++ b/src/stream/base/stream_module.h @@ -104,6 +104,7 @@ struct BaseStats PegCount file_memcap_prunes; PegCount pdu_memcap_prunes; PegCount allowlist_memcap_prunes; + PegCount excess_to_allowlist; // Keep the NOW stats at the bottom as it requires special sum_stats logic PegCount allowlist_flows;
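
For reference, a minimal snort.lua sketch that exercises the new behavior. This is illustrative only: the allowlist_cache table and its enable and move_on_excess parameters come from stream_module.cc above; the enclosing stream table is the existing stream module configuration and any other values are assumed.

    stream =
    {
        allowlist_cache =
        {
            -- the allowlist cache must be enabled; if move_on_excess is set without it,
            -- allowlist_on_excess() only emits a packet-tracer warning and excess flows
            -- are pruned as before
            enable = true,
            -- keep excess flows by moving them to the allowlist LRU instead of pruning
            -- them when the flow cache reaches its max flows limit
            move_on_excess = true
        }
    }

Flows retained this way are counted by the new excess_to_allowlist peg and are tagged "(allowlist on excess)" in flow cache dumps.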