}
if ( PacketTracer::is_active() and pruned )
- PacketTracer::log("Flow: Pruned %u flows\n", pruned);
+ PacketTracer::log("Flow: Pruned idle %u flows\n", pruned);
return pruned;
}
}
if ( PacketTracer::is_active() and pruned )
- PacketTracer::log("Flow: Pruned %u flows\n", pruned);
+ PacketTracer::log("Flow: Pruned uni %u flows\n", pruned);
return pruned;
}
assert(max_cap > 0);
unsigned pruned = 0;
+ unsigned allowed = 0;
// Initially skip offloads but if that doesn't work, the hash table is iterated from the
// beginning again. Prune offloads at that point.
assert(total_lru_count < 8 * sizeof(checked_lrus_mask));
+ uint8_t lru_idx = allowlist_lru_index;
+ uint8_t last_lru_idx = total_lru_count;
+
+ if ( is_allowlist_on_excess() )
+ {
+ max_cap += hash_table->get_node_count(allowlist_lru_index);
+ lru_idx = first_proto;
+ last_lru_idx = max_protocols;
+ }
+
{
PacketTracerSuspend pt_susp;
unsigned blocks = 0;
- // EXCESS pruning will start from the allowlist LRU
- uint8_t lru_idx = allowlist_lru_index;
while ( true )
{
ignore_offloads == 0 or all_lrus_checked(checked_lrus_mask) )
break;
- for (; lru_idx < total_lru_count; ++lru_idx)
+ for (; lru_idx < last_lru_idx; ++lru_idx)
{
num_nodes = hash_table->get_num_nodes();
if ( num_nodes <= max_cap or num_nodes <= blocks )
// Ensure LRU list remains sorted by time on touch
hash_table->lru_touch(lru_idx);
}
+ else if ( allowlist_on_excess(flow) )
+ {
+ pruned++;
+ max_cap++;
+ allowed++;
+ }
else
{
flow->ssn_state.session_flags |= SSNFLAG_PRUNED;
--ignore_offloads;
}
- if ( lru_idx >= total_lru_count )
+ if ( lru_idx >= last_lru_idx )
lru_idx = first_proto;
}
}
}
- if ( PacketTracer::is_active() and pruned )
- PacketTracer::log("Flow: Pruned %u flows\n", pruned);
-
+ if ( PacketTracer::is_active() )
+ {
+ if ( allowed )
+ PacketTracer::log("Flow: Moved %u flows to allowlist\n", allowed);
+ else if ( pruned )
+ PacketTracer::log("Flow: Pruned excess %u flows\n", pruned);
+ }
return pruned;
}
return false;
flow->ssn_state.session_flags |= SSNFLAG_PRUNED;
-
- if ( type != allowlist_lru_index )
- return release(flow, reason, do_cleanup);
- else if ( reason == PruneReason::MEMCAP or reason == PruneReason::EXCESS )
- return release(flow, reason, do_cleanup);
-
- return false;
+
+ bool flow_handled;
+ if ( handle_allowlist_pruning(flow, reason, type, flow_handled) )
+ return flow_handled;
+
+ return release(flow, reason, do_cleanup);
}
unsigned FlowCache::prune_multiple(PruneReason reason, bool do_cleanup)
return retired;
}
+// Try to move an excess flow to the allowlist LRU instead of pruning it.
+// Returns true only when the flow was actually moved; the caller then
+// keeps the flow (counted via excess_to_allowlist_count).
+bool FlowCache::allowlist_on_excess(snort::Flow *f)
+{
+    if ( is_allowlist_on_excess() )
+    {
+        // NOTE(review): the flow is stripped (reassembly disabled, flow data
+        // freed, trusted, verdict set) BEFORE move_to_allowlist() is known to
+        // succeed. If the move fails we still return false and the caller
+        // releases the flow — confirm the release path tolerates a flow that
+        // has already been trusted and had its data freed.
+        Stream::disable_reassembly(f);
+        f->free_flow_data();
+        f->trust();
+        f->last_verdict = DAQ_VERDICT_WHITELIST;
+        if ( move_to_allowlist(f) )
+        {
+            excess_to_allowlist_count++;
+            f->flags.allowed_on_excess = true;
+            return true;
+        }
+    }
+    // Misconfiguration aid: the feature flag is on but there is no allowlist
+    // cache to move flows into, so nothing can ever be allowlisted.
+    else if ( PacketTracer::is_active() and config.move_to_allowlist_on_excess and !config.allowlist_cache )
+        PacketTracer::log("Flow: Warning! move_to_allowlist_on_excess is enabled with no allowlist cache\n");
+    return false;
+}
+
+// Decide whether allowlist logic handles this flow instead of a normal
+// release(). Returns true when the caller must skip release(); in that case
+// flow_handled carries the value prune_one() should report to its caller.
+bool FlowCache::handle_allowlist_pruning(snort::Flow* flow, PruneReason reason, uint8_t type, bool& flow_handled)
+{
+    flow_handled = true;
+
+    if ( type == allowlist_lru_index )
+    {
+        if ( reason == PruneReason::EXCESS )
+        {
+            // With allowlist-on-excess enabled, allowlisted flows are exempt
+            // from excess pruning: keep the flow and report no prune. (The
+            // previous code left flow_handled true here, making prune_one()
+            // claim success for a flow that was never released.)
+            if ( is_allowlist_on_excess() )
+            {
+                flow_handled = false;
+                return true;
+            }
+            // Feature disabled: fall through to release() like any other flow.
+            return false;
+        }
+        else if ( reason != PruneReason::MEMCAP )
+        {
+            // Timeouts and other reasons never evict allowlisted flows.
+            flow_handled = false;
+            return true;
+        }
+        // MEMCAP: allowlisted flows are fair game — let release() prune them.
+        return false;
+    }
+
+    // Regular flow being pruned for excess: attempt the allowlist move;
+    // if it succeeds the flow is kept and prune_one() reports success.
+    else if ( reason == PruneReason::EXCESS )
+        return allowlist_on_excess(flow);
+
+    return false;
+}
+
std::string FlowCache::timeout_to_str(time_t t)
{
std::stringstream out;
return out.str();
}
-
bool FlowCache::is_ip_match(const SfIp& flow_sfip, const SfIp& filter_sfip, const SfIp& filter_subnet_sfip) const
{
//if address is empty
timeout_to_str(abs((int)(flow.expire_time - now.tv_sec))) :
timeout_to_str(abs(remaining_time));
out << t;
- stream << out.str() << proto.str() << (flow.flags.in_allowlist ? " (allowlist)" : "") << std::endl;
+ std::string allow_s;
+ // allowed_on_excess takes precedence over plain in_allowlist in the label
+ if ( flow.flags.allowed_on_excess )
+ allow_s = " (allowlist on excess)";
+ else if ( flow.flags.in_allowlist )
+ allow_s = " (allowlist)";
+ stream << out.str() << proto.str() << allow_s << std::endl;
}
bool FlowCache::dump_flows(std::fstream& stream, unsigned count, const FilterFlowCriteria& ffc, bool first, uint8_t code) const
void Flow::init(PktType) { }
void Flow::flush(bool) { }
void Flow::reset(bool) { }
+// Test stub: production trust() marks the flow to bypass further inspection.
+void Flow::trust() { }
void Flow::free_flow_data() { }
void Flow::set_client_initiate(Packet*) { }
void Flow::set_direction(Packet*) { }
void packet_gettimeofday(struct timeval* ) { }
SO_PUBLIC void ts_print(const struct timeval*, char*, bool) { }
+// Test stub: production disable_reassembly() stops stream reassembly on the flow.
+void Stream::disable_reassembly(Flow*) { }
+
time_t packet_time() { return 0; }
void trace_vprintf(const char*, TraceLevel, const char*, const Packet*, const char*, va_list) {}
}
+TEST_GROUP(flow_cache_allowlist_pruning) { };
+
+// Excess flows are moved to the allowlist when both allowlist_cache and
+// move_to_allowlist_on_excess are enabled, so no flow is lost.
+TEST(flow_cache_allowlist_pruning, allowlist_on_excess_true)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 3;
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    // Add flows until we trigger excess pruning
+    for (int i = 0; i < 4; i++) {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    // All 4 flows survive: the excess flow went to the allowlist LRU.
+    CHECK_EQUAL(4, cache->get_count());
+    CHECK(cache->get_lru_flow_count(allowlist_lru_index) > 0);
+    CHECK(cache->get_excess_to_allowlist_count() > 0);
+
+    cache->purge();
+    delete cache;
+}
+
+// Without an allowlist cache configured, excess flows are pruned normally
+// even though move_to_allowlist_on_excess is set.
+TEST(flow_cache_allowlist_pruning, allowlist_on_excess_false_no_allowlist)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 3;
+    fcg.allowlist_cache = false; // Disable allowlist_cache
+    fcg.move_to_allowlist_on_excess = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    for (int i = 0; i < 4; i++) {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    // Should prune normally, no allowlist flows
+    CHECK_EQUAL(3, cache->get_count());
+    CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index));
+    CHECK_EQUAL(0, cache->get_excess_to_allowlist_count());
+
+    cache->purge();
+    delete cache;
+}
+
+// Test that allowlist_on_excess behavior when move_to_allowlist_on_excess is disabled
+TEST(flow_cache_allowlist_pruning, allowlist_on_excess_false_no_move_on_excess)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 3;
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = false; // Disable move_to_allowlist_on_excess
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    // Add flows until we trigger excess pruning
+    for (int i = 0; i < 4; i++) {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    // Should prune normally, no allowlist flows from excess
+    CHECK_EQUAL(3, cache->get_count());
+    CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index));
+    CHECK_EQUAL(0, cache->get_excess_to_allowlist_count());
+
+    cache->purge();
+    delete cache;
+}
+
+// Test how prune_one handles allowed flows with EXCESS reason
+TEST(flow_cache_allowlist_pruning, prune_one_excess_in_allowlist)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 10;
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+    // Create a test flow
+    FlowKey flow_key;
+    flow_key.port_l = 1234;
+    flow_key.pkt_type = PktType::TCP;
+    Flow* flow = cache->allocate(&flow_key);
+
+    // Set the flow as allowed
+    CHECK(cache->move_to_allowlist(flow));
+    CHECK(flow->flags.in_allowlist == 1);
+
+    // move_to_allowlist_on_excess is true, so PruneReason::EXCESS on an
+    // allowlisted flow must not prune it
+    CHECK(cache->prune_one(PruneReason::EXCESS, true, allowlist_lru_index) == false);
+
+    // the cache still holds the allowlisted flow
+    CHECK_EQUAL(1, cache->get_lru_flow_count(allowlist_lru_index));
+    CHECK_EQUAL(1, cache->get_count());
+
+    cache->purge();
+    delete cache;
+}
+
+// Test how prune_one handles allowed flows with timeout reasons
+TEST(flow_cache_allowlist_pruning, prune_one_timeout_in_allowlist)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 10;
+    fcg.allowlist_cache = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    FlowKey flow_key;
+    flow_key.port_l = 1234;
+    flow_key.pkt_type = PktType::TCP;
+    Flow* flow = cache->allocate(&flow_key);
+
+    CHECK(cache->move_to_allowlist(flow));
+    CHECK(flow->flags.in_allowlist == 1);
+
+    // Timeout-based pruning never evicts allowlisted flows
+    CHECK_FALSE(cache->prune_one(PruneReason::IDLE_PROTOCOL_TIMEOUT, true, allowlist_lru_index));
+
+    CHECK_EQUAL(1, cache->get_count());
+
+    cache->purge();
+    delete cache;
+}
+
+// Test how prune_one handles allowed flows with MEMCAP reason
+TEST(flow_cache_allowlist_pruning, prune_one_memcap_in_allowlist)
+{
+    FlowCacheConfig fcg;
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = true;
+    fcg.max_flows = 10;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    for (int i = 0; i < 11; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+        if (i < 5)
+            CHECK(cache->move_to_allowlist(flow));
+    }
+
+    CHECK_EQUAL(11, cache->get_count());
+    CHECK_EQUAL(0, cache->get_excess_to_allowlist_count());
+    CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index));
+
+    for (int i = 0; i < 5; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 3000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    // effective cap grows with the allowlist: 10 max_flows + 5 allowlisted
+    // flows, so the 16th insert moved one excess flow to the allowlist
+    // (5 explicitly allowed + 1 moved on excess = 6)
+    CHECK_EQUAL(16, cache->get_count());
+    CHECK_EQUAL(1, cache->get_excess_to_allowlist_count());
+    CHECK_EQUAL(6, cache->get_lru_flow_count(allowlist_lru_index));
+    // Attempt to prune with MEMCAP reason, it should succeed for allowed flows
+    CHECK(cache->prune_one(PruneReason::MEMCAP, true, allowlist_lru_index));
+
+    // one allowlist Flow should be gone due to memcap
+    CHECK_EQUAL(15, cache->get_count());
+    CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index));
+
+    cache->purge();
+    delete cache;
+}
+
+// Test prune_one for non-allowed flows with EXCESS reason and allowlist enabled
+TEST(flow_cache_allowlist_pruning, prune_one_excess_regular_flow_moves_to_allowlist)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 10;
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    FlowKey flow_key;
+    flow_key.port_l = 1234;
+    flow_key.pkt_type = PktType::TCP;
+    Flow* flow = cache->allocate(&flow_key);
+
+    FlowKey flow_key2;
+    flow_key2.port_l = 5678;
+    flow_key2.pkt_type = PktType::TCP;
+    flow = cache->allocate(&flow_key2);
+
+    // Try to prune with EXCESS reason
+    CHECK(cache->prune_one(PruneReason::EXCESS, true, to_utype(PktType::TCP)));
+
+    // Check no flows were removed from the cache
+    // and one flow was moved to allowlist
+    CHECK_EQUAL(2, cache->get_count());
+    CHECK_EQUAL(1, cache->get_lru_flow_count(allowlist_lru_index));
+    CHECK_EQUAL(1, cache->get_lru_flow_count(to_utype(PktType::TCP)));
+
+    // The remaining flow should be moved to allowlist
+    flow = cache->find(&flow_key);
+    CHECK(flow != nullptr);
+    CHECK(flow->flags.in_allowlist == 1);
+
+    cache->purge();
+    delete cache;
+}
+
+// MEMCAP pruning drains the allowlist LRU before other protocol LRUs.
+TEST(flow_cache_allowlist_pruning, prune_multiple_allowlist_pruning)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 10;
+    fcg.prune_flows = 5;
+    fcg.allowlist_cache = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    for (int i = 0; i < 5; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    for (int i = 0; i < 5; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 2000 + i;
+        flow_key.pkt_type = PktType::UDP;
+        Flow* flow = cache->allocate(&flow_key);
+        CHECK(cache->move_to_allowlist(flow));
+    }
+
+    CHECK_EQUAL(10, cache->get_count());
+    CHECK_EQUAL(5, cache->get_lru_flow_count(to_utype(PktType::TCP)));
+    CHECK_EQUAL(5, cache->get_lru_flow_count(allowlist_lru_index));
+
+    // Using MEMCAP reason should first prune from allowlist LRU
+    CHECK_EQUAL(5, cache->prune_multiple(PruneReason::MEMCAP, true));
+
+    // Check that we now have only TCP flows, allowlist was pruned first
+    CHECK_EQUAL(5, cache->get_count());
+    CHECK_EQUAL(5, cache->get_lru_flow_count(to_utype(PktType::TCP)));
+    CHECK_EQUAL(0, cache->get_lru_flow_count(allowlist_lru_index));
+
+    cache->purge();
+    delete cache;
+}
+
+// Excess pruning with the feature enabled moves flows to the allowlist
+// instead of releasing them, growing the effective cap.
+TEST(flow_cache_allowlist_pruning, prune_excess_with_prioritization)
+{
+    FlowCacheConfig fcg;
+    fcg.max_flows = 8; // Setting a small max to force pruning
+    fcg.allowlist_cache = true;
+    fcg.move_to_allowlist_on_excess = true;
+
+    DummyCache* cache = new DummyCache(fcg);
+
+    for (int i = 0; i < 5; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 1000 + i;
+        flow_key.pkt_type = PktType::TCP;
+        Flow* flow = cache->allocate(&flow_key);
+        flow->last_data_seen = i;
+        cache->unlink_uni(flow);
+    }
+
+    for (int i = 0; i < 5; i++)
+    {
+        FlowKey flow_key;
+        flow_key.port_l = 2000 + i;
+        flow_key.pkt_type = PktType::UDP;
+        Flow* flow = cache->allocate(&flow_key);
+        cache->unlink_uni(flow);
+    }
+
+    // move_to_allowlist_on_excess enabled, should not be able to prune
+    CHECK_EQUAL(10, cache->get_count()); // 8 max_flows + 2 moved to allowlist on excess
+
+    FlowKey flow_key;
+    flow_key.port_l = 3000;
+    flow_key.pkt_type = PktType::ICMP;
+    Flow* flow = cache->allocate(&flow_key);
+    cache->unlink_uni(flow);
+
+    CHECK_EQUAL(11, cache->get_count());
+
+    // Check if any flows moved to allowlist during pruning
+    CHECK(cache->get_lru_flow_count(allowlist_lru_index) == 3);
+    CHECK(cache->get_excess_to_allowlist_count() == 3);
+
+    cache->purge();
+    delete cache;
+}
+
int main(int argc, char** argv)
{
return CommandLineTestRunner::RunAllTests(argc, argv);