cache_allocator.h
cache_interface.h
host_cache.h
+ host_cache_segmented.h
host_tracker.h
)
host_cache.cc
host_cache_module.cc
host_cache_module.h
+ host_cache_segmented.h
host_tracker_module.cc
host_tracker_module.h
host_tracker.cc
#define CACHE_ALLOCATOR_CC
#include "host_cache.h"
+#include "host_cache_segmented.h"
template <class T>
HostCacheAllocIp<T>::HostCacheAllocIp()
{
- lru = &host_cache;
+ lru = &default_host_cache;
}
#endif
T* allocate(std::size_t n);
void deallocate(T* p, std::size_t n) noexcept;
+ void set_lru(CacheInterface* c) { lru = c; }
+ CacheInterface* get_lru() const { return lru; }
protected:
{
public:
+ using Base = CacheAlloc<T>;
// This needs to be in every derived class:
template <class U>
struct rebind
using CacheAlloc<T>::lru;
+ void set_cache(CacheInterface* hci) { Base::set_lru(hci); }
+ CacheInterface* get_cache_ptr() { return Base::get_lru(); }
+
+ template <class U>
+ HostCacheAllocIp(const HostCacheAllocIp<U>& other)
+ {
+ this->lru = other.get_lru();
+ }
+
+ template <class U>
+ HostCacheAllocIp(HostCacheAllocIp<U>&& other) noexcept
+ {
+ this->lru = other.get_lru();
+ }
+
HostCacheAllocIp();
};
Illustrative examples are test/cache_allocator_test.cc (standalone
host cache / allocator example) and test/host_cache_allocator_ht_test.cc
(host_cache / allocator with host tracker example).
+
+2023-08-13
+
+To address the issue of contention due to mutex locks when Snort is configured
+to run a large number (over 100) of threads with a single host_cache,
+we introduced a new layer: "host_cache_segmented". This layer operates on
+multiple cache segments, thus significantly reducing the locking contention
+that was previously observed.
+
+The segmented host cache is not a replacement but rather an enhancement layer
+above the existing host_cache. With this architecture, there can be more than
+one host_cache, now referred to as a "segment". Each segment functions
+as an LRU cache, just like the previous singular host_cache. Importantly,
+there has been no change in the LRU cache design or its logic.
+
+Whenever a new key-data pair is added to a segment, its allocator needs updating.
+This ensures that the peg counts and visibility metrics are accurate for
+that specific segment. The find_else_create method of the segmented cache
+takes care of this, ensuring that each key-data pair is correctly
+associated with its segment.
+
+Each of these cache segments can operate independently, allowing for more
+efficient parallel processing. This not only reduces the time threads spend
+waiting for locks but also better utilizes multi-core systems by allowing
+simultaneous read and write operations in different cache segments.
+
+The number of segments and the memcap are both configurable, providing flexibility
+for tuning based on the specific requirements of the deployment environment
+and the workload. Furthermore, this segmented approach scales well with the
+increase in the number of threads, making it a robust solution for high-performance,
+multi-threaded environments.
+
+In summary, the introduction of the "host_cache_segmented" layer represents
+a significant step forward in the performance and scalability of Snort in
+multi-threaded environments. This enhancement not only provides immediate benefits
+in terms of improved throughput but also paves the way for further performance
+optimizations in the future.
+ +-----------------+
+ | Snort Threads |
+ +-----------------+
+ |
+ v
+ +-------------------------------+
+ | Host Cache Segmented Layer |
+ +-------------------------------+
+ |
+ v
+ +-------------------------------------------------+
+ | Cache Segment 1 | Cache Segment 2 | ... |
+ +-------------------------------------------------+
\ No newline at end of file
#endif
#include "host_cache.h"
+#include "host_cache_segmented.h"
using namespace snort;
-// Default host cache size in bytes.
-// Must agree with default memcap in host_cache_module.cc.
-#define LRU_CACHE_INITIAL_SIZE 16384 * 512
-
-HostCacheIp host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache;
#include "cache_allocator.h"
#include "cache_interface.h"
+// Default host cache size in bytes.
+#define LRU_CACHE_INITIAL_SIZE 8388608 // 8 MB
+
// Used to create hash of key for indexing into cache.
//
// Note that both HashIp and IpEqualTo below ignore the IP family.
}
};
-extern SO_PUBLIC HostCacheIp host_cache;
#endif
#include "log/messages.h"
#include "managers/module_manager.h"
#include "utils/util.h"
+#include "host_cache_segmented.h"
using namespace snort;
using namespace std;
return 0;
}
+// Lua control-channel handler for host_cache.get_segment_stats([segment]).
+// With no argument, luaL_optint defaults segment to -1 ("all segments").
+static int host_cache_get_segment_stats(lua_State* L)
+{
+    HostCacheModule* mod = (HostCacheModule*) ModuleManager::get_module(HOST_CACHE_NAME);
+
+    if ( mod )
+    {
+        int seg_idx = luaL_optint(L, 1, -1);
+        ControlConn* ctrlcon = ControlConn::query_from_lua(L);
+        string outstr = mod->get_host_cache_segment_stats(seg_idx);
+        // NOTE(review): ctrlcon is dereferenced without a null check — confirm
+        // ControlConn::query_from_lua cannot return nullptr in this context.
+        ctrlcon->respond("%s", outstr.c_str());
+    }
+    return 0;
+}
+
static int host_cache_delete_host(lua_State* L)
{
HostCacheModule* mod = (HostCacheModule*) ModuleManager::get_module(HOST_CACHE_NAME);
{ nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr }
};
+static const Parameter host_cache_segment_stats_params[] =
+{
+ { "segment", Parameter::PT_INT, nullptr, nullptr, "segment number for stats" },
+ { nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr }
+};
+
static const Parameter host_cache_delete_host_params[] =
{
{ "host_ip", Parameter::PT_STRING, nullptr, nullptr, "ip address to delete" },
{ "delete_client", host_cache_delete_client,
host_cache_delete_client_params, "delete client from host"},
{ "get_stats", host_cache_get_stats, host_cache_stats_params, "get current host cache usage and pegs"},
+ { "get_segment_stats", host_cache_get_segment_stats, host_cache_segment_stats_params, "get usage and pegs for cache segment(s)"},
{ nullptr, nullptr, nullptr, nullptr }
};
{ "memcap", Parameter::PT_INT, "512:maxSZ", "8388608",
"maximum host cache size in bytes" },
+
+ { "segments", Parameter::PT_INT, "1:32", "4",
+ "number of host cache segments. It must be power of 2."},
{ nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr }
};
dump_file = v.get_string();
}
else if ( v.is("memcap") )
+ {
memcap = v.get_size();
+ }
+ else if ( v.is("segments"))
+ {
+ segments = v.get_uint8();
+
+ if(segments > 32)
+ segments = 32;
+ if (segments == 0 || (segments & (segments - 1)) != 0)
+ {
+ uint8_t highestBitSet = 0;
+ while (segments >>= 1)
+ highestBitSet++;
+ segments = 1 << highestBitSet;
+ LogMessage("== WARNING: host_cache segments is not the power of 2. setting to %d\n", segments);
+ }
+ }
return true;
}
if ( Snort::is_reloading() )
sc->register_reload_handler(new HostCacheReloadTuner(memcap));
else
- {
- host_cache.set_max_size(memcap);
+ {
+ host_cache.setup(segments, memcap);
ControlConn::log_command("host_cache.delete_host",false);
}
}
}
+// Render usage and peg counts for one segment (seg_idx >= 0), or for the
+// whole cache followed by every segment when seg_idx == -1.
+string HostCacheModule::get_host_cache_segment_stats(int seg_idx)
+{
+    // Accept only -1 (all segments) or a valid segment index. The previous
+    // check let any negative value other than -1 fall through and produce a
+    // misleading, segment-less report.
+    if(seg_idx < -1 || seg_idx >= host_cache.get_segments())
+        return "Invalid segment index\nTry host_cache.get_segment_stats() to get all stats\n";
+
+    string str;
+    const PegInfo* pegs = host_cache.get_pegs();
+
+    if(seg_idx == -1)
+    {
+        const auto&& lru_data = host_cache.get_all_data();
+        str = "Total host cache size: " + to_string(host_cache.mem_size()) + " bytes, "
+            + to_string(lru_data.size()) + " trackers, memcap: " + to_string(host_cache.get_max_size())
+            + " bytes\n";
+
+        // Refresh the in-use pegs of each segment under that segment's lock.
+        for(auto cache : host_cache.seg_list)
+        {
+            cache->lock();
+            cache->stats.bytes_in_use = cache->current_size;
+            cache->stats.items_in_use = cache->list.size();
+            cache->unlock();
+        }
+
+        // Aggregated (all-segment) counts; print only non-zero pegs.
+        PegCount* counts = (PegCount*) host_cache.get_counts();
+        for ( int i = 0; pegs[i].type != CountType::END; i++ )
+        {
+            if ( counts[i] )
+            {
+                str += pegs[i].name;
+                str += ": " + to_string(counts[i]) + "\n" ;
+            }
+        }
+    }
+
+    str += "\n";
+    str += "total cache segments: " + to_string(host_cache.seg_list.size()) + "\n";
+    int idx = -1;
+    for( auto cache : host_cache.seg_list)
+    {
+        idx++;
+        if(seg_idx != -1 && seg_idx != idx)
+            continue;
+
+        str += "Segment " + to_string(idx) + ":\n";
+        const auto&& lru_data = cache->get_all_data();
+        str += "Current host cache size: " + to_string(cache->mem_size()) + " bytes, "
+            + to_string(lru_data.size()) + " trackers, memcap: " + to_string(cache->get_max_size())
+            + " bytes\n";
+
+        // Per-segment in-use pegs, refreshed under the segment lock.
+        cache->lock();
+        cache->stats.bytes_in_use = cache->current_size;
+        cache->stats.items_in_use = cache->list.size();
+        cache->unlock();
+
+        PegCount* count = (PegCount*) cache->get_counts();
+        for ( int i = 0; pegs[i].type != CountType::END; i++ )
+        {
+            if ( count[i] )
+            {
+                str += pegs[i].name;
+                str += ": " + to_string(count[i]) + "\n" ;
+            }
+        }
+        str += "\n";
+    }
+    return str;
+}
+
string HostCacheModule::get_host_cache_stats()
{
string str;
const auto&& lru_data = host_cache.get_all_data();
str = "Current host cache size: " + to_string(host_cache.mem_size()) + " bytes, "
- + to_string(lru_data.size()) + " trackers, memcap: " + to_string(host_cache.max_size)
+ + to_string(lru_data.size()) + " trackers, memcap: " + to_string(host_cache.get_max_size())
+ " bytes\n";
- host_cache.lock();
-
- host_cache.stats.bytes_in_use = host_cache.current_size;
- host_cache.stats.items_in_use = host_cache.list.size();
-
+ for(auto cache : host_cache.seg_list)
+ {
+ cache->lock();
+ cache->stats.bytes_in_use = cache->current_size;
+ cache->stats.items_in_use = cache->list.size();
+ cache->unlock();
+ }
+
PegCount* counts = (PegCount*) host_cache.get_counts();
const PegInfo* pegs = host_cache.get_pegs();
}
- host_cache.unlock();
return str;
}
void HostCacheModule::sum_stats(bool dump_stats)
{
- host_cache.lock();
// These could be set in prep_counts but we set them here
// to save an extra cache lock.
- host_cache.stats.bytes_in_use = host_cache.current_size;
- host_cache.stats.items_in_use = host_cache.list.size();
+ for(auto cache : host_cache.seg_list)
+ {
+ cache->lock();
+ cache->stats.bytes_in_use = cache->current_size;
+ cache->stats.items_in_use = cache->list.size();
+ cache->unlock();
+ }
Module::sum_stats(dump_stats);
- host_cache.unlock();
}
void HostCacheModule::set_trace(const Trace* trace) const
#include "trace/trace_api.h"
#include "host_cache.h"
+#include "host_cache_segmented.h"
#define HOST_CACHE_NAME "host_cache"
#define HOST_CACHE_HELP "global LRU cache of host_tracker data about hosts"
void log_host_cache(const char* file_name, bool verbose = false);
std::string get_host_cache_stats();
+ std::string get_host_cache_segment_stats(int seg_idx);
void set_trace(const snort::Trace*) const override;
const snort::TraceOption* get_trace_options() const override;
private:
std::string dump_file;
size_t memcap = 0;
+ uint8_t segments = 1;
};
extern THREAD_LOCAL const snort::Trace* host_cache_trace;
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2023 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+// host_cache_segmented.h author Raza Shafiq <rshafiq@cisco.com>
+
+#ifndef HOST_CACHE_SEGMENTED_H
+#define HOST_CACHE_SEGMENTED_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <atomic>
+#include <cassert>
+
+#include "host_cache.h"
+#include "log/messages.h"
+
+#define DEFAULT_HOST_CACHE_SEGMENTS 4
+
+extern SO_PUBLIC HostCacheIp default_host_cache;
+
+// Segmentation layer over multiple HostCacheIp LRU segments. Each key is
+// hashed to exactly one segment, so threads working on different segments
+// never contend on the same lock.
+template<typename Key, typename Value>
+class HostCacheSegmented
+{
+public:
+    HostCacheSegmented() :
+        segment_count(DEFAULT_HOST_CACHE_SEGMENTS),
+        memcap_per_segment(LRU_CACHE_INITIAL_SIZE) { }
+    HostCacheSegmented(uint8_t segment_count, size_t memcap_per_segment);
+
+    // Lifecycle: init() creates the segments, term() destroys them,
+    // setup() reconfigures count/memcap from module config.
+    void init();
+    void term();
+    void setup(uint8_t , size_t );
+
+    const PegInfo* get_pegs() { return lru_cache_shared_peg_names; }
+    size_t get_memcap_per_segment() { return memcap_per_segment.load(); }
+    size_t get_valid_id(uint8_t idx);
+    uint8_t get_segments() { return segment_count; }
+    size_t get_max_size();
+    size_t get_mem_chunk();
+    PegCount* get_counts();
+
+    void set_segments(uint8_t segments) { segment_count = segments; }
+    void print_config();
+    bool set_max_size(size_t max_size);
+    bool reload_resize(size_t memcap_per_segment);
+    bool reload_prune(size_t new_size, unsigned max_prune);
+    void invalidate();
+
+    std::shared_ptr<Value> operator[](const Key& key);
+
+    // Key-to-segment routing plus the usual LRU-cache operations, each
+    // forwarded to the owning segment.
+    uint8_t get_segment_idx(Key val);
+    std::shared_ptr<Value> find(const Key& key);
+    std::shared_ptr<Value> find_else_create(const Key& key, bool* new_data);
+    std::vector<std::pair<Key, std::shared_ptr<Value>>> get_all_data();
+    bool find_else_insert(const Key& key, std::shared_ptr<Value>& value);
+    bool remove(const Key& key);
+    bool remove(const Key& key, typename LruCacheSharedMemcap
+        <snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data);
+    size_t mem_size();
+
+    // Public so module/stat code can iterate segments directly.
+    std::vector<HostCacheIp*> seg_list;
+    HostCacheIp* default_cache = &default_host_cache; // Default cache used for host tracker
+
+private:
+    void update_counts();
+
+    uint8_t segment_count;              // assumed to be a power of 2 by get_segment_idx
+    std::atomic<size_t> memcap_per_segment;
+    struct LruCacheSharedStats counts;  // aggregate of all segments' pegs
+    bool init_done = false;
+};
+
+
+// Construct with an explicit segment count and per-segment memcap; segments
+// are created eagerly so the cache is usable immediately.
+template<typename Key, typename Value>
+HostCacheSegmented<Key, Value>::HostCacheSegmented(uint8_t segment_count, size_t memcap_per_segment) :
+    segment_count(segment_count),
+    memcap_per_segment(memcap_per_segment)
+{
+    assert(segment_count > 0);
+
+    // Each segment is an independent HostCacheIp LRU cache. (The previous
+    // C-style cast of an already-HostCacheIp* pointer was redundant.)
+    for (size_t i = 0; i < this->segment_count; ++i)
+        seg_list.emplace_back(new HostCacheIp(this->memcap_per_segment));
+
+    init_done = true;
+}
+
+// Lazily create the segments; idempotent once they exist.
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::init()
+{
+    if(init_done or seg_list.size() >= segment_count)
+        return;
+
+    assert(segment_count > 0);
+
+    // Redundant (HostCacheIp*) cast removed — `new HostCacheIp` already has
+    // the right type.
+    for (size_t i = 0; i < segment_count; ++i)
+        seg_list.emplace_back(new HostCacheIp(memcap_per_segment.load()));
+
+    init_done = true;
+}
+
+// Destroy all segments. Also drop the now-dangling pointers and reset
+// init_done: the previous version left seg_list populated with freed
+// pointers, so a later get_counts()/iteration was a use-after-free, and a
+// later init() could never rebuild the segments.
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::term()
+{
+    for (auto cache : seg_list)
+        delete cache;   // delete of nullptr is a no-op; no guard needed
+
+    seg_list.clear();
+    init_done = false;
+}
+
+// Reconfigure segment count and total memcap (from module configuration).
+// The total memcap is split evenly across the segments.
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::setup(uint8_t segs, size_t memcap )
+{
+    // Validate the incoming parameter, not the stale member (the old assert
+    // checked segment_count before it was assigned), and do so before the
+    // division below can divide by zero.
+    assert(segs > 0);
+
+    segment_count = segs;
+    memcap_per_segment = memcap/segs;
+    set_max_size(memcap);
+}
+
+// Valid-visibility id of segment `idx`; 0 when the index is out of range.
+template<typename Key, typename Value>
+size_t HostCacheSegmented<Key, Value>::get_valid_id(uint8_t idx)
+{
+    return (idx < seg_list.size()) ? seg_list[idx]->get_valid_id() : 0;
+}
+
+// Log the effective total memcap when verbose logging is enabled.
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::print_config()
+{
+    if ( !snort::SnortConfig::log_verbose() )
+        return;
+
+    snort::LogLabel("host_cache");
+    snort::LogMessage("    memcap: %zu bytes\n", get_max_size());
+}
+
+// Convenience lookup: returns the tracker for `key`, creating it on demand
+// (caller does not care whether it was newly created).
+template<typename Key, typename Value>
+std::shared_ptr<Value> HostCacheSegmented<Key, Value>::operator[](const Key& key)
+{
+    return find_else_create(key, nullptr);
+}
+
+/**
+ * Sets the maximum size for the entire cache, which is distributed equally
+ * among all the segments.
+ */
+/**
+ * Sets the maximum size for the entire cache, which is distributed equally
+ * among all the segments. Returns false if any segment rejects the resize.
+ */
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::set_max_size(size_t max_size)
+{
+    memcap_per_segment = max_size/segment_count;
+
+    bool all_ok = true;
+    for (auto segment : seg_list)
+        all_ok = segment->set_max_size(memcap_per_segment) && all_ok;
+
+    return all_ok;
+}
+
+/**
+ * Resize the cache based on the provided memory capacity, distributing the
+ * memory equally among all the segments. If any segment fails to resize,
+ * the operation is considered unsuccessful.
+ */
+/**
+ * Resize the cache based on the provided memory capacity, distributing the
+ * memory equally among all the segments. If any segment fails to resize,
+ * the operation is considered unsuccessful.
+ */
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::reload_resize(size_t memcap)
+{
+    memcap_per_segment = memcap/segment_count;
+
+    bool all_ok = true;
+    for (auto segment : seg_list)
+        all_ok = segment->reload_resize(memcap_per_segment.load()) && all_ok;
+
+    return all_ok;
+}
+
+// Computes the index of the segment where a given key-value pair belongs.
+template<typename Key, typename Value>
+uint8_t HostCacheSegmented<Key, Value>::get_segment_idx(Key val)
+{
+ const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&val);
+ uint8_t result = 0;
+ for (size_t i = 0; i < sizeof(Key); ++i)
+ result ^= bytes[i];
+ //Assumes segment_count is a power of 2 always
+ //This is a fast way to do a modulo operation
+ return result & (segment_count - 1);
+}
+
+//Retrieves all the data stored across all the segments of the cache.
+template<typename Key, typename Value>
+std::vector<std::pair<Key, std::shared_ptr<Value>>> HostCacheSegmented<Key,Value>::get_all_data()
+{
+ std::vector<std::pair<Key, std::shared_ptr<Value>>> all_data;
+
+ for (auto cache : seg_list)
+ {
+ auto cache_data = cache->get_all_data();
+ all_data.insert(all_data.end(), cache_data.begin(), cache_data.end());
+ }
+ return all_data;
+}
+
+// Look the key up in the segment that owns it.
+template<typename Key, typename Value>
+std::shared_ptr<Value> HostCacheSegmented<Key, Value>::find(const Key& key)
+{
+    return seg_list[get_segment_idx(key)]->find(key);
+}
+
+/**
+ * Updates the internal counts of the host cache. This method aggregates the
+ * counts from all segments and updates the overall counts for the cache.
+ */
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::update_counts()
+{
+ PegCount* pcs = (PegCount*)&counts;
+ const PegInfo* pegs = get_pegs();
+
+ for ( int i = 0; pegs[i].type != CountType::END; i++ )
+ {
+ PegCount c = 0;
+ for(auto cache : seg_list)
+ {
+ c += cache->get_counts()[i];
+ }
+ pcs[i] = c;
+ }
+}
+
+// Find the tracker for `key`, creating it in the owning segment if absent.
+// Newly created trackers are bound to their segment's allocator and made
+// visible; `new_data` (optional) reports whether creation happened.
+template<typename Key, typename Value>
+std::shared_ptr<Value> HostCacheSegmented<Key, Value>:: find_else_create(const Key& key, bool* new_data)
+{
+    // Determine the segment index where the key-value pair resides or should reside
+    uint8_t idx = get_segment_idx(key);
+    bool new_data_local = false;
+
+    // Retrieve or create the entry for the key in the determined segment
+    auto ht = seg_list[idx]->find_else_create(key, &new_data_local);
+    if(new_data_local)
+    {
+        // If a new entry was created, update its cache interface and visibility
+        // (the update must happen before the caller can use the tracker, so
+        // peg counts are attributed to the right segment).
+        ht->update_cache_interface(idx);
+        ht->init_visibility(seg_list[idx]->get_valid_id());
+    }
+    if(new_data)
+        *new_data = new_data_local;
+    return ht;
+}
+
+// Insert `value` under `key` in the owning segment unless the key already
+// exists; returns the segment's find_else_insert result.
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::find_else_insert(const Key& key, std::shared_ptr<Value>& value)
+{
+    uint8_t idx = get_segment_idx(key);
+    // Third argument false — presumably suppresses an extra step in the
+    // segment's insert path; confirm against LruCacheSharedMemcap's signature.
+    return seg_list[idx]->find_else_insert(key, value, false);
+}
+
+// Aggregate pegs across segments (only once segments exist) and expose them.
+template<typename Key, typename Value>
+PegCount* HostCacheSegmented<Key, Value>::get_counts()
+{
+    if ( init_done )
+        update_counts();
+
+    return (PegCount*)&counts;
+}
+
+// Invalidate every segment in turn.
+template<typename Key, typename Value>
+void HostCacheSegmented<Key, Value>::invalidate()
+{
+    for (auto segment : seg_list)
+        segment->invalidate();
+}
+
+// Shrink to `new_size` total (split evenly), pruning at most `max_prune`
+// entries per segment; false if any segment could not comply.
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::reload_prune(size_t new_size, unsigned max_prune)
+{
+    memcap_per_segment = new_size/segment_count;
+
+    bool all_ok = true;
+    for (auto segment : seg_list)
+        all_ok = segment->reload_prune(memcap_per_segment.load(), max_prune) && all_ok;
+
+    return all_ok;
+}
+
+// Total bytes currently used across all segments.
+template<typename Key, typename Value>
+size_t HostCacheSegmented<Key, Value>::mem_size()
+{
+    size_t total = 0;
+    for (auto segment : seg_list)
+        if (segment)
+            total += segment->mem_size();
+
+    return total;
+}
+
+// Effective total memcap: sum of every segment's memcap.
+template<typename Key, typename Value>
+size_t HostCacheSegmented<Key, Value>::get_max_size()
+{
+    size_t total = 0;
+    for (auto segment : seg_list)
+        total += segment->get_max_size();
+
+    return total;
+}
+
+template<typename Key, typename Value>
+size_t HostCacheSegmented<Key, Value>::get_mem_chunk()
+{
+    //Assumes all segments have the same mem_chunk
+    // Guard against calling before init()/setup(): seg_list[0] on an empty
+    // vector is undefined behavior.
+    assert(!seg_list.empty());
+    return seg_list[0]->mem_chunk;
+}
+
+// Route the removal to the segment that owns `key`.
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::remove(const Key& key)
+{
+    return seg_list[get_segment_idx(key)]->remove(key);
+}
+
+// Remove `key` from its owning segment, returning the evicted data to the caller.
+template<typename Key, typename Value>
+bool HostCacheSegmented<Key, Value>::remove(const Key& key, typename LruCacheSharedMemcap<snort::SfIp, snort::HostTracker, HashIp, IpEqualTo, HTPurgatory>::Data& data)
+{
+    return seg_list[get_segment_idx(key)]->remove(key, data);
+}
+
+/*
+Warning!!!: update_allocator and update_set_allocator don't copy data to old container
+but erase it for speed. Use with care!!!
+*/
+// Rebind `cont`'s allocator to `new_lru` by rebuilding the container.
+// NOTE: this erases the existing contents for speed (see warning above).
+template <template <typename, typename...> class Container, typename T, typename Alloc>
+void update_allocator(Container<T, Alloc>& cont, CacheInterface* new_lru)
+{
+    Alloc new_allocator;
+    new_allocator.set_cache(new_lru);
+    // The temporary is already an rvalue; the previous std::move around it
+    // was a pessimizing no-op.
+    cont = Container<T, Alloc>(new_allocator);
+}
+
+// Same as update_allocator, for set-like containers carrying a comparator.
+// NOTE: this erases the existing contents for speed (see warning above).
+template <template <typename, typename, typename...> class Container, typename T, typename Comp, typename Alloc>
+void update_set_allocator(Container<T, Comp, Alloc>& cont, CacheInterface* new_lru)
+{
+    Alloc new_allocator;
+    new_allocator.set_cache(new_lru);
+    // Assigning from a temporary is already a move; std::move was redundant.
+    cont = Container<T, Comp, Alloc>(new_allocator);
+}
+
+
+typedef HostCacheSegmented<snort::SfIp, snort::HostTracker> HostCacheSegmentedIp;
+extern SO_PUBLIC HostCacheSegmentedIp host_cache;
+
+#endif // HOST_CACHE_SEGMENTED_H
+
#include "cache_allocator.cc"
#include "host_cache.h"
+#include "host_cache_segmented.h"
#include "host_tracker.h"
using namespace snort;
const uint8_t snort::zero_mac[MAC_SIZE] = {0, 0, 0, 0, 0, 0};
+
HostTracker::HostTracker() : hops(-1)
{
last_seen = nat_count_start = (uint32_t) packet_time();
last_event = -1;
- visibility = host_cache.get_valid_id();
+ visibility = host_cache.get_valid_id(0);
}
void HostTracker::update_last_seen()
bool HostTracker::set_visibility(bool v)
{
- // get_valid_id may use its own lock, so get this outside our lock
- size_t container_id = host_cache.get_valid_id();
-
std::lock_guard<std::mutex> lck(host_tracker_lock);
+ size_t container_id = host_cache.get_valid_id(cache_idx);
size_t old_visibility = visibility;
visibility = v ? container_id : HostCacheIp::invalid_id;
bool HostTracker::is_visible() const
{
std::lock_guard<std::mutex> lck(host_tracker_lock);
- return visibility == host_cache.get_valid_id();
+ return visibility == host_cache.get_valid_id(cache_idx);
}
flows.clear();
}
+// Re-bind this tracker (and all its allocator-backed containers) to cache
+// segment `idx` so memory accounting is attributed to the right segment.
+void HostTracker::update_cache_interface(uint8_t idx)
+{
+    // Take the lock before reading cache_idx/cache_interface: every other
+    // accessor guards them with host_tracker_lock, so the previous unlocked
+    // early-exit check was a data race.
+    std::lock_guard<std::mutex> lock(host_tracker_lock);
+
+    if (idx == cache_idx and cache_interface == host_cache.seg_list[idx])
+        return;
+
+    cache_idx = idx;
+    cache_interface = host_cache.seg_list[idx];
+
+    // Rebuild each container with an allocator bound to the new segment.
+    // Warning: update_allocator/update_set_allocator erase existing contents.
+    update_allocator(macs, cache_interface);
+    update_allocator(network_protos, cache_interface);
+    update_allocator(xport_protos, cache_interface);
+    update_allocator(services, cache_interface);
+    update_allocator(clients, cache_interface);
+    update_allocator(ua_fps, cache_interface);
+    update_set_allocator(tcp_fpids, cache_interface);
+    update_set_allocator(udp_fpids, cache_interface);
+    update_set_allocator(smb_fpids, cache_interface);
+    update_set_allocator(cpe_fpids, cache_interface);
+}
+
HostApplicationInfo::HostApplicationInfo(const char *ver, const char *ven)
{
if ( ver )
return ++nat_count;
}
+ void set_cache_idx(uint8_t idx)
+ {
+ std::lock_guard<std::mutex> lck(host_tracker_lock);
+ cache_idx = idx;
+ }
+
+ void init_visibility(size_t v)
+ {
+ std::lock_guard<std::mutex> lck(host_tracker_lock);
+ visibility = v;
+ }
+
+ uint8_t get_cache_idx() const
+ {
+ return cache_idx;
+ }
+
bool set_netbios_name(const char*);
bool set_visibility(bool v = true);
+ size_t get_visibility() const {return visibility;}
+
bool is_visible() const;
void remove_flows();
void remove_flow(RNAFlow*);
+ void update_cache_interface( uint8_t idx );
+ CacheInterface * get_cache_interface() { return cache_interface; }
+
private:
mutable std::mutex host_tracker_lock; // ensure that updates to a shared object are safe
uint32_t nat_count_start; // the time nat counting starts for this host
size_t visibility;
+ uint8_t cache_idx = 0;
uint32_t num_visible_services = 0;
uint32_t num_visible_clients = 0;
uint32_t num_visible_macs = 0;
+
+ CacheInterface * cache_interface = nullptr;
// These three do not lock independently; they are used by payload discovery and called
// from add_payload(HostApplication&, Port, IpProtocol, AppId, AppId, size_t); where the
#endif
#include "host_tracker_module.h"
+#include "host_cache_segmented.h"
#include "log/messages.h"
#include "main/snort_config.h"
using namespace snort;
+static HostCacheIp initial_host_cache(LRU_CACHE_INITIAL_SIZE);
+
const PegInfo host_tracker_pegs[] =
{
{ CountType::SUM, "service_adds", "host service adds" },
else if ( idx && !strcmp(fqn, "host_tracker") && addr.is_set() )
{
- host_cache[addr];
+ initial_host_cache[addr];
for ( auto& a : apps )
- host_cache[addr]->add_service(a);
+ initial_host_cache[addr]->add_service(a);
addr.clear();
apps.clear();
return true;
}
+// Move hosts collected during config parsing (in initial_host_cache) into
+// the segmented runtime cache.
+void HostTrackerModule::init_data()
+{
+    auto host_data = initial_host_cache.get_all_data();
+    for ( auto& h : host_data )
+    {
+        host_cache.find_else_insert(h.first, h.second);
+        // Presumably 1 is the initial valid visibility id of a fresh
+        // segment — TODO confirm against LruCacheShared's id scheme.
+        h.second->init_visibility(1);
+    }
+}
+
+
const PegInfo* HostTrackerModule::get_pegs() const
{ return host_tracker_pegs; }
#define host_tracker_help \
"configure hosts"
+#define HOST_TRACKER_NAME "host_tracker"
class HostTrackerModule : public snort::Module
{
public:
HostTrackerModule() :
- snort::Module("host_tracker", host_tracker_help, host_tracker_params, true) { }
+ snort::Module(HOST_TRACKER_NAME, host_tracker_help, host_tracker_params, true) { }
const PegInfo* get_pegs() const override;
PegCount* get_counts() const override;
bool begin(const char*, int, snort::SnortConfig*) override;
bool end(const char*, int, snort::SnortConfig*) override;
+ void init_data();
+
Usage get_usage() const override
{ return GLOBAL; }
add_cpputest( host_cache_test
SOURCES
../host_cache.cc
+ ../host_cache_segmented.h
../host_tracker.cc
../../network_inspectors/rna/test/rna_flow_stubs.cc
../../sfip/sf_ip.cc
add_cpputest( host_cache_module_test
SOURCES
../host_cache_module.cc
- ../host_cache.cc
+ ../host_cache_segmented.h
../host_tracker.cc
../../framework/module.cc
../../framework/value.cc
../../sfip/sf_ip.cc
)
+add_cpputest( host_cache_segmented_test
+ SOURCES
+ ../host_cache.cc
+ ../host_cache.h
+ ../host_tracker.cc
+ ../host_cache_segmented.h
+ ../../network_inspectors/rna/test/rna_flow_stubs.cc
+ ../../sfip/sf_ip.cc
+ )
+
add_cpputest( host_tracker_module_test
SOURCES
../host_cache.cc
+ ../host_cache_segmented.h
../host_tracker.cc
../host_tracker_module.cc
../../framework/module.cc
#endif
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "host_tracker/cache_allocator.cc"
#include "network_inspectors/rna/rna_flow.h"
#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>
-HostCacheIp host_cache(100);
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache(4,100);
using namespace std;
using namespace snort;
+namespace snort
+{
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
+}
// Derive an allocator from CacheAlloc:
template <class T>
class Allocator : public CacheAlloc<T>
int main(int argc, char** argv)
{
- // FIXIT-L There is currently no external way to fully release the memory from the global host
- // cache unordered_map in host_cache.cc
MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
- return CommandLineTestRunner::RunAllTests(argc, argv);
+ int ret = CommandLineTestRunner::RunAllTests(argc, argv);
+ host_cache.term();
+ return ret;
}
#include "network_inspectors/rna/rna_flow.h"
#include <cstring>
-
#include "main/snort_config.h"
#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>
using namespace snort;
+using namespace std;
namespace snort
{
char* snort_strdup(const char* str)
{ return strdup(str); }
time_t packet_time() { return 0; }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1);}
+}
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache(1,100);
+
+
+// Minimal CacheAlloc-derived allocator for the tests below.
+template <class T>
+class Allocator : public CacheAlloc<T>
+{
+public:
+    // Required in every derived allocator so containers can rebind.
+    template <class U>
+    struct rebind
+    {
+        typedef Allocator<U> other;
+    };
+
+    using CacheAlloc<T>::lru;
+    using Base = CacheAlloc<T>;
+
+    void set_cache(CacheInterface* hci) { Base::set_lru(hci); }
+    CacheInterface* get_cache_ptr() { return Base::get_lru(); }
+
+    template <class U>
+    Allocator(const Allocator<U>& other)
+    {
+        lru = other.lru;
+    }
+    // Move ctor takes a non-const rvalue: the previous `const Allocator<U>&&`
+    // could never actually move from its argument.
+    template <class U>
+    Allocator(Allocator<U>&& other) noexcept
+    {
+        lru = other.lru;
+    }
+    Allocator();
+};
+
+
+// Trivial cached value type whose vector uses the test Allocator, so element
+// allocations are charged to the allocator's bound cache.
+class Item
+{
+public:
+    typedef int ValueType;
+    vector<ValueType, Allocator<ValueType>> data;
+};
+
+typedef LruCacheSharedMemcap<string, Item, hash<string>> CacheType;
+CacheType cache(100);
+CacheType cache2(100);
+
+template <class T>
+Allocator<T>::Allocator()
+{
+ lru = &cache;
}
-HostCacheIp host_cache(100);
TEST_GROUP(host_cache_allocator_ht)
{
+
};
+// Verify update_allocator rebinds a container's allocator to a new cache.
+TEST(host_cache_allocator_ht, allocate_update)
+{
+    //declare a list with allocator cache
+    std::list<string, Allocator<string>> test_list;
+    CHECK(test_list.get_allocator().get_lru() == &cache);
+    //update cache interface of test_list to cache_2
+    update_allocator(test_list, &cache2);
+    CHECK(test_list.get_allocator().get_lru() == &cache2);
+}
+
// Test allocation / deallocation, pruning and remove.
TEST(host_cache_allocator_ht, allocate)
{
// room for n host trackers in the cache and 2^floor(log2(3))+2^ceil(log2(3))-1 host
// applications in ht
- // FIXIT-L this makes a questionable assumption about the STL vector implementation
- // that it will double the allocation each time it needs to increase its size, so
- // going from 2 to 3 will allocate 4 and then release 2, meaning in order to exactly
- // induce pruning, the max size should be just one <ht_item_sz> short of holding 6
- const size_t max_size = n * hc_item_sz + 5 * ht_item_sz;
-
+ const size_t max_size = n * hc_item_sz + m * ht_item_sz;
host_cache.set_max_size(max_size);
// insert n empty host trackers:
int main(int argc, char** argv)
{
- // FIXIT-L There is currently no external way to fully release the memory from the global host
- // cache unordered_map in host_cache.cc
MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
- return CommandLineTestRunner::RunAllTests(argc, argv);
+ int ret = CommandLineTestRunner::RunAllTests(argc, argv);
+ host_cache.term();
+ return ret;
}
#include "control/control.h"
#include "host_tracker/host_cache_module.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "main/snort_config.h"
#include "managers/module_manager.h"
using namespace snort;
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache(4,LRU_CACHE_INITIAL_SIZE);
+
// All tests here use the same module since host_cache is global. Creating a local module for each
// test will cause host_cache PegCount testing to be dependent on the order of running these tests.
static HostCacheModule module;
time_t packet_time() { return 0; }
bool Snort::is_reloading() { return false; }
void SnortConfig::register_reload_handler(ReloadResourceTuner* rrt) { delete rrt; }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
} // end of namespace snort
void show_stats(PegCount*, const PegInfo*, unsigned, const char*) { }
template <class T>
HostCacheAllocIp<T>::HostCacheAllocIp()
{
- lru = &host_cache;
+ // host_cache is now segmented: default-bind this allocator to segment 0
+ // instead of the (removed) monolithic cache.
+ lru = host_cache.seg_list[0];
}
TEST_GROUP(host_cache_module)
static void try_reload_prune(bool is_not_locked)
{
+ // Overall prune target: 1.5 memory chunks per segment, summed over all
+ // segments, so each segment's share works out to mem_chunk * 1.5.
+ auto segs = host_cache.seg_list.size();
+ auto prune_size = host_cache.seg_list[0]->mem_chunk * 1.5 * segs;
if ( is_not_locked )
{
- CHECK(host_cache.reload_prune(host_cache.mem_chunk * 1.5, 2) == true);
+ CHECK(host_cache.reload_prune(prune_size, 2) == true);
+ // The segmented layer must split the new memcap evenly across segments.
+ for ( auto& seg : host_cache.seg_list )
+ {
+ CHECK(seg->get_max_size() == prune_size/segs);
+ }
}
else
{
- CHECK(host_cache.reload_prune(host_cache.mem_chunk * 1.5, 2) == false);
+ // With every segment's reload_mutex already held, pruning must refuse.
+ CHECK(host_cache.reload_prune(prune_size, 2) == false);
}
}
+// Verify that IPs are distributed across segments and that each tracker
+// remembers the segment that owns it.
+TEST(host_cache_module, cache_segments)
+{
+ SfIp ip0, ip1, ip2, ip3;
+ ip0.set("1.2.3.2");
+ ip1.set("11.22.2.0");
+ ip2.set("192.168.1.1");
+ ip3.set("10.20.33.10");
+
+ // NOTE(review): these four addresses are assumed to hash to segments
+ // 0-3 in order; if get_segment_idx() ever changes, recompute them.
+ uint8_t segment0 = host_cache.get_segment_idx(ip0);
+ uint8_t segment1 = host_cache.get_segment_idx(ip1);
+ uint8_t segment2 = host_cache.get_segment_idx(ip2);
+ uint8_t segment3 = host_cache.get_segment_idx(ip3);
+
+ CHECK(segment0 == 0);
+ CHECK(segment1 == 1);
+ CHECK(segment2 == 2);
+ CHECK(segment3 == 3);
+
+ // Trackers created through the segmented front end must record the index
+ // of the segment that owns them.
+ auto h0 = host_cache.find_else_create(ip0, nullptr);
+ auto h1 = host_cache.find_else_create(ip1, nullptr);
+ auto h2 = host_cache.find_else_create(ip2, nullptr);
+ auto h3 = host_cache.find_else_create(ip3, nullptr);
+
+ CHECK(segment0 == h0->get_cache_idx());
+ CHECK(segment1 == h1->get_cache_idx());
+ CHECK(segment2 == h2->get_cache_idx());
+ CHECK(segment3 == h3->get_cache_idx());
+}
+
+
// Test stats when HostCacheModule sets/changes host_cache size.
// This method is a friend of LruCacheSharedMemcap class.
TEST(host_cache_module, misc)
// cache, because sum_stats resets the pegs.
module.sum_stats(true);
- // add 3 entries
+ // add 3 entries to segment 3
SfIp ip1, ip2, ip3;
ip1.set("1.1.1.1");
ip2.set("2.2.2.2");
ip3.set("3.3.3.3");
+
host_cache.find_else_create(ip1, nullptr);
host_cache.find_else_create(ip2, nullptr);
host_cache.find_else_create(ip3, nullptr);
CHECK(ht_stats[2] == 3*mc); // bytes_in_use
CHECK(ht_stats[3] == 3); // items_in_use
- // no pruning needed for resizing higher than current size
- CHECK(host_cache.reload_resize(host_cache.mem_chunk * 10) == false);
+ // no pruning needed for resizing higher than current size in segment 3
+ CHECK(host_cache.seg_list[2]->reload_resize(host_cache.get_mem_chunk() * 10 ) == false);
module.sum_stats(true);
CHECK(ht_stats[2] == 3*mc); // bytes_in_use unchanged
CHECK(ht_stats[3] == 3); // items_in_use unchanged
- // pruning needed for resizing lower than current size
- CHECK(host_cache.reload_resize(host_cache.mem_chunk * 1.5) == true);
+ // pruning needed for resizing lower than current size in segment 3
+ CHECK(host_cache.seg_list[2]->reload_resize(host_cache.get_mem_chunk() * 1.5) == true);
module.sum_stats(true);
CHECK(ht_stats[2] == 3*mc); // bytes_in_use still unchanged
CHECK(ht_stats[3] == 3); // items_in_use still unchanged
// pruning in thread is not done when reload_mutex is already locked
- host_cache.reload_mutex.lock();
+ for(auto cache : host_cache.seg_list)
+ cache->reload_mutex.lock();
+
std::thread test_negative(try_reload_prune, false);
test_negative.join();
- host_cache.reload_mutex.unlock();
+
+ for(auto cache : host_cache.seg_list)
+ cache->reload_mutex.unlock();
+
module.sum_stats(true);
CHECK(ht_stats[2] == 3*mc); // no pruning yet
CHECK(ht_stats[3] == 3); // no pruning_yet
CHECK(ht_stats[0] == 4);
}
+
+// Test host_cache.get_segment_stats(): a valid index reports that one
+// segment, a negative index reports the aggregate across all segments.
+// (Previously only segments 0-2 were checked although the cache has 4
+// segments; loop over all of them.)
+TEST(host_cache_module, get_segment_stats)
+{
+ host_cache.init();
+ std::string str;
+
+ // Each valid index must report stats for exactly that segment.
+ for ( int i = 0; i < 4; i++ )
+ {
+ str = module.get_host_cache_segment_stats(i);
+ bool contain = str.find("Segment " + std::to_string(i) + ":") != std::string::npos;
+ CHECK_TRUE(contain);
+ }
+
+ // A negative index requests the aggregated view.
+ str = module.get_host_cache_segment_stats(-1);
+ bool contain = str.find("total cache segments: 4") != std::string::npos;
+ CHECK_TRUE(contain);
+}
+
TEST(host_cache_module, log_host_cache_messages)
{
module.log_host_cache(nullptr, true);
int main(int argc, char** argv)
{
- // FIXIT-L There is currently no external way to fully release the memory from the global host
- // cache unordered_map in host_cache.cc
MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
- return CommandLineTestRunner::RunAllTests(argc, argv);
+ int ret = CommandLineTestRunner::RunAllTests(argc, argv);
+ // term() now releases the global segmented cache, resolving the old FIXIT
+ // about memory that could never be freed externally.
+ host_cache.term();
+ return ret;
}
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2016-2023 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// host_cache_segmented_test.cc author Raza Shafiq <rshafiq@cisco.com>
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstring>
+
+#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
+
+#include <CppUTest/CommandLineTestRunner.h>
+#include <CppUTest/TestHarness.h>
+
+#include "sfip/sf_ip.h"
+
+using namespace std;
+using namespace snort;
+
+namespace snort
+{
+// Minimal stubs so the segmented-cache test links without the full
+// snort framework.
+char* snort_strdup(const char* s)
+{ return strdup(s); }
+time_t packet_time() { return 0; }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
+
+}
+
+TEST_GROUP(host_cache_segmented)
+{
+};
+
+// As exercised here, the (segments, memcap) constructor takes a PER-SEGMENT
+// memcap: 4 segments x 4000 -> 16000 total.
+TEST(host_cache_segmented, get_segments_test)
+{
+ HostCacheSegmentedIp hc(4,4000);
+ hc.init();
+ CHECK(hc.get_segments() == 4);
+ CHECK(hc.get_memcap_per_segment() == 4000);
+ CHECK(hc.get_max_size() == 16000);
+ hc.term();
+}
+
+
+// Unlike the constructor, setup() takes a TOTAL memcap and divides it
+// evenly across the segments: 2000 / 2 -> 1000 per segment.
+TEST(host_cache_segmented, cache_setup)
+{
+ HostCacheSegmentedIp hc;
+ hc.setup(2,2000);
+ hc.init();
+ CHECK(hc.get_segments() == 2);
+ CHECK(hc.get_memcap_per_segment() == 1000);
+ CHECK(hc.get_max_size() == 2000);
+ hc.term();
+}
+
+// Degenerate case: with a single segment the segmented cache behaves like a
+// plain host cache, so the per-segment memcap equals the total max size.
+TEST(host_cache_segmented, one_segment)
+{
+ HostCacheSegmentedIp single_seg_cache(1,4000);
+ single_seg_cache.init();
+ CHECK(single_seg_cache.get_segments() == 1);
+ CHECK(single_seg_cache.get_memcap_per_segment() == 4000);
+ CHECK(single_seg_cache.get_max_size() == 4000);
+ single_seg_cache.term();
+}
+
+// set_max_size() redistributes a new TOTAL memcap over the existing segment
+// count without changing it: 40000 / 16 -> 2500 per segment.
+TEST(host_cache_segmented, set_max_size_test)
+{
+ HostCacheSegmentedIp cache(16,1000);
+ cache.init();
+ CHECK(cache.get_segments() == 16);
+ CHECK(cache.get_memcap_per_segment() == 1000);
+ cache.set_max_size(40000);
+ CHECK(cache.get_segments() == 16);
+ CHECK(cache.get_memcap_per_segment() == 2500);
+ CHECK(cache.get_max_size() == 40000);
+ cache.term();
+}
+
+int main(int argc, char** argv)
+{
+ // NOTE(review): the sibling cache tests call
+ // MemoryLeakWarningPlugin::turnOffNewDeleteOverloads() before running;
+ // confirm this file really doesn't need it (all caches here are local
+ // and term()'d inside each test).
+ return CommandLineTestRunner::RunAllTests(argc, argv);
+}
#include <cstring>
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "host_tracker/host_tracker_module.h"
#include "main/snort_config.h"
#include "target_based/snort_protocols.h"
char* snort_strdup(const char* s)
{ return strdup(s); }
time_t packet_time() { return 0; }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
}
// Fake show_stats to avoid bringing in a ton of dependencies.
int main(int argc, char** argv)
{
- // FIXIT-L There is currently no external way to fully release the memory from the global host
- // cache unordered_map in host_cache.cc
+ // Segments must be initialized before any test touches the global cache.
+ host_cache.init();
MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
- return CommandLineTestRunner::RunAllTests(argc, argv);
+ int ret = CommandLineTestRunner::RunAllTests(argc, argv);
+ // Release all segment memory before exit, resolving the old FIXIT.
+ host_cache.term();
+ return ret;
}
char* snort_strdup(const char* str)
{ return strdup(str); }
time_t packet_time() { return test_time; }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
}
// There always needs to be a HostCacheIp associated with HostTracker,
// because any allocation / deallocation into the HostTracker will take up
// memory managed by the cache.
-HostCacheIp host_cache(1024);
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache(4,1024);
TEST_GROUP(host_tracker)
{
int main(int argc, char** argv)
{
- return CommandLineTestRunner::RunAllTests(argc, argv);
+ int ret = CommandLineTestRunner::RunAllTests(argc, argv);
+ // Explicitly tear down the global segmented cache before exit.
+ host_cache.term();
+ return ret;
}
#include "framework/mpse.h"
#include "helpers/process.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
+#include "host_tracker/host_tracker_module.h"
#include "ips_options/ips_options.h"
#include "log/log.h"
#include "log/messages.h"
HighAvailabilityManager::term();
SideChannelManager::term();
ModuleManager::term();
+ host_cache.term();
PluginManager::release_plugins();
ScriptManager::release_scripts();
memory::MemoryCap::term();
detection_filter_term();
term_signals();
+
}
void Snort::clean_exit(int)
memory::MemoryCap::start(*sc->memory, Stream::prune_flows);
memory::MemoryCap::print(SnortConfig::log_verbose(), true);
+ host_cache.init();
+ ((HostTrackerModule*)ModuleManager::get_module(HOST_TRACKER_NAME))->init_data();
host_cache.print_config();
TimeStart();
#include "framework/policy_selector.h"
#include "hash/xhash.h"
#include "helpers/process.h"
+#include "host_tracker/host_cache_segmented.h"
#include "latency/latency_config.h"
#include "log/messages.h"
#include "managers/action_manager.h"
EventManager::release_plugins();
IpsManager::release_plugins();
InspectorManager::release_plugins();
+ host_cache.term();
}
#endif
}
#include "appid_discovery.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "log/messages.h"
#include "packet_tracer/packet_tracer.h"
#include "control/control.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "log/messages.h"
#include "main/analyzer.h"
#include "main/analyzer_command.h"
THREAD_LOCAL PacketTracer* s_pkt_trace = nullptr;
THREAD_LOCAL Stopwatch<SnortClock>* pt_timer = nullptr;
void PacketTracer::daq_log(const char*, ...) { }
+void FatalError(const char* fmt, ...) { (void)fmt; exit(1); }
// Stubs for packet
Packet::Packet(bool) {}
static AppIdInspector* s_ins = nullptr;
static ServiceDiscovery* s_discovery_manager = nullptr;
-HostCacheIp host_cache(50);
+HostCacheIp default_host_cache(LRU_CACHE_INITIAL_SIZE);
+HostCacheSegmentedIp host_cache(1,50);
AppId HostTracker::get_appid(Port, IpProtocol, bool, bool)
{
return APP_ID_NONE;
int main(int argc, char** argv)
{
int rc = CommandLineTestRunner::RunAllTests(argc, argv);
+ // Explicitly tear down the global segmented cache before exit.
+ host_cache.term();
return rc;
}
#include "helpers/discovery_filter.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "protocols/packet.h"
#ifdef UNIT_TEST
#include "control/control.h"
#include "host_tracker/host_cache.h"
+#include "host_tracker/host_cache_segmented.h"
#include "log/messages.h"
#include "lua/lua.h"
#include "main/snort_config.h"