string(APPEND CMAKE_CXX_FLAGS " ${EXTRA_CXX_FLAGS}")
string(APPEND CMAKE_EXE_LINKER_FLAGS " ${EXTRA_LINKER_FLAGS}")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " ${EXTRA_LINKER_FLAGS}")
-foreach (EXTRA_LIBRARY IN LISTS EXTRA_LIBRARIES)
- link_libraries(${EXTRA_LIBRARY})
-endforeach (EXTRA_LIBRARY)
include_directories (${PROJECT_BINARY_DIR})
include_directories (${PROJECT_SOURCE_DIR})
TCMalloc: OFF")
endif ()
+if (HAVE_JEMALLOC)
+ message("\
+ JEMalloc: ON")
+else ()
+ message("\
+ JEMalloc: OFF")
+endif ()
+
if (HAVE_UUID)
message("\
UUID: ON")
--- /dev/null
+# - Try to find jemalloc
+# Once done this will define
+# JEMALLOC_FOUND - System has jemalloc
+# JEMALLOC_INCLUDE_DIRS - The jemalloc include directories
+# JEMALLOC_LIBRARIES - The libraries needed to use jemalloc
+
+# pkg-config is optional; its results are used only as HINTS below
+find_package(PkgConfig QUIET)
+pkg_check_modules(PC_JEMALLOC QUIET jemalloc)
+
+find_path(JEMALLOC_INCLUDE_DIR
+  NAMES jemalloc/jemalloc.h
+  HINTS ${PC_JEMALLOC_INCLUDE_DIRS}
+)
+
+# STATIC_JEMALLOC is set by the configure wrapper (--enable-jemalloc-static).
+# Listing libjemalloc.a first prefers the static archive but still falls
+# back to the shared library when no archive is installed.
+if ( STATIC_JEMALLOC )
+  find_library(JEMALLOC_LIBRARY
+    NAMES libjemalloc.a jemalloc
+    HINTS ${PC_JEMALLOC_LIBRARY_DIRS}
+)
+else()
+  find_library(JEMALLOC_LIBRARY
+    NAMES jemalloc
+    HINTS ${PC_JEMALLOC_LIBRARY_DIRS}
+)
+endif()
+
+# extract the version string from the JEMALLOC_VERSION #define in the header
+if(JEMALLOC_INCLUDE_DIR)
+  set(_version_regex "^#define[ \t]+JEMALLOC_VERSION[ \t]+\"([^\"]+)\".*")
+  file(STRINGS "${JEMALLOC_INCLUDE_DIR}/jemalloc/jemalloc.h"
+    JEMALLOC_VERSION REGEX "${_version_regex}")
+  string(REGEX REPLACE "${_version_regex}" "\\1"
+    JEMALLOC_VERSION "${JEMALLOC_VERSION}")
+  unset(_version_regex)
+endif()
+
+include(FindPackageHandleStandardArgs)
+# handle the QUIETLY and REQUIRED arguments and set JEMALLOC_FOUND to TRUE
+# if all listed variables are TRUE and the requested version matches.
+# NOTE(review): the package is requested as find_package(JEMalloc) but FPHSA
+# is passed "Jemalloc"; CMake >= 3.17 emits an FPHSA name-mismatch warning for
+# this -- confirm the module filename/case or pass NAME_MISMATCHED.
+find_package_handle_standard_args(Jemalloc REQUIRED_VARS
+  JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR
+  VERSION_VAR JEMALLOC_VERSION)
+
+if(JEMALLOC_FOUND)
+  set(JEMALLOC_LIBRARIES ${JEMALLOC_LIBRARY})
+  set(JEMALLOC_INCLUDE_DIRS ${JEMALLOC_INCLUDE_DIR})
+endif()
+
+mark_as_advanced(JEMALLOC_INCLUDE_DIR JEMALLOC_LIBRARY)
+
set ( USE_TSC_CLOCK ${ENABLE_TSC_CLOCK} )
set ( NO_PROFILER ${DISABLE_SNORT_PROFILER} )
set ( DEEP_PROFILING ${ENABLE_DEEP_PROFILING} )
-set ( NO_MEM_MGR ${DISABLE_MEMORY_MANAGER} )
+set ( ENABLE_MEMORY_OVERLOADS ${ENABLE_MEMORY_OVERLOADS} )
+set ( ENABLE_MEMORY_PROFILER ${ENABLE_MEMORY_PROFILER} )
+set ( ENABLE_RULE_PROFILER ${ENABLE_RULE_PROFILER} )
if ( ENABLE_LARGE_PCAP )
set ( _FILE_OFFSET_BITS 64 )
set ( HAVE_TCMALLOC "1" )
endif ( ENABLE_TCMALLOC )
+if ( ENABLE_JEMALLOC )
+ if ( ENABLE_ADDRESS_SANITIZER )
+ message ( SEND_ERROR "JEMalloc cannot be used at the same time as address sanitizer!" )
+ endif ()
+ find_package ( JEMalloc REQUIRED )
+ set ( JEMALLOC_C_FLAGS "-fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free" )
+ set ( HAVE_JEMALLOC "1" )
+endif ( ENABLE_JEMALLOC )
+
if ( ENABLE_CODE_COVERAGE )
include(${CMAKE_MODULE_PATH}/CodeCoverage.cmake)
endif ( ENABLE_CODE_COVERAGE )
set ( EXTRA_C_FLAGS "${EXTRA_C_FLAGS} ${HARDENED_CXX_FLAGS} ${DEBUGGING_C_FLAGS} ${SANITIZER_CXX_FLAGS} ${TCMALLOC_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" )
set ( EXTRA_CXX_FLAGS "${EXTRA_CXX_FLAGS} ${HARDENED_CXX_FLAGS} ${DEBUGGING_C_FLAGS} ${SANITIZER_CXX_FLAGS} ${TCMALLOC_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" )
set ( EXTRA_LINKER_FLAGS "${EXTRA_LINKER_FLAGS} ${HARDENED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS} ${COVERAGE_LINKER_FLAGS}" )
-foreach (EXTRA_LIBRARY IN LISTS COVERAGE_LIBRARIES TCMALLOC_LIBRARIES )
+foreach (EXTRA_LIBRARY IN LISTS COVERAGE_LIBRARIES TCMALLOC_LIBRARIES JEMALLOC_LIBRARY )
list ( APPEND EXTRA_LIBRARIES ${EXTRA_LIBRARY} )
endforeach ()
option ( ENABLE_PROFILE "Enable profiling options (developers only)" OFF )
option ( DISABLE_SNORT_PROFILER "Disable snort Profiler (developers only)" OFF )
option ( ENABLE_DEEP_PROFILING "Enable deep profiling of snort functions (developers only)" OFF )
-option ( DISABLE_MEMORY_MANAGER "Disable snort memory manager (developers only)" OFF )
+option ( ENABLE_MEMORY_OVERLOADS "Use new / delete overloads for profiling (developers only)" OFF )
+option ( ENABLE_MEMORY_PROFILER "Enable memory profiler (developers only)" OFF )
+option ( ENABLE_RULE_PROFILER "Enable rule keyword profiler (developers only)" OFF )
option ( ENABLE_ADDRESS_SANITIZER "enable address sanitizer support" OFF )
option ( ENABLE_THREAD_SANITIZER "enable thread sanitizer support" OFF )
option ( ENABLE_UB_SANITIZER "enable undefined behavior sanitizer support" OFF )
option ( ENABLE_TCMALLOC "enable using tcmalloc for dynamic memory management" OFF )
+option ( ENABLE_JEMALLOC "enable using jemalloc for dynamic memory management" OFF )
option ( ENABLE_CODE_COVERAGE "Whether to enable code coverage support" OFF )
# signals
set(DAQ_CPPFLAGS "-I${DAQ_INCLUDE_DIR}")
endif()
-if(DISABLE_MEMORY_MANAGER)
- set(NO_MEM_MGR_CPPFLAGS "-DNO_MEM_MGR")
+if(ENABLE_MEMORY_OVERLOADS)
+ set(MEMORY_OVERLOADS_CPPFLAGS "-DENABLE_MEMORY_OVERLOADS")
+endif()
+
+if(ENABLE_MEMORY_PROFILER)
+ set(MEMORY_PROFILER_CPPFLAGS "-DENABLE_MEMORY_PROFILER")
+endif()
+
+if(ENABLE_RULE_PROFILER)
+ set(RULE_PROFILER_CPPFLAGS "-DENABLE_RULE_PROFILER")
endif()
if(DISABLE_SNORT_PROFILER)
/* disable snort profiler */
#cmakedefine NO_PROFILER 1
-/* disable snort memory manager */
-#cmakedefine NO_MEM_MGR 1
+/* enable memory profiler */
+#cmakedefine ENABLE_MEMORY_PROFILER 1
+
+/* enable rule profiler */
+#cmakedefine ENABLE_RULE_PROFILER 1
+
+/* enable deep profiling */
+#cmakedefine DEEP_PROFILING 1
+
+/* enable new and delete overloads for profiling */
+#cmakedefine ENABLE_MEMORY_OVERLOADS 1
/* signal to dump stats */
#cmakedefine SIGNAL_SNORT_DUMP_STATS @SIGNAL_SNORT_DUMP_STATS@
/* safec available */
#cmakedefine HAVE_SAFEC 1
+/* jemalloc available */
+#cmakedefine HAVE_JEMALLOC 1
+
/* uuid available */
#cmakedefine HAVE_UUID 1
--enable-gprof-profile enable gprof profiling options (developers only)
--disable-snort-profiler
disable snort performance profiling (cpu and memory) (developers only)
- --disable-memory-manager
- disable snort memory manager (developers only)
+ --enable-memory-overloads
+ overload new and delete
+ --enable-memory-profiler
+ enable memory profiler
+ --enable-rule-profiler enable rule keyword profiler (developers only)
+ --enable-deep-profiling enable deep (multi-level) profiling (developers only)
--disable-corefiles prevent Snort from generating core files
--enable-address-sanitizer
enable address sanitizer support
--enable-thread-sanitizer
enable thread sanitizer support
- --enable-ub-sanitizer
- enable undefined behavior sanitizer support
- --enable-tcmalloc
- enable using tcmalloc for dynamic memory management
+ --enable-ub-sanitizer enable undefined behavior sanitizer support
+ --enable-tcmalloc enable using tcmalloc for dynamic memory management
+ --enable-jemalloc enable using jemalloc for dynamic memory management
+ --enable-jemalloc-static
+ same as --enable-jemalloc but linked statically
--enable-appid-third-party
enable third party appid
--enable-unit-tests build unit tests
append_cache_entry ENABLE_TSC_CLOCK BOOL true
;;
--disable-snort-profiler)
- append_cache_entry DISABLE_SNORT_PROFILER BOOL true
+ append_cache_entry DISABLE_SNORT_PROFILER BOOL false
;;
- --disable-memory-manager)
- append_cache_entry DISABLE_MEMORY_MANAGER BOOL true
+ --enable-memory-overloads)
+ append_cache_entry ENABLE_MEMORY_OVERLOADS BOOL true
+ ;;
+ --enable-memory-profiler)
+ append_cache_entry ENABLE_MEMORY_PROFILER BOOL true
+ ;;
+ --enable-rule-profiler)
+ append_cache_entry ENABLE_RULE_PROFILER BOOL true
+ ;;
+ --enable-deep-profiling)
+ append_cache_entry DEEP_PROFILING BOOL true
;;
--disable-large-pcap)
append_cache_entry ENABLE_LARGE_PCAP BOOL false
--disable-tcmalloc)
append_cache_entry ENABLE_TCMALLOC BOOL false
;;
+ --enable-jemalloc)
+ append_cache_entry ENABLE_JEMALLOC BOOL true
+ append_cache_entry STATIC_JEMALLOC BOOL false
+ ;;
+ --disable-jemalloc)
+ append_cache_entry ENABLE_JEMALLOC BOOL false
+ ;;
+ --enable-jemalloc-static)
+ append_cache_entry ENABLE_JEMALLOC BOOL true
+ append_cache_entry STATIC_JEMALLOC BOOL true
+ ;;
+ --disable-jemalloc-static)
+ append_cache_entry ENABLE_JEMALLOC BOOL false
+ ;;
--enable-appid-third-party)
;;
--enable-unit-tests)
* New latency monitoring and enforcement
* Piglets to facilitate component testing
* Inspection Events
-* Automake and Cmake
* Autogenerate reference documentation
=== Efficacy
* New latency monitoring and enforcement
* Piglets to facilitate component testing
* Inspection Events
-* Automake and Cmake
* Autogenerate reference documentation
Additional features are on the road map:
URL: www.snort.org
Version: @VERSION@
Libs: -L${libdir}/snort
-Cflags: -I${includedir}/snort @DEEP_PROFILING_CPPFLAGS@ @NO_MEM_MGR_CPPFLAGS@ @NO_PROFILER_CPPFLAGS@ @TP_APPID_CPPFLAGS@ @TSC_CPPFLAGS@
+Cflags: -I${includedir}/snort @DEEP_PROFILING_CPPFLAGS@ @MEMORY_OVERLOADS_CPPFLAGS@ @MEMORY_PROFILER_CPPFLAGS@ @RULE_PROFILER_CPPFLAGS@ @NO_PROFILER_CPPFLAGS@ @TP_APPID_CPPFLAGS@ @TSC_CPPFLAGS@
target_link_libraries( snort
${EXTERNAL_LIBRARIES}
+ ${EXTRA_LIBRARIES}
)
# Solaris requires libnsl and libsocket for various network-related library functions
TCP resets (TCP connections) or ICMP unreachable packets.
React sends an HTML page to the client, a RST to the server and blocks the flow.
-It is using payload_injector utilty. payload_injector should be configured when
+It is using payload_injector utility. payload_injector should be configured when
react is used.
Rewrite enables overwrite packet contents based on "replace" option in the
}
#endif
}
- ModuleManager::accumulate_offload("search_engine");
- ModuleManager::accumulate_offload("detection");
+ ModuleManager::accumulate_module("search_engine");
+ ModuleManager::accumulate_module("detection");
// FIXIT-M break this over-coupling. In reality we shouldn't be evaluating latency in offload.
PacketLatency::tterm();
void set_file_policy(FilePolicyBase* fp) { file_policy = fp; }
FilePolicyBase* get_file_policy() { return file_policy; }
- size_t size_of() override
- { return sizeof(*this); }
-
private:
void init_file_context(FileDirection, FileContext*);
FileContext* find_main_file_context(FilePosition, FileDirection, size_t id = 0);
initiating packet.
The HA subsystem exchanges two high level message types:
- - DELETE: Indicate to the partner that a session has neen removed. No
+ - DELETE: Indicate to the partner that a session has been removed. No
additional HA client status will be exchanged. (Not used for DAQ-backed
storage.)
- UPDATE: Indicate all other state changes. The message always includes
Flow::Flow()
{
- memory::MemoryCap::update_allocations(sizeof(*this) + sizeof(FlowStash));
constexpr size_t offset = offsetof(Flow, key);
// FIXIT-L need a struct to zero here to make future proof
memset((uint8_t*)this+offset, 0, sizeof(*this)-offset);
Flow::~Flow()
{
- memory::MemoryCap::update_deallocations(sizeof(*this) + sizeof(FlowStash));
term();
}
flow_data->prev = fd;
flow_data = fd;
-
- // this is after actual allocation so we can't prune beforehand
- // but if we are that close to the edge we are in trouble anyway
- // large allocations can be accounted for directly
- fd->update_allocations(fd->size_of());
-
return 0;
}
fd->prev->next = fd->next;
fd->next->prev = fd->prev;
}
- fd->update_deallocations(fd->size_of());
delete fd;
}
{
FlowData* tmp = flow_data;
flow_data = flow_data->next;
- tmp->update_deallocations(tmp->size_of());
delete tmp;
}
}
{
Flow* new_flow = new Flow();
push(new_flow);
- memory::MemoryCap::update_allocations(sizeof(HashNode) + sizeof(FlowKey));
}
else if ( !prune_stale(timestamp, nullptr) )
{
if ( flow->session && flow->pkt_type != key->pkt_type )
flow->term();
- memory::MemoryCap::update_allocations(config.proto[to_utype(key->pkt_type)].cap_weight);
flow->last_data_seen = timestamp;
return flow;
{
unlink_uni(flow);
- // FIXIT-M This check is added for offload case where both Flow::reset
- // and Flow::retire try remove the flow from hash. Flow::reset should
- // just mark the flow as pending instead of trying to remove it.
- if ( !hash_table->release_node(flow->key) )
- memory::MemoryCap::update_deallocations(config.proto[to_utype(flow->key->pkt_type)].cap_weight);
+ hash_table->release_node(flow->key);
}
bool FlowCache::release(Flow* flow, PruneReason reason, bool do_cleanup)
//The flow should not be removed from the hash before reset
hash_table->remove();
delete flow;
- memory::MemoryCap::update_deallocations(sizeof(HashNode) + sizeof(FlowKey));
--flows_allocated;
++deleted;
--num_to_delete;
delete flow;
delete_stats.update(FlowDeleteState::FREELIST);
- memory::MemoryCap::update_deallocations(sizeof(HashNode) + sizeof(FlowKey));
--flows_allocated;
++deleted;
while ( Flow* flow = (Flow*)hash_table->pop() )
{
delete flow;
- memory::MemoryCap::update_deallocations(sizeof(HashNode) + sizeof(FlowKey));
--flows_allocated;
}
#include "detection/detection_engine.h"
#include "main/snort_config.h"
#include "managers/inspector_manager.h"
-#include "memory/memory_cap.h"
#include "packet_io/active.h"
#include "packet_tracer/packet_tracer.h"
#include "protocols/icmp4.h"
cache->timeout(1, cur_time);
}
-void FlowControl::preemptive_cleanup()
-{
- // FIXIT-RC is there a possibility of this looping forever?
- while ( memory::MemoryCap::over_threshold() )
- {
- if ( !prune_one(PruneReason::PREEMPTIVE, true) )
- break;
- }
-}
-
Flow* FlowControl::stale_flow_cleanup(FlowCache* cache, Flow* flow, Packet* p)
{
if ( p->pkth->flags & DAQ_PKT_FLAG_NEW_FLOW )
p->disable_inspect = flow->is_inspection_disabled();
last_pkt_type = p->type();
- preemptive_cleanup();
// If this code is executed on a flow in SETUP state, it will result in a packet from both
// client and server on packets from 0.0.0.0 or ::
private:
void set_key(snort::FlowKey*, snort::Packet*);
unsigned process(snort::Flow*, snort::Packet*);
- void preemptive_cleanup();
void update_stats(snort::Flow*, snort::Packet*);
private:
{
if ( handler )
handler->rem_ref();
-
- assert(mem_in_use == 0);
- assert(net_allocation_calls == 0);
-}
-
-void FlowData::update_allocations(size_t n)
-{
- memory::MemoryCap::update_allocations(n);
-
- if (n > 0)
- {
- mem_in_use += n;
- net_allocation_calls++;
- }
-}
-
-void FlowData::update_deallocations(size_t n)
-{
- memory::MemoryCap::update_deallocations(n);
-
- if (n > 0)
- {
- assert(mem_in_use >= n);
- mem_in_use -= n;
- assert(net_allocation_calls > 0);
- net_allocation_calls--;
- }
}
RuleFlowData::RuleFlowData(unsigned u) :
static unsigned create_flow_data_id()
{ return ++flow_data_id; }
- // Allocations and deallocations must balance. It is not enough that the total number of bytes
- // allocated and deallocated are equal. They must be allocated and deallocated in the same
- // increments or roundoffs done inside the functions may not balance.
- void update_allocations(size_t);
- void update_deallocations(size_t);
Inspector* get_handler() { return handler; }
- // return fixed size (could be an approx avg)
- // this must be fixed for life of flow data instance
- // track significant supplemental allocations with the above updaters
- virtual size_t size_of() = 0;
+ // deprecated - do not implement
+ virtual size_t size_of() { return 0; }
virtual void handle_expected(Packet*) { }
virtual void handle_retransmit(Packet*) { }
private:
static unsigned flow_data_id;
Inspector* handler;
- size_t mem_in_use = 0;
- unsigned net_allocation_calls = 0;
unsigned id;
};
IDLE,
EXCESS,
UNI,
- PREEMPTIVE,
MEMCAP,
HA,
STALE,
{
public:
StashGenericObject(int type) : object_type(type)
- {
+ { }
- }
virtual ~StashGenericObject() = default;
+
int get_object_type() const
- {
- return object_type;
- }
- virtual size_t size_of() const = 0;
+ { return object_type; }
private:
int object_type;
{
type = STASH_ITEM_TYPE_GENERIC_OBJECT;
val.generic_obj_val = obj;
- memory::MemoryCap::update_allocations(sizeof(*this) + obj->size_of());
}
~StashItem()
delete val.str_val;
break;
case STASH_ITEM_TYPE_GENERIC_OBJECT:
- memory::MemoryCap::update_deallocations(sizeof(*this) + val.generic_obj_val->size_of());
delete val.generic_obj_val;
default:
break;
#include "main/snort_config.h"
#include "main/snort_debug.h"
#include "managers/inspector_manager.h"
-#include "memory/memory_cap.h"
#include "packet_io/active.h"
#include "packet_tracer/packet_tracer.h"
#include "protocols/icmp4.h"
void snort::trace_vprintf(const char*, TraceLevel, const char*, const Packet*, const char*, va_list) {}
uint8_t snort::TraceApi::get_constraints_generation() { return 0; }
void snort::TraceApi::filter(const Packet&) {}
-namespace memory
-{
-void MemoryCap::update_allocations(size_t) { }
-void MemoryCap::update_deallocations(size_t) { }
-bool MemoryCap::over_threshold() { return true; }
-}
namespace snort
{
#include "detection/detection_engine.h"
#include "main/snort_config.h"
#include "managers/inspector_manager.h"
-#include "memory/memory_cap.h"
#include "packet_io/active.h"
#include "packet_tracer/packet_tracer.h"
#include "protocols/icmp4.h"
bool ExpectCache::is_expected(Packet*) { return true; }
Flow* HighAvailabilityManager::import(Packet&, FlowKey&) { return nullptr; }
-namespace memory
-{
-bool MemoryCap::over_threshold() { return true; }
-}
-
namespace snort
{
namespace layer
using namespace snort;
using namespace std;
-
static DataBus* DB = nullptr;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
-class TestStashObject : public StashGenericObject
-{
-public:
- TestStashObject(int type) : StashGenericObject(type)
- {
-
- }
-
- size_t size_of() const override
- { return sizeof(*this); }
-};
-
-
template<class Type>
class DBConsumer : public DataHandler
{
TEST(stash_tests, new_generic_object)
{
FlowStash stash;
- TestStashObject *test_object = new TestStashObject(111);
+ StashGenericObject *test_object = new StashGenericObject(111);
stash.store("item_1", test_object);
StashGenericObject *retrieved_object;
CHECK(stash.get("item_1", retrieved_object));
POINTERS_EQUAL(test_object, retrieved_object);
- CHECK_EQUAL(test_object->get_object_type(), ((TestStashObject*)retrieved_object)->get_object_type());
+ CHECK_EQUAL(test_object->get_object_type(), ((StashGenericObject*)retrieved_object)->get_object_type());
}
TEST(stash_tests, update_generic_object)
{
FlowStash stash;
- TestStashObject *test_object = new TestStashObject(111);
+ StashGenericObject *test_object = new StashGenericObject(111);
stash.store("item_1", test_object);
- TestStashObject *new_test_object = new TestStashObject(111);
+ StashGenericObject *new_test_object = new StashGenericObject(111);
stash.store("item_1", new_test_object);
StashGenericObject *retrieved_object;
TEST(stash_tests, mixed_items)
{
FlowStash stash;
- TestStashObject *test_object = new TestStashObject(111);
+ StashGenericObject *test_object = new StashGenericObject(111);
stash.store("item_1", 10);
stash.store("item_2", "value_2");
StashGenericObject *retrieved_object;
CHECK(stash.get("item_4", retrieved_object));
POINTERS_EQUAL(test_object, retrieved_object);
- CHECK_EQUAL(test_object->get_object_type(), ((TestStashObject*)retrieved_object)->get_object_type());
+ CHECK_EQUAL(test_object->get_object_type(), ((StashGenericObject*)retrieved_object)->get_object_type());
}
TEST(stash_tests, store_ip)
#include "framework/inspector.h"
#include "framework/data_bus.h"
#include "main/snort_config.h"
-#include "memory/memory_cap.h"
#include "protocols/ip.h"
#include "protocols/layer.h"
#include "protocols/packet.h"
void Inspector::add_ref() {}
-void memory::MemoryCap::update_allocations(size_t) {}
-
-void memory::MemoryCap::update_deallocations(size_t) {}
-
-void memory::MemoryCap::free_space(size_t) { }
-
bool HighAvailabilityManager::active() { return false; }
FlowHAState::FlowHAState() = default;
// this is the current version of the base api
// must be prefixed to subtype version
-#define BASE_API_VERSION 10
+#define BASE_API_VERSION 11
// set options to API_OPTIONS to ensure compatibility
#ifndef API_OPTIONS
int main(int argc, char** argv)
{
+ MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
return CommandLineTestRunner::RunAllTests(argc, argv);
}
int main(int argc, char** argv)
{
+ MemoryLeakWarningPlugin::turnOffNewDeleteOverloads();
return CommandLineTestRunner::RunAllTests(argc, argv);
}
data.
class Obfuscator is implemented with a std::set to provide in order
- access for pottentialy out-of-order insertions.
+ access for potentially out-of-order insertions.
Currently does not support streaming mode. It should be possible to
iterate over contiguous chunks of data, alternating between obfuscated
private:
void reap_command(AnalyzerCommand* ac);
+ // we could just let the analyzer own this pointer and delete
+ // immediately after getting the data but that creates memory
+ // count mismatches between main and packet threads. since the
+    // startup swapper has no old config to delete, only about 32
+    // bytes are held afterward.
+ Swapper* swapper = nullptr;
+
std::thread* athread = nullptr;
unsigned idx = (unsigned)-1;
};
assert(!athread);
LogMessage("++ [%u] %s\n", idx, analyzer->get_source());
- Swapper* ps = new Swapper(SnortConfig::get_main_conf());
- athread = new std::thread(std::ref(*analyzer), ps, ++run_num);
+ swapper = new Swapper(SnortConfig::get_main_conf());
+ athread = new std::thread(std::ref(*analyzer), swapper, ++run_num);
}
void Pig::stop()
assert(analyzer);
assert(athread);
+ delete swapper;
+ swapper = nullptr;
+
athread->join();
delete athread;
athread = nullptr;
void Analyzer::add_to_retry_queue(DAQ_Msg_h daq_msg)
{
- // Temporarily increase memcap until message is finalized in case
- // DAQ makes a copy of the data buffer.
- memory::MemoryCap::update_allocations(daq_msg_get_data_len(daq_msg));
retry_queue->put(daq_msg);
}
void Analyzer::process_daq_msg(DAQ_Msg_h msg, bool retry)
{
oops_handler->set_current_message(msg);
+ memory::MemoryCap::free_space();
+
DAQ_Verdict verdict = DAQ_VERDICT_PASS;
switch (daq_msg_get_type(msg))
{
struct timeval now;
packet_gettimeofday(&now);
DAQ_Msg_h msg;
+
while ((msg = retry_queue->get(&now)) != nullptr)
{
process_daq_msg(msg, true);
daq_stats.retries_processed++;
-
- // Decrease memcap now that msg has been finalized.
- memory::MemoryCap::update_deallocations(daq_msg_get_data_len(msg));
}
}
}
RateFilter_Cleanup();
TraceApi::thread_term();
+
+ ModuleManager::accumulate_module("memory");
}
Analyzer::Analyzer(SFDAQInstance* instance, unsigned i, const char* s, uint64_t msg_cnt)
local_analyzer = this;
ps->apply(*this);
- delete ps;
if (SnortConfig::get_conf()->pcap_show())
show_source();
const SnortConfig* sc = SnortConfig::get_conf();
- memory::MemoryCap::print();
-
IpsManager::global_term(sc);
HostAttributesManager::term();
ModuleManager::term();
PluginManager::release_plugins();
ScriptManager::release_scripts();
+ memory::MemoryCap::cleanup();
+
term_signals();
}
set_quick_exit(false);
- memory::MemoryCap::calculate();
- memory::MemoryCap::print();
+ memory::MemoryCap::setup(*sc->memory, sc->thread_config->get_instance_max());
+ memory::MemoryCap::print(SnortConfig::log_verbose());
+
host_cache.print_config();
TimeStart();
Swapper::~Swapper()
{
- if ( new_conf )
+ if ( new_conf and old_conf )
+ // don't do this to startup configs
InspectorManager::clear_removed_inspectors(new_conf);
+
if ( old_conf )
delete old_conf;
}
SFDAQInstance* SFDAQ::get_local_instance() { return nullptr; }
}
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
using namespace snort;
//--------------------------------------------------------------------------
for ( auto* mh : mod_hooks )
{
+ if ( !strcmp(mh->mod->name, "memory") )
+ continue;
+
lock_guard<mutex> lock(stats_mutex);
mh->mod->prep_counts();
mh->mod->sum_stats(true);
}
}
-void ModuleManager::accumulate_offload(const char* name)
+void ModuleManager::accumulate_module(const char* name)
{
ModHook* mh = get_hook(name);
if ( mh )
static void dump_stats(const char* skip = nullptr, bool dynamic = false);
static void accumulate();
- static void accumulate_offload(const char* name);
+ static void accumulate_module(const char* name);
static void reset_stats(SnortConfig*);
static void reset_stats(clear_counter_type_t);
set ( MEMORY_SOURCES
${MEMCAP_INCLUDES}
+ memory_allocator.cc
+ memory_allocator.h
memory_cap.cc
+ memory_config.h
+ memory_manager.cc
memory_module.cc
memory_module.h
- memory_config.h
prune_handler.cc
prune_handler.h
)
-This directory provides a simple mechanism for implementing a memory cap.
-Modules can use the MemoryCap::update_allocations() and
-update_deallocations() calls to self-report when they allocate or free
-heap memory. If the total memory allocations exceed the configured memory
-cap, flow pruning is done to free up additional memory.
+Snort memory management monitors memory usage and prunes flows as needed to keep the total process
+usage below the configured limit, if any. There are two ways to build memory management: build with
+jemalloc (--enable-jemalloc) or enable the new / delete overloads (--enable-memory-overloads). The
+latter option is required to support memory profiling (--enable-memory-profiler). Profiling is not
+enabled by default due to performance concerns and is viewed as a developer tool: apart from cache
+memcaps, users should not have to care about how Snort allocates memory, only that it doesn't
+exceed the configured limit if any.
-This mechanism is approximate and does not directly reflect the activities
-of the memory allocator or the OOM killer.
+tcmalloc builds (--enable-tcmalloc) do not support memory management. A process total is available
+from the tcmalloc extensions but it is too expensive to call per packet. Checking every N packets
+would also mean potentially freeing K > 1 flows after each exceeded event. Also, a process total
+does not allow pruning only the threads that are over limit. tcmalloc does provide a performance
+advantage over glibc so that may be preferred for deployments that don't need memory management.
-TODO:
+jemalloc is preferred because it is quicker and uses less memory since the implementation does not
+require memory tracking. jemalloc provides access to the current thread allocated total (which is
+between the number that Snort requests and what the system footprint is).
+
+memory_module.* - provides parameters and peg counts. The key parameters are the process_cap,
+thread_cap, and threshold. The caps are in bytes, and the threshold is a percentage of the caps
+specified.
+
+memory_manager.cc - when enabled with --enable-memory-overloads, overloads new and delete operators
+to support memory tracking. Metadata is allocated in front of the requested memory to store
+the size allocated so the deallocation can be tracked. Due to the drag on performance, this is
+disabled by default.
+
+memory_allocator.* - implements the malloc and free calls used by the operator new and delete
+overloads.
+
+memory_config.h - provides MemoryConfig used by MemoryCap and stored in SnortConfig.
+
+memory_cap.* - provides the logic to enforce the thread cap by calling the prune handler. Tracks
+thread usage in pegs and updates the memory profiler if built. To avoid confusion, thread_cap
+refers to the configured maximums, while thread_limit refers to the configured percentage of the
+caps (memory.cap * memory.threshold / 100). The jemalloc specific code is here.
+
+prune_handler.* - implements the call to stream to prune.
+
+The current iteration of the memory manager is exclusively preemptive. MemoryCap::free_space is
+called by the analyzer before each DAQ message is processed. If thread_usage > thread_limit, a
+single flow will be pruned. Demand-based pruning, i.e. enforcing that each allocation stays below
+limit, is both unnecessary and bug prone (due to reentrancy when pruning triggers flushing, which
+in turn causes more allocations).
+
+This implementation has the following limitations:
+
+* If the overload manager is built, it only tracks memory allocated with C++ new. Specifically,
+ direct calls to malloc or calloc which may be made by libraries are not tracked.
+
+* Packet thread tracking does not include heap overhead, which can be substantial.
+
+* Non-packet threads are assumed to have bounded memory usage, eg via a cache.
+
+* Heap managers tend to acquire memory from the system quickly and release back much more slowly,
+ if ever. It is also relatively expensive to force a release.
+
+* Therefore, pruning a single flow almost certainly won't release memory back to the system.
+
+For these reasons, the goal of the memory manager is to prevent allocation past the limit rather
+than try to reclaim memory allocated past the limit. This means that the configured limit must be
+well enough below the actual hard limit, for example the limit enforced by cgroups, such that the
+processing of a single packet will not push us over. It must also allow for additional heap
+overhead.
+
+Future work:
+
+* Support simplified configuration of a process cap instead of a thread cap. Implement a MemoryCap
+ method that can be called to inform the memory module of various cache related memcaps. Deduct
+ the startup ru_maxrss and the sum of memcaps from the configured process cap and then divide by
+ --max-packet-threads to get the effective thread cap.
+
+* Compensate for heap fragmentation and other overhead by using the current process footprint
+ (process_total below) as feedback to adjust the current packet thread limits:
+
+ thread_limit = [(cap - (process_total - sum_thread_usage)) / num_threads] * threshold
+
+* Recognize when a memory leak drives excessive pruning.
-- possibly add eventing
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2016-2021 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// memory_allocator.cc author Joel Cornett <jocornet@cisco.com>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "memory_allocator.h"
+
+#include <cstdlib>
+
+namespace memory
+{
+
+// FIXIT-L (de)allocate() could be inlined if defined in memory_manager.cc
+
+// pass-through to the C heap; returns nullptr on failure (malloc contract)
+void* MemoryAllocator::allocate(size_t n)
+{ return malloc(n); }
+
+// p must come from allocate(); nullptr is accepted (free contract)
+void MemoryAllocator::deallocate(void* p)
+{ free(p); }
+
+} // namespace memory
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2016-2021 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// memory_allocator.h author Joel Cornett <jocornet@cisco.com>
+
+#ifndef MEMORY_ALLOCATOR_H
+#define MEMORY_ALLOCATOR_H
+
+#include <cstddef>
+
+namespace memory
+{
+
+// stateless facade over malloc/free; per the memory dev notes, these are the
+// calls used by the operator new/delete overloads (memory_manager.cc)
+struct MemoryAllocator
+{
+    static void* allocate(size_t);
+    static void deallocate(void*);
+};
+
+} // namespace memory
+
+#endif
#include "config.h"
#endif
+#include <malloc.h>
+#include <sys/resource.h>
+
#include <cassert>
+#include <vector>
+
+#ifdef HAVE_JEMALLOC
+#include <jemalloc/jemalloc.h>
+#endif
#include "memory_cap.h"
#include "log/messages.h"
#include "main/snort_config.h"
#include "main/snort_types.h"
+#include "main/thread.h"
#include "profiler/memory_profiler_active_context.h"
#include "utils/stats.h"
#include "memory_module.h"
#include "prune_handler.h"
-#ifdef UNIT_TEST
-#include "catch/snort_catch.h"
-#endif
-
using namespace snort;
namespace memory
{
+static MemoryCounts ctl_mem_stats;
+static std::vector<MemoryCounts> pkt_mem_stats;
+
namespace
{
-struct Tracker
+// -----------------------------------------------------------------------------
+// helpers
+// -----------------------------------------------------------------------------
+
+#ifdef HAVE_JEMALLOC
+static size_t get_usage(MemoryCounts& mc)
{
- void allocate(size_t n)
- { mem_stats.allocated += n; ++mem_stats.allocations; }
+ static THREAD_LOCAL uint64_t* alloc_ptr = nullptr, * dealloc_ptr = nullptr;
- void deallocate(size_t n)
+ if ( !alloc_ptr )
{
- mem_stats.deallocated += n; ++mem_stats.deallocations;
- assert(mem_stats.deallocated <= mem_stats.allocated);
- assert(mem_stats.deallocations <= mem_stats.allocations);
- assert(mem_stats.allocated or !mem_stats.allocations);
+ size_t sz = sizeof(alloc_ptr);
+ // __STRDUMP_DISABLE__
+ mallctl("thread.allocatedp", (void*)&alloc_ptr, &sz, nullptr, 0);
+ mallctl("thread.deallocatedp", (void*)&dealloc_ptr, &sz, nullptr, 0);
+ // __STRDUMP_ENABLE__
}
+ mc.allocated = *alloc_ptr;
+ mc.deallocated = *dealloc_ptr;
- size_t used() const
+ if ( mc.allocated > mc.deallocated )
{
- if ( mem_stats.allocated < mem_stats.deallocated )
- {
- assert(false);
- return 0;
- }
- return mem_stats.allocated - mem_stats.deallocated;
- }
-};
+ size_t usage = mc.allocated - mc.deallocated;
-static Tracker s_tracker;
+ if ( usage > mc.max_in_use )
+ mc.max_in_use = usage;
-// -----------------------------------------------------------------------------
-// helpers
-// -----------------------------------------------------------------------------
+ return usage;
+ }
+ return 0;
+}
+#else
+static size_t get_usage(const MemoryCounts& mc)
+{
+#ifdef ENABLE_MEMORY_OVERLOADS
+ assert(mc.allocated >= mc.deallocated);
+ return mc.allocated - mc.deallocated;
-template<typename Tracker, typename Handler>
-inline bool free_space(size_t requested, size_t cap, Tracker& trk, Handler& handler)
+#else
+ UNUSED(mc);
+ return 0;
+#endif
+}
+#endif
+
+template<typename Handler>
+inline void free_space(size_t cap, Handler& handler)
{
- if ( requested > cap )
- {
- return false;
- }
+ MemoryCounts& mc = memory::MemoryCap::get_mem_stats();
+ size_t usage = get_usage(mc);
- auto used = trk.used();
+ if ( usage < cap )
+ return;
- if ( used + requested <= cap )
- return true;
+ ++mc.reap_attempts;
- ++mem_stats.reap_attempts;
+ if ( handler() )
+ return;
- while ( used + requested > cap )
- {
- handler();
- auto tmp = trk.used();
-
- if ( tmp >= used )
- {
- ++mem_stats.reap_failures;
- return false;
- }
- used = tmp;
- }
- return true;
+ ++mc.reap_failures;
}
inline size_t calculate_threshold(size_t cap, size_t threshold)
// per-thread configuration
// -----------------------------------------------------------------------------
-size_t MemoryCap::thread_cap = 0;
-size_t MemoryCap::preemptive_threshold = 0;
+size_t MemoryCap::limit = 0;
// -----------------------------------------------------------------------------
// public interface
// -----------------------------------------------------------------------------
-void MemoryCap::free_space(size_t n)
+void MemoryCap::setup(const MemoryConfig& config, unsigned n)
{
- if ( !is_packet_thread() )
- return;
-
- if ( !thread_cap )
- return;
-
- static THREAD_LOCAL bool entered = false;
-
- if ( entered )
- return;
-
- entered = true;
- memory::free_space(n, thread_cap, s_tracker, prune_handler);
- entered = false;
+ assert(!is_packet_thread());
+ limit = memory::calculate_threshold(config.cap, config.threshold);
+ pkt_mem_stats.resize(n);
}
-static size_t fudge_it(size_t n)
+void MemoryCap::cleanup()
{
- return ((n >> 7) + 1) << 7;
+ pkt_mem_stats.resize(0);
}
-void MemoryCap::update_allocations(size_t n)
+MemoryCounts& MemoryCap::get_mem_stats()
{
- if (n == 0)
- return;
-
- size_t k = n;
- n = fudge_it(n);
- free_space(n);
- mem_stats.total_fudge += (n - k);
- s_tracker.allocate(n);
- auto in_use = s_tracker.used();
- if ( in_use > mem_stats.max_in_use )
- mem_stats.max_in_use = in_use;
- mp_active_context.update_allocs(n);
-}
-
-void MemoryCap::update_deallocations(size_t n)
-{
- if (n == 0)
- return;
+ if ( !is_packet_thread() )
+ return ctl_mem_stats;
- n = fudge_it(n);
- s_tracker.deallocate(n);
- mp_active_context.update_deallocs(n);
+ auto id = get_instance_id();
+ return pkt_mem_stats[id];
}
-bool MemoryCap::over_threshold()
+void MemoryCap::free_space()
{
- if ( !preemptive_threshold )
- return false;
-
- return s_tracker.used() >= preemptive_threshold;
-}
+ assert(is_packet_thread());
-// FIXIT-L this should not be called while the packet threads are running.
-// once reload is implemented for the memory manager, the configuration
-// model will need to be updated
-
-void MemoryCap::calculate()
-{
- assert(!is_packet_thread());
- const MemoryConfig& config = *SnortConfig::get_conf()->memory;
+ if ( !limit )
+ return;
- thread_cap = config.cap;
- preemptive_threshold = memory::calculate_threshold(thread_cap, config.threshold);
+ memory::free_space(limit, prune_handler);
}
-void MemoryCap::print()
+#ifdef ENABLE_MEMORY_OVERLOADS
+void MemoryCap::allocate(size_t n)
{
- if ( !MemoryModule::is_active() )
- return;
+ MemoryCounts& mc = memory::MemoryCap::get_mem_stats();
- bool verbose = SnortConfig::log_verbose();
+ mc.allocated += n;
+ ++mc.allocations;
- if ( verbose or mem_stats.allocations )
- LogLabel("memory (heap)");
+ assert(mc.allocated >= mc.deallocated);
+ auto in_use = mc.allocated - mc.deallocated;
- if ( verbose )
- {
- LogMessage(" thread cap: %zu\n", thread_cap);
- LogMessage(" thread preemptive threshold: %zu\n", preemptive_threshold);
- }
+ if ( in_use > mc.max_in_use )
+ mc.max_in_use = in_use;
- if ( mem_stats.allocations )
- {
- LogMessage(" main thread usage: %zu\n", s_tracker.used());
- LogMessage(" allocations: %" PRIu64 "\n", mem_stats.allocations);
- LogMessage(" deallocations: %" PRIu64 "\n", mem_stats.deallocations);
- }
+#ifdef ENABLE_MEMORY_PROFILER
+ mp_active_context.update_allocs(n);
+#endif
}
-} // namespace memory
-
-#ifdef UNIT_TEST
-
-namespace t_memory_cap
+void MemoryCap::deallocate(size_t n)
{
+ MemoryCounts& mc = memory::MemoryCap::get_mem_stats();
-struct MockTracker
-{
- size_t result = 0;
- size_t used() const
- { return result; }
-
- MockTracker(size_t r) : result { r } { }
- MockTracker() = default;
-};
-
-struct HandlerSpy
-{
- size_t calls = 0;
- ssize_t modifier;
- MockTracker* tracker;
-
- void operator()()
+ // std::thread causes an extra deallocation in packet
+ // threads so the below asserts don't hold
+ if ( mc.allocated >= mc.deallocated + n )
{
- ++calls;
- if ( modifier && tracker )
- tracker->result += modifier;
+ mc.deallocated += n;
+ ++mc.deallocations;
}
- HandlerSpy(ssize_t n, MockTracker& trk) :
- modifier(n), tracker(&trk) { }
-};
+#if 0
+ assert(mc.deallocated <= mc.allocated);
+ assert(mc.deallocations <= mc.allocations);
+ assert(mc.allocated or !mc.allocations);
+#endif
-} // namespace t_memory_cap
+#ifdef ENABLE_MEMORY_PROFILER
+ mp_active_context.update_deallocs(n);
+#endif
+}
+#endif
-TEST_CASE( "memory cap free space", "[memory]" )
+void MemoryCap::print(bool verbose, bool print_all)
{
- using namespace t_memory_cap;
-
- SECTION( "no handler call required" )
- {
- MockTracker tracker;
- HandlerSpy handler { 0, tracker };
-
- CHECK( memory::free_space(1, 1024, tracker, handler) );
- CHECK( handler.calls == 0 );
- }
-
- SECTION( "handler frees enough space the first time" )
- {
- MockTracker tracker { 1024 };
- HandlerSpy handler { -5, tracker };
-
- CHECK( memory::free_space(1, 1024, tracker, handler) );
- CHECK( handler.calls == 1 );
- }
+ if ( !MemoryModule::is_active() )
+ return;
- SECTION( "handler needs to be called multiple times to free up space" )
- {
- MockTracker tracker { 1024 };
- HandlerSpy handler { -1, tracker };
+ MemoryCounts& mc = get_mem_stats();
+ uint64_t usage = get_usage(mc);
- CHECK( memory::free_space(2, 1024, tracker, handler) );
- CHECK( (handler.calls == 2) );
- }
+ if ( verbose or usage )
+ LogLabel("memory (heap)");
- SECTION( "handler fails to free enough space" )
- {
- MockTracker tracker { 1024 };
- HandlerSpy handler { 0, tracker };
+ if ( verbose and print_all )
+ LogCount("pruning threshold", limit);
- CHECK_FALSE( memory::free_space(1, 1024, tracker, handler) );
- CHECK( handler.calls == 1 );
- }
+ LogCount("main thread usage", usage);
+ LogCount("allocations", mc.allocations);
+ LogCount("deallocations", mc.deallocations);
- SECTION( "handler actually uses more space" )
+ if ( verbose )
{
- MockTracker tracker { 1024 };
- HandlerSpy handler { 5, tracker };
- CHECK_FALSE( memory::free_space(1, 1024, tracker, handler) );
- CHECK( handler.calls == 1 );
+ struct rusage ru;
+ getrusage(RUSAGE_SELF, &ru);
+ LogCount("max_rss", ru.ru_maxrss * 1024);
}
}
-#endif
+} // namespace memory
+
#include <cstddef>
-#include "main/thread.h"
+#include "framework/counts.h"
+#include "main/snort_types.h"
+
+struct MemoryConfig;
namespace memory
{
+struct MemoryCounts
+{
+ PegCount allocations;
+ PegCount deallocations;
+ PegCount allocated;
+ PegCount deallocated;
+ PegCount reap_attempts;
+ PegCount reap_failures;
+ PegCount max_in_use;
+};
+
class SO_PUBLIC MemoryCap
{
public:
- static void free_space(size_t);
- // The following functions perform internal rounding. Allocations and deallocations must be
- // performed in identical increments or leakage may occur.
- static void update_allocations(size_t);
- static void update_deallocations(size_t);
+ static void setup(const MemoryConfig&, unsigned);
+ static void cleanup();
- static bool over_threshold();
+ static void free_space();
// call from main thread
- static void calculate();
+ static void print(bool verbose, bool print_all = true);
- // call from main thread
- static void print();
+ static MemoryCounts& get_mem_stats();
+
+#ifdef ENABLE_MEMORY_OVERLOADS
+ static void allocate(size_t);
+ static void deallocate(size_t);
+#endif
private:
- static size_t thread_cap;
- static size_t preemptive_threshold;
+ static size_t limit;
};
} // namespace memory
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2016-2021 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// memory_manager.cc author Joel Cornett <jocornet@cisco.com>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cassert>
+#include <new>
+
+#include "main/thread.h"
+
+#include "memory_allocator.h"
+#include "memory_cap.h"
+
+#ifdef UNIT_TEST
+#include "catch/snort_catch.h"
+#endif
+
+namespace memory
+{
+
+// -----------------------------------------------------------------------------
+// metadata
+// -----------------------------------------------------------------------------
+
+// This structure must be aligned to max_align_t as long as we are prefixing
+// it to memory allocations so that the returned memory is also aligned.
+struct alignas(max_align_t) Metadata
+{
+#if defined(REG_TEST) || defined(UNIT_TEST)
+ static constexpr size_t SANITY_CHECK_VALUE = 0xabcdef;
+ size_t sanity;
+#endif
+
+ // number of requested bytes
+ size_t payload_size;
+
+ // total number of bytes allocated, including Metadata header
+ size_t total_size() const;
+ void* payload_offset();
+
+#if defined(REG_TEST) || defined(UNIT_TEST)
+ bool valid() const
+ { return sanity == SANITY_CHECK_VALUE; }
+#endif
+
+ Metadata(size_t = 0);
+
+ static size_t calculate_total_size(size_t);
+
+ template<typename Allocator>
+ static Metadata* create(size_t);
+
+ static Metadata* extract(void*);
+};
+
+inline size_t Metadata::total_size() const
+{ return calculate_total_size(payload_size); }
+
+inline void* Metadata::payload_offset()
+{ return this + 1; }
+
+inline Metadata::Metadata(size_t n) :
+#if defined(REG_TEST) || defined(UNIT_TEST)
+ sanity(SANITY_CHECK_VALUE),
+#endif
+ payload_size(n)
+{ }
+
+inline size_t Metadata::calculate_total_size(size_t n)
+{ return sizeof(Metadata) + n; }
+
+template<typename Allocator>
+Metadata* Metadata::create(size_t n)
+{
+ auto meta =
+ static_cast<Metadata*>(Allocator::allocate(calculate_total_size(n)));
+
+ if ( !meta )
+ return nullptr;
+
+ // Trigger metadata ctor
+ *meta = Metadata(n);
+
+#if defined(REG_TEST) || defined(UNIT_TEST)
+ assert(meta->valid());
+#endif
+
+ return meta;
+}
+
+Metadata* Metadata::extract(void* p)
+{
+ assert(p);
+
+ auto meta = static_cast<Metadata*>(p) - 1;
+
+#if defined(REG_TEST) || defined(UNIT_TEST)
+ assert(meta->valid());
+#endif
+
+ return meta;
+}
+
+// -----------------------------------------------------------------------------
+// the meat
+// -----------------------------------------------------------------------------
+
+class ReentryContext
+{
+public:
+ ReentryContext(bool& flag) :
+ already_entered(flag), flag(flag)
+ { flag = true; }
+
+ ~ReentryContext()
+ { flag = false; }
+
+ bool is_reentry() const
+ { return already_entered; }
+
+private:
+ const bool already_entered;
+ bool& flag;
+};
+
+template<typename Allocator = MemoryAllocator, typename Cap = MemoryCap>
+struct Interface
+{
+ static void* allocate(size_t);
+ static void deallocate(void*);
+
+ static THREAD_LOCAL bool in_allocation_call;
+};
+
+template<typename Allocator, typename Cap>
+void* Interface<Allocator, Cap>::allocate(size_t n)
+{
+ // prevent allocation reentry
+ ReentryContext reentry_context(in_allocation_call);
+ assert(!reentry_context.is_reentry());
+
+ auto meta = Metadata::create<Allocator>(n);
+
+ if ( !meta )
+ return nullptr;
+
+ Cap::allocate(meta->total_size());
+ return meta->payload_offset();
+}
+
+template<typename Allocator, typename Cap>
+void Interface<Allocator, Cap>::deallocate(void* p)
+{
+ if ( !p )
+ return;
+
+ auto meta = Metadata::extract(p);
+ assert(meta);
+
+ Cap::deallocate(meta->total_size());
+ Allocator::deallocate(meta);
+}
+
+template<typename Allocator, typename Cap>
+THREAD_LOCAL bool Interface<Allocator, Cap>::in_allocation_call = false;
+
+} //namespace memory
+
+// -----------------------------------------------------------------------------
+// new/delete replacements
+// -----------------------------------------------------------------------------
+
+// these don't have to be visible to operate as replacements
+
+#ifdef ENABLE_MEMORY_OVERLOADS
+void* operator new(size_t n)
+{
+ auto p = memory::Interface<>::allocate(n);
+ if ( !p )
+ throw std::bad_alloc();
+
+ return p;
+}
+
+void* operator new[](size_t n)
+{ return ::operator new(n); }
+
+void* operator new(size_t n, const std::nothrow_t&) noexcept
+{ return memory::Interface<>::allocate(n); }
+
+void* operator new[](size_t n, const std::nothrow_t&) noexcept
+{ return memory::Interface<>::allocate(n); }
+
+void operator delete(void* p) noexcept
+{ memory::Interface<>::deallocate(p); }
+
+void operator delete[](void* p) noexcept
+{ ::operator delete(p); }
+
+void operator delete(void* p, const std::nothrow_t&) noexcept
+{ ::operator delete(p); }
+
+void operator delete[](void* p, const std::nothrow_t&) noexcept
+{ ::operator delete[](p); }
+
+void operator delete(void* p, size_t) noexcept
+{ ::operator delete(p); }
+
+void operator delete[](void* p, size_t) noexcept
+{ ::operator delete[](p); }
+#endif
+
+// -----------------------------------------------------------------------------
+// unit tests
+// -----------------------------------------------------------------------------
+
+#ifdef UNIT_TEST
+
+namespace t_memory
+{
+
+struct AllocatorSpy
+{
+ static void* allocate(size_t n)
+ { allocate_called = true; allocate_arg = n; return pool; }
+
+ static void deallocate(void* p)
+ { deallocate_called = true; deallocate_arg = p; }
+
+ static void reset()
+ {
+ pool = nullptr;
+ allocate_called = false;
+ allocate_arg = 0;
+ deallocate_called = false;
+ deallocate_arg = nullptr;
+ }
+
+ static void* pool;
+ static bool allocate_called;
+ static size_t allocate_arg;
+ static bool deallocate_called;
+ static void* deallocate_arg;
+};
+
+void* AllocatorSpy::pool = nullptr;
+bool AllocatorSpy::allocate_called = false;
+size_t AllocatorSpy::allocate_arg = 0;
+bool AllocatorSpy::deallocate_called = false;
+void* AllocatorSpy::deallocate_arg = nullptr;
+
+struct CapSpy
+{
+ static void allocate(size_t n)
+ {
+ update_allocations_called = true;
+ update_allocations_arg = n;
+ }
+
+ static void deallocate(size_t n)
+ {
+ update_deallocations_called = true;
+ update_deallocations_arg = n;
+ }
+
+ static void reset()
+ {
+ update_allocations_called = false;
+ update_allocations_arg = 0;
+
+ update_deallocations_called = false;
+ update_deallocations_arg = 0;
+ }
+
+ static bool update_allocations_called;
+ static size_t update_allocations_arg;
+
+ static bool update_deallocations_called;
+ static size_t update_deallocations_arg;
+};
+
+bool CapSpy::update_allocations_called = false;
+size_t CapSpy::update_allocations_arg = 0;
+
+bool CapSpy::update_deallocations_called = false;
+size_t CapSpy::update_deallocations_arg = 0;
+
+} // namespace t_memory
+
+TEST_CASE( "memory metadata", "[memory]" )
+{
+ using namespace t_memory;
+
+ AllocatorSpy::reset();
+ constexpr size_t n = 1;
+ char pool[sizeof(memory::Metadata) + n];
+
+ SECTION( "create" )
+ {
+ AllocatorSpy::pool = pool;
+
+ auto meta = memory::Metadata::create<AllocatorSpy>(n);
+
+ CHECK( (void*)meta == (void*)pool );
+ CHECK( meta->valid() );
+ CHECK( meta->payload_size == n );
+ }
+
+ SECTION( "extract" )
+ {
+ auto meta_pool = reinterpret_cast<memory::Metadata*>(pool);
+ meta_pool[0] = memory::Metadata(n);
+
+ void* p = &meta_pool[1];
+
+ auto meta = memory::Metadata::extract(p);
+
+ CHECK( (void*)meta == (void*)pool );
+ CHECK( meta->payload_offset() == p );
+ }
+}
+
+TEST_CASE( "memory manager interface", "[memory]" )
+{
+ using namespace t_memory;
+
+ AllocatorSpy::reset();
+ CapSpy::reset();
+
+ constexpr size_t n = 1;
+ char pool[sizeof(memory::Metadata) + n];
+
+ using Interface = memory::Interface<AllocatorSpy, CapSpy>;
+
+ SECTION( "allocation" )
+ {
+ SECTION( "allocation failure" )
+ {
+ auto p = Interface::allocate(n);
+
+ CHECK( p == nullptr );
+
+ CHECK( AllocatorSpy::allocate_called );
+ CHECK( AllocatorSpy::allocate_arg == memory::Metadata::calculate_total_size(n) );
+
+ CHECK_FALSE( CapSpy::update_allocations_called );
+ }
+
+ SECTION( "success" )
+ {
+ AllocatorSpy::pool = pool;
+
+ auto p = Interface::allocate(n);
+
+ CHECK( p > (void*)pool );
+
+ CHECK( AllocatorSpy::allocate_called );
+ CHECK( AllocatorSpy::allocate_arg == memory::Metadata::calculate_total_size(n) );
+
+ CHECK( CapSpy::update_allocations_called );
+ CHECK( CapSpy::update_allocations_arg == memory::Metadata::calculate_total_size(n) );
+ }
+ }
+
+ SECTION( "deallocation" )
+ {
+ SECTION( "nullptr" )
+ {
+ Interface::deallocate(nullptr);
+
+ CHECK_FALSE( AllocatorSpy::deallocate_called );
+ CHECK_FALSE( CapSpy::update_deallocations_called );
+ }
+
+ SECTION( "success" )
+ {
+ auto meta_pool = reinterpret_cast<memory::Metadata*>(pool);
+ meta_pool[0] = memory::Metadata(n);
+
+ auto p = meta_pool[0].payload_offset();
+
+ Interface::deallocate(p);
+
+ CHECK( AllocatorSpy::deallocate_called );
+ CHECK( AllocatorSpy::deallocate_arg == (void*)pool );
+ CHECK( CapSpy::update_deallocations_called );
+ CHECK( CapSpy::update_deallocations_arg == memory::Metadata::calculate_total_size(n) );
+ }
+ }
+ AllocatorSpy::pool = nullptr;
+ AllocatorSpy::deallocate_arg = nullptr;
+}
+
+#endif
+
#include "main/snort_config.h"
+#include "memory_cap.h"
#include "memory_config.h"
using namespace snort;
{ "cap", Parameter::PT_INT, "0:maxSZ", "0",
"set the per-packet-thread cap on memory (bytes, 0 to disable)" },
- { "threshold", Parameter::PT_INT, "0:100", "0",
- "set the per-packet-thread threshold for preemptive cleanup actions "
- "(percent, 0 to disable)" },
+ { "threshold", Parameter::PT_INT, "1:100", "100",
+ "scale cap to account for heap overhead" },
{ nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr }
};
-THREAD_LOCAL MemoryCounts mem_stats;
-static MemoryCounts zero_stats = { };
+static memory::MemoryCounts zero_stats = { };
const PegInfo mem_pegs[] =
{
{ CountType::NOW, "reap_attempts", "attempts to reclaim memory" },
{ CountType::NOW, "reap_failures", "failures to reclaim memory" },
{ CountType::MAX, "max_in_use", "highest allocated - deallocated" },
- { CountType::NOW, "total_fudge", "sum of all adjustments" },
{ CountType::END, nullptr, nullptr }
};
{ return mem_pegs; }
PegCount* MemoryModule::get_counts() const
-{ return is_active() ? (PegCount*)&mem_stats : (PegCount*)&zero_stats; }
+{
+ if ( !is_active() )
+ return (PegCount*)&zero_stats;
+
+ return (PegCount*)&memory::MemoryCap::get_mem_stats();
+}
#include "framework/module.h"
-struct MemoryCounts
-{
- PegCount allocations;
- PegCount deallocations;
- PegCount allocated;
- PegCount deallocated;
- PegCount reap_attempts;
- PegCount reap_failures;
- PegCount max_in_use;
- PegCount total_fudge;
-};
-
-extern THREAD_LOCAL MemoryCounts mem_stats;
-
class MemoryModule : public snort::Module
{
public:
namespace memory
{
-void prune_handler()
+bool prune_handler()
{
- Stream::prune_flows();
+ return Stream::prune_flows();
}
} // namespace memory
namespace memory
{
-void prune_handler();
+bool prune_handler();
}
bool is_email_hdrs_present() const;
bool is_email_from_present() const;
bool is_email_to_present() const;
- size_t size_of() const override
- { return sizeof(*this); }
private:
int log_flags = 0;
{
p->flow->stash->store(STASH_EXTRADATA_MIME, log_state);
reset_mime_paf_state(&mime_boundary);
- memory::MemoryCap::update_allocations(sizeof(*this));
}
MimeSession::~MimeSession()
{
- memory::MemoryCap::update_deallocations(sizeof(*this));
if ( decode_state )
delete(decode_state);
}
if (asd.tpsession and asd.tpsession->get_ctxt_version() != tp_appid_ctxt->get_version())
{
bool is_tp_done = asd.is_tp_processing_done();
- memory::MemoryCap::update_deallocations(asd.tpsession->size_of());
delete asd.tpsession;
asd.tpsession = nullptr;
if (!is_tp_done)
class AppIdDnsSession
{
public:
- AppIdDnsSession()
- {
- memory::MemoryCap::update_allocations(sizeof(*this));
- }
-
- ~AppIdDnsSession()
- {
- memory::MemoryCap::update_deallocations(sizeof(*this));
- }
+ AppIdDnsSession() { }
+ ~AppIdDnsSession() { }
void reset()
{
ErrorMessage("appid: Could not allocate asd.tpsession data in consume");
else
{
- memory::MemoryCap::update_allocations(asd->tpsession->size_of());
asd->tpsession->set_state(TP_STATE_HA);
}
}
AppIdHttpSession::AppIdHttpSession(AppIdSession& asd, uint32_t http2_stream_id)
: asd(asd), http2_stream_id(http2_stream_id)
-{
- memory::MemoryCap::update_allocations(sizeof(AppIdHttpSession));
-}
+{ }
AppIdHttpSession::~AppIdHttpSession()
{
delete meta_data[i];
if (tun_dest)
delete tun_dest;
- memory::MemoryCap::update_deallocations(sizeof(AppIdHttpSession));
}
void AppIdHttpSession::free_chp_matches(ChpMatchDescriptor& cmd, unsigned num_matches)
if (tpsession)
{
- memory::MemoryCap::update_deallocations(tpsession->size_of());
if (pkt_thread_tp_appid_ctxt and
((tpsession->get_ctxt_version() == pkt_thread_tp_appid_ctxt->get_version()) and
!ThirdPartyAppIdContext::get_tp_reload_in_progress()))
{
public:
TlsSession()
- {
- memory::MemoryCap::update_allocations(sizeof(*this));
- }
+ { }
~TlsSession()
{
- memory::MemoryCap::update_deallocations(sizeof(*this));
if (tls_host)
snort_free(tls_host);
if (tls_first_alt_name)
bool bidirectional=false);
void initialize_future_session(AppIdSession&, uint64_t);
- size_t size_of() override
- { return sizeof(*this); }
-
snort::Flow* flow = nullptr;
AppIdConfig& config;
std::unordered_map<unsigned, AppIdFlowData*> flow_data;
void clear_user_logged_in() { user_logged_in = false; }
- size_t size_of() const override
- { return sizeof(*this); }
-
protected:
AppIdSessionApi(const AppIdSession* asd, const SfIp& ip);
void AppIdDetector::add_payload(AppIdSession&, int) { }
void AppIdDetector::add_app(snort::Packet const&, AppIdSession&, AppidSessionDirection, int,
int, char const*, AppidChangeBits&) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
// LCOV_EXCL_STOP
SipEvent::SipEvent(snort::Packet const* p, SIPMsg const*, SIP_DialogData const*) { this->p = p; }
static AppId client_id = APP_ID_NONE;
static DetectorHTTPPattern mpattern;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
AppIdSessionApi::AppIdSessionApi(const AppIdSession*, const SfIp&) :
configuration for AppId and AppIdInspector provides the packet processing context. An AppId
inspector is instantiated for each packet thread created by the framework.
-AppId registers to recieve any IP packet, it does not process rebuilt packets.
+AppId registers to receive any IP packet, it does not process rebuilt packets.
AppIdModule contains all the logic to process the AppId inspector Lua configuration which is identified
by the 'appid' keyword. This configuration includes settings for logging, statistics, etc. and also
static SnortProtocolId dummy_http2_protocol_id = 1;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
// Mocks
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
unsigned get_instance_id() { return 3; }
#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTest/TestHarness.h>
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
AppIdSessionApi::AppIdSessionApi(const AppIdSession*, const SfIp&) :
uint32_t ThirdPartyAppIdContext::next_version = 0;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
// Stubs for appid api
Packet::~Packet() = default;
}
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
void ApplicationDescriptor::set_id(const Packet&, AppIdSession&, AppidSessionDirection,
AppId, AppidChangeBits&) { }
void AppIdModule::reset_stats() { }
using namespace snort;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
AppIdApi appid_api;
void Profiler::reset_stats() { }
void Profiler::show_stats() { }
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
OdpContext::OdpContext(const AppIdConfig&, snort::SnortConfig*) { }
AppIdConfig::~AppIdConfig() = default;
static OdpContext odpctxt(config, nullptr);
static Flow flow;
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
void ApplicationDescriptor::set_id(const Packet&, AppIdSession&, AppidSessionDirection, AppId, AppidChangeBits&) { }
void AppIdModule::reset_stats() {}
#include <vector>
-void memory::MemoryCap::update_allocations(size_t) { }
-void memory::MemoryCap::update_deallocations(size_t) { }
-
namespace snort
{
// Stubs for logs
void clear_attr(TPSessionAttr attr) override { flags &= ~attr; }
void set_attr(TPSessionAttr attr) override { flags |= attr; }
unsigned get_attr(TPSessionAttr attr) override { return flags & attr; }
- size_t size_of() const override { return sizeof(*this); }
private:
unsigned flags = 0;
virtual void clear_attr(TPSessionAttr) = 0;
virtual void set_attr(TPSessionAttr) = 0;
virtual unsigned get_attr(TPSessionAttr) = 0;
- virtual size_t size_of() const = 0;
virtual AppId get_appid(int& conf) { conf=confidence; return appid; }
virtual const ThirdPartyAppIdContext& get_ctxt() const
{ return ctxt; }
ErrorMessage("Could not allocate asd.tpsession data");
return false;
}
- memory::MemoryCap::update_allocations(asd.tpsession->size_of());
}
int tp_confidence;
frames and attempts to locate ARP spoofing attacks.
It alerts on source or destination address mismatch. It also alerts on an
-ARP request ocuring on a uni-cast frame (needs to be multi-cast).
+ARP request occurring on a uni-cast frame (needs to be multi-cast).
A network inspector module as it needs to examine all ethernet frames with
packet type of ARP.
{ "packets", Parameter::PT_INT, "0:max32", "10000",
"minimum packets to report" },
- { "seconds", Parameter::PT_INT, "1:max32", "60",
+ { "seconds", Parameter::PT_INT, "0:max32", "60",
"report interval" },
{ "flow_ip_memcap", Parameter::PT_INT, "236:maxSZ", "52428800",
else if ( v.is("seconds") )
{
config->sample_interval = v.get_uint32();
- if ( config->sample_interval == 0 )
- config->perf_flags |= PERF_SUMMARY;
}
else if ( v.is("flow_ip_memcap") )
{
FpElementType::SYN_MATCH, FpElementType::SYNTS
Examples:
- fptype = 2, mss = "12-34" -- client traffic (fptype = 2) with mss beween 12 and 34
+ fptype = 2, mss = "12-34" -- client traffic (fptype = 2) with mss between 12 and 34
fptype = 2, mss = "X" -- don't use mss for matching, so match anything from cient
fptype = 2, mss = "SYN" -- error: client traffic (fptype = 2) but mss of type SYN_MATCH
only accepted for server traffic (fptyp1 = 1 or 10)
fptype = 1, mss = "TS" -- OK: server traffic (fptype = 1) and mss of type SYNTS
- mss = "+5" -- eror: mss cannot be an INCREMENT type
+ mss = "+5" -- error: mss cannot be an INCREMENT type
id: ip id
FpElementType::RANGE
FpElementType::RANGE
These are defined by TcpOptCode in src/protocols/tcp_options.h.
- The ones we use for fingerprint matcing are
+ The ones we use for fingerprint matching are
- MAXSEG (2)
- WSCALE (3)
inspector_id = snort::FlowData::create_flow_data_id();
}
-size_t RNAFlow::size_of()
-{
- return sizeof(*this);
-}
-
bool FpFingerprintState::set(const Packet* p)
{
int pos = 0;
~RNAFlow() override;
static void init();
- size_t size_of() override;
void clear_ht(snort::HostTracker& ht);
unsigned Http2FlowData::inspector_id = 0;
Http2Stream::~Http2Stream() = default;
-HpackDynamicTable::HpackDynamicTable(Http2FlowData* flow_data) :
- session_data(flow_data) {}
HpackDynamicTable::~HpackDynamicTable() = default;
Http2DataCutter::Http2DataCutter(Http2FlowData* _session_data, HttpCommon::SourceId src_id) :
session_data(_session_data), source_id(src_id) { }
data_cutter {Http2DataCutter(this, SRC_CLIENT), Http2DataCutter(this, SRC_SERVER)}
{ }
Http2FlowData::~Http2FlowData() = default;
-size_t Http2FlowData::size_of() { return 1; }
Http2FlowData http2_flow_data(nullptr);
void Http2FlowData::set_mid_frame(bool val) { continuation_expected[SRC_SERVER] = val; }
bool Http2FlowData::is_mid_frame() const { return continuation_expected[SRC_SERVER]; }
static const StatsTable::Field fields[] =
{
{ "#", 5, ' ', 0, std::ios_base::left },
- { "module", 20, ' ', 0, std::ios_base::fmtflags() },
+ { "module", 24, ' ', 0, std::ios_base::fmtflags() },
{ "layer", 6, ' ', 0, std::ios_base::fmtflags() },
- { "allocs", 9, ' ', 0, std::ios_base::fmtflags() },
- { "used (kb)", 12, ' ', 2, std::ios_base::fmtflags() },
- { "avg/allocation", 15, ' ', 1, std::ios_base::fmtflags() },
+ { "allocs", 12, ' ', 0, std::ios_base::fmtflags() },
+ { "used (kb)", 15, ' ', 2, std::ios_base::fmtflags() },
+ { "avg/alloc", 12, ' ', 1, std::ios_base::fmtflags() },
{ "%/caller", 10, ' ', 2, std::ios_base::fmtflags() },
- { "%/total", 9, ' ', 2, std::ios_base::fmtflags() },
+ { "%/total", 10, ' ', 2, std::ios_base::fmtflags() },
{ nullptr, 0, '\0', 0, std::ios_base::fmtflags() }
};
assert(config);
show_time_profiler_stats(s_profiler_nodes, config->time);
+#ifdef ENABLE_MEMORY_PROFILER
show_memory_profiler_stats(s_profiler_nodes, config->memory);
+#endif
show_rule_profiler_stats(config->rule);
}
#ifdef NO_PROFILER
using Profile = ProfileDisabled;
#else
-#ifdef NO_MEM_MGR
+#ifndef ENABLE_MEMORY_PROFILER
using Profile = NoMemContext;
#else
using Profile = ProfileContext;
#endif
#endif
-// developer enable for profiling rule options
-//using RuleProfile = ProfileContext;
+#ifndef ENABLE_RULE_PROFILER
using RuleProfile = ProfileDisabled;
+#else
+using RuleProfile = ProfileContext;
+#endif
}
#endif
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
CipSessionData session;
packets without decoding the protocol with a series of "content" and "byte
test" options.
-The preprocessor only evaluates PAF-flushed PDUs. If the rule options don't
+The inspector only evaluates PAF-flushed PDUs. If the rule options don't
check for this, they'll fire on stale session data when the original packet
goes through before flushing.
ssd.sd = sd;
ssd.policy = policy;
SMB_DEBUG(dce_smb_trace, DEFAULT_TRACE_OPTION_ID, TRACE_DEBUG_LEVEL, p, "smb1 session created\n");
- memory::MemoryCap::update_allocations(sizeof(*this));
}
Dce2Smb1SessionData::~Dce2Smb1SessionData()
{
DCE2_SmbDataFree(&ssd);
- memory::MemoryCap::update_deallocations(sizeof(*this));
}
void Dce2Smb1SessionData::process()
tcp_file_tracker = nullptr;
flow_key = get_smb2_flow_key(tcp_flow->key);
SMB_DEBUG(dce_smb_trace, DEFAULT_TRACE_OPTION_ID, TRACE_DEBUG_LEVEL, p, "smb2 session created\n");
- memory::MemoryCap::update_allocations(sizeof(*this));
}
Dce2Smb2SessionData::~Dce2Smb2SessionData()
it_session.second->detach_flow(flow_key);
}
session_data_mutex.unlock();
- memory::MemoryCap::update_deallocations(sizeof(*this));
}
void Dce2Smb2SessionData::reset_matching_tcp_file_tracker(
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
Dce2SmbSessionData* get_smb_session_data()
{ return ssd; }
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
DCE2_TcpSsnData dce2_tcp_session;
static unsigned inspector_id;
DCE2_UdpSsnData dce2_udp_session;
-
- size_t size_of() override
- { return sizeof(*this); }
};
DCE2_UdpSsnData* get_dce2_udp_session_data(snort::Flow*);
The wizard is a special service inspector that examines the beginning of a
data stream and decides what application protocol is present.
-http_inspect is the legacy Snort HTTP preprocessor ported to Snort\++ .
-nhttp_inspect is the complete rewrite being developed specifically for
-Snort++.
-
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
dnp3_session_data_t dnp3_session;
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
DNSData session;
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
TELNET_SESSION session;
static void init();
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
GTP_Roptions ropts;
uint64_t Http2FlowData::instance_count = 0;
#endif
-// Each stream will have class Http2Stream allocated and a node in streams list
-const size_t Http2FlowData::stream_memory_size = sizeof(class Http2Stream) +
- stream_extra_memory;
-const size_t Http2FlowData::stream_increment_memory_size = stream_memory_size *
- STREAM_MEMORY_TRACKING_INCREMENT;
-
Http2FlowData::Http2FlowData(Flow* flow_) :
FlowData(inspector_id),
flow(flow_),
for (Http2Stream* stream : streams)
delete stream;
- // Since stream memory is allocated in blocks of 25, must also deallocate in blocks of 25 to
- // ensure consistent rounding.
- while (stream_memory_allocations_tracked > STREAM_MEMORY_TRACKING_INCREMENT)
- update_stream_memory_deallocations();
}
HttpFlowData* Http2FlowData::get_hi_flow_data() const
stream->set_hi_flow_data(flow);
}
-size_t Http2FlowData::size_of()
-{
- // Account for memory for 25 concurrent streams up front, plus 1 stream for stream id 0.
- return sizeof(*this) + stream_increment_memory_size + stream_memory_size +
- (2 * sizeof(Http2EventGen)) + (2 * sizeof(Http2Infractions));
-}
-
-void Http2FlowData::update_stream_memory_allocations()
-{
- assert(concurrent_streams > stream_memory_allocations_tracked);
- assert(concurrent_streams % stream_memory_allocations_tracked == 1);
- update_allocations(stream_increment_memory_size);
- stream_memory_allocations_tracked += STREAM_MEMORY_TRACKING_INCREMENT;
-}
-
-void Http2FlowData::update_stream_memory_deallocations()
-{
- assert(stream_memory_allocations_tracked >= STREAM_MEMORY_TRACKING_INCREMENT);
- update_deallocations(stream_increment_memory_size);
- stream_memory_allocations_tracked -= STREAM_MEMORY_TRACKING_INCREMENT;
-}
-
Http2Stream* Http2FlowData::find_stream(const uint32_t key) const
{
for (Http2Stream* stream : streams)
concurrent_streams += 1;
if (concurrent_streams > Http2Module::get_peg_counts(PEG_MAX_CONCURRENT_STREAMS))
Http2Module::increment_peg_counts(PEG_MAX_CONCURRENT_STREAMS);
- if (concurrent_streams > stream_memory_allocations_tracked)
- update_stream_memory_allocations();
}
}
return stream;
return current_stream[source_id];
}
-void Http2FlowData::allocate_hi_memory(HttpFlowData* hi_flow_data)
-{
- update_allocations(hi_flow_data->size_of());
-}
-
-void Http2FlowData::deallocate_hi_memory(HttpFlowData* hi_flow_data)
-{
- update_deallocations(hi_flow_data->size_of());
-}
-
bool Http2FlowData::is_mid_frame() const
{
return (header_octets_seen[SRC_SERVER] != 0) || (remaining_data_padding[SRC_SERVER] != 0) ||
friend class Http2WindowUpdateFrame;
friend void finish_msg_body(Http2FlowData* session_data, HttpCommon::SourceId source_id);
- size_t size_of() override;
-
Http2Stream* find_current_stream(const HttpCommon::SourceId source_id) const;
uint32_t get_current_stream_id(const HttpCommon::SourceId source_id) const;
Http2Stream* get_processing_stream(const HttpCommon::SourceId source_id, uint32_t concurrent_streams_limit);
Http2Stream* get_hi_stream() const;
Http2Stream* find_stream(const uint32_t key) const;
void delete_processing_stream();
-
- // When H2I allocates http_inspect flows, it bypasses the usual FlowData memory allocation
- // bookkeeping. So H2I needs to update memory allocations and deallocations itself.
- void allocate_hi_memory(HttpFlowData* hi_flow_data);
- void deallocate_hi_memory(HttpFlowData* hi_flow_data);
- // Memory for streams is tracked in increments of 25 to minimize tracking overhead
- void update_stream_memory_allocations();
- void update_stream_memory_deallocations();
- static const size_t stream_memory_size;
- static const size_t stream_increment_memory_size;
- // Per-stream extra memory estimate to account for the std::list streams. Actual memory usage
- // is implementation dependent
- static const size_t stream_extra_memory = 24;
};
#endif
#endif
#include "http2_hpack_dynamic_table.h"
+#include "http2_module.h"
#include <cstring>
-#include "http2_flow_data.h"
#include "http2_hpack_table.h"
-#include "http2_module.h"
using namespace Http2Enums;
-HpackDynamicTable::HpackDynamicTable(Http2FlowData* flow_data) :
- session_data(flow_data)
-{
- session_data->update_allocations( ARRAY_CAPACITY * sizeof(HpackTableEntry*) +
- TABLE_MEMORY_TRACKING_INCREMENT);
- table_memory_allocated = TABLE_MEMORY_TRACKING_INCREMENT;
-}
-
-
HpackDynamicTable::~HpackDynamicTable()
{
- for (uint32_t i = 0, indx = start; i < num_entries; i++)
+ for (std::vector<HpackTableEntry*>::iterator it = circular_buf.begin();
+ it != circular_buf.end(); ++it)
{
- delete circular_buf[indx];
- indx = (indx + 1) % ARRAY_CAPACITY;
- }
- session_data->update_deallocations( ARRAY_CAPACITY * sizeof(HpackTableEntry*) +
- TABLE_MEMORY_TRACKING_INCREMENT );
-
- while (table_memory_allocated > TABLE_MEMORY_TRACKING_INCREMENT)
- {
- session_data->update_deallocations(TABLE_MEMORY_TRACKING_INCREMENT);
- table_memory_allocated -= TABLE_MEMORY_TRACKING_INCREMENT;
+ delete *it;
}
}
Http2Module::increment_peg_counts(PEG_MAX_TABLE_ENTRIES);
rfc_table_size += new_entry_size;
- while (rfc_table_size > table_memory_allocated)
- {
- session_data->update_allocations(TABLE_MEMORY_TRACKING_INCREMENT);
- table_memory_allocated += TABLE_MEMORY_TRACKING_INCREMENT;
- }
-
return true;
}
{
public:
// FIXIT-P This array can be optimized to start smaller and grow on demand
- HpackDynamicTable(Http2FlowData* flow_data);
+ HpackDynamicTable() : circular_buf(ARRAY_CAPACITY, nullptr) {}
~HpackDynamicTable();
const HpackTableEntry* get_entry(uint32_t index) const;
bool add_entry(const Field& name, const Field& value);
const static uint32_t DEFAULT_MAX_SIZE = 4096;
const static uint32_t ARRAY_CAPACITY = 512;
- const static uint32_t TABLE_MEMORY_TRACKING_INCREMENT = 500;
uint32_t max_size = DEFAULT_MAX_SIZE;
uint32_t start = 0;
uint32_t num_entries = 0;
uint32_t rfc_table_size = 0;
- HpackTableEntry* circular_buf[ARRAY_CAPACITY] = {0};
- Http2FlowData* const session_data;
- uint32_t table_memory_allocated;
+ std::vector<HpackTableEntry*> circular_buf;
void prune_to_size(uint32_t new_max_size);
};
class HpackIndexTable
{
public:
- HpackIndexTable(Http2FlowData* flow_data) : dynamic_table(flow_data) { }
+ HpackIndexTable(Http2FlowData*) { }
const HpackTableEntry* lookup(uint64_t index) const;
bool add_index(const Field& name, const Field& value);
HpackDynamicTable& get_dynamic_table() { return dynamic_table; }
Http2Stream::~Http2Stream()
{
delete current_frame;
- if (hi_flow_data)
- session_data->deallocate_hi_memory(hi_flow_data);
delete hi_flow_data;
}
{
if (hi_flow_data != nullptr)
{
- session_data->deallocate_hi_memory(hi_flow_data);
delete hi_flow_data;
hi_flow_data = nullptr;
}
{
assert(hi_flow_data == nullptr);
hi_flow_data = flow_data;
- session_data->allocate_hi_memory(hi_flow_data);
}
const Field& Http2Stream::get_buf(unsigned id)
}
HttpBodyCutter::HttpBodyCutter(bool accelerated_blocking_, ScriptFinder* finder_,
- CompressId compression_, HttpFlowData* ssn_data)
- : accelerated_blocking(accelerated_blocking_), compression(compression_), finder(finder_),
- session_data(ssn_data)
+ CompressId compression_)
+ : accelerated_blocking(accelerated_blocking_), compression(compression_), finder(finder_)
{
if (accelerated_blocking)
{
delete compress_stream;
compress_stream = nullptr;
}
- else
- session_data->update_allocations(session_data->zlib_inflate_memory);
-
}
static const uint8_t inspect_string[] = { '<', '/', 's', 'c', 'r', 'i', 'p', 't', '>' };
{
inflateEnd(compress_stream);
delete compress_stream;
- session_data->update_deallocations(session_data->zlib_inflate_memory);
}
}
{
public:
HttpBodyCutter(bool accelerated_blocking_, ScriptFinder* finder,
- HttpEnums::CompressId compression_, HttpFlowData* ssn_data);
+ HttpEnums::CompressId compression_);
~HttpBodyCutter() override;
void soft_reset() override { octets_seen = 0; }
const uint8_t* match_string;
const uint8_t* match_string_upper;
uint8_t string_length;
- HttpFlowData* const session_data;
};
class HttpBodyClCutter : public HttpBodyCutter
HttpBodyClCutter(int64_t expected_length,
bool accelerated_blocking,
ScriptFinder* finder,
- HttpEnums::CompressId compression,
- HttpFlowData* ssn_data) :
- HttpBodyCutter(accelerated_blocking, finder, compression, ssn_data),
+ HttpEnums::CompressId compression) :
+ HttpBodyCutter(accelerated_blocking, finder, compression),
remaining(expected_length)
{ assert(remaining > 0); }
HttpEnums::ScanResult cut(const uint8_t*, uint32_t length, HttpInfractions*, HttpEventGen*,
{
public:
HttpBodyOldCutter(bool accelerated_blocking, ScriptFinder* finder,
- HttpEnums::CompressId compression, HttpFlowData* ssn_data) :
- HttpBodyCutter(accelerated_blocking, finder, compression, ssn_data)
+ HttpEnums::CompressId compression) :
+ HttpBodyCutter(accelerated_blocking, finder, compression)
{}
HttpEnums::ScanResult cut(const uint8_t*, uint32_t, HttpInfractions*, HttpEventGen*,
uint32_t flow_target, bool stretch, HttpEnums::H2BodyState) override;
{
public:
HttpBodyChunkCutter(int64_t maximum_chunk_length_, bool accelerated_blocking,
- ScriptFinder* finder, HttpEnums::CompressId compression, HttpFlowData* ssn_data) :
- HttpBodyCutter(accelerated_blocking, finder, compression, ssn_data),
+ ScriptFinder* finder, HttpEnums::CompressId compression) :
+ HttpBodyCutter(accelerated_blocking, finder, compression),
maximum_chunk_length(maximum_chunk_length_)
{}
HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
{
public:
HttpBodyH2Cutter(int64_t expected_length, bool accelerated_blocking, ScriptFinder* finder,
- HttpEnums::CompressId compression, HttpFlowData* ssn_data) :
- HttpBodyCutter(accelerated_blocking, finder, compression, ssn_data),
+ HttpEnums::CompressId compression) :
+ HttpBodyCutter(accelerated_blocking, finder, compression),
expected_body_length(expected_length)
{}
HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length, HttpInfractions*,
#include "http_field.h"
-#include "flow/flow_data.h"
#include "http_common.h"
#include "http_enum.h"
#include "http_test_manager.h"
// Both Fields cannot be responsible for deleting the buffer so do not copy own_the_buffer
}
-void Field::update_allocations(snort::FlowData* flow_data)
-{
- if (own_the_buffer && (len > 0))
- flow_data->update_allocations(len);
-}
-
-void Field::update_deallocations(snort::FlowData* flow_data)
-{
- if (own_the_buffer && (len > 0))
- flow_data->update_deallocations(len);
-}
-
#ifdef REG_TEST
void Field::print(FILE* output, const char* name) const
{
#include "http_common.h"
#include "http_enum.h"
-namespace snort
-{
-class FlowData;
-}
-
// Individual pieces of the message found during parsing.
// Length values <= 0 are StatusCode values and imply that the start pointer is meaningless.
// Never use the start pointer without verifying that length > 0.
void set(const Field& f);
void set(HttpCommon::StatusCode stat_code);
void set(int32_t length) { set(static_cast<HttpCommon::StatusCode>(length)); }
- void update_allocations(snort::FlowData* flow_data);
- void update_deallocations(snort::FlowData* flow_data);
#ifdef REG_TEST
void print(FILE* output, const char* name) const;
#ifndef UNIT_TEST_BUILD
if (js_ident_ctx)
{
- update_deallocations(js_ident_ctx->size());
delete js_ident_ctx;
debug_log(4, http_trace, TRACE_JS_PROC, nullptr,
}
if (js_normalizer)
{
- update_deallocations(JSNormalizer::size());
delete js_normalizer;
debug_log(4, http_trace, TRACE_JS_PROC, nullptr,
delete events[k];
delete[] section_buffer[k];
delete[] partial_buffer[k];
- update_deallocations(partial_buffer_length[k]);
delete[] partial_detect_buffer[k];
- update_deallocations(partial_detect_length[k]);
HttpTransaction::delete_transaction(transaction[k], nullptr);
delete cutter[k];
if (compress_stream[k] != nullptr)
{
inflateEnd(compress_stream[k]);
delete compress_stream[k];
- update_deallocations(zlib_inflate_memory);
}
if (mime_state[k] != nullptr)
{
}
}
-size_t HttpFlowData::size_of()
-{
- return sizeof(HttpFlowData) + (2 * sizeof(HttpEventGen));
-}
-
void HttpFlowData::half_reset(SourceId source_id)
{
assert((source_id == SRC_CLIENT) || (source_id == SRC_SERVER));
inflateEnd(compress_stream[source_id]);
delete compress_stream[source_id];
compress_stream[source_id] = nullptr;
- update_deallocations(zlib_inflate_memory);
}
if (mime_state[source_id] != nullptr)
{
inflateEnd(compress_stream[source_id]);
delete compress_stream[source_id];
compress_stream[source_id] = nullptr;
- update_deallocations(zlib_inflate_memory);
}
detection_status[source_id] = DET_REACTIVATING;
}
if (!js_ident_ctx)
{
js_ident_ctx = new JSIdentifierCtx(ident_depth, max_scope_depth, built_in_ident);
- update_allocations(js_ident_ctx->size());
debug_logf(4, http_trace, TRACE_JS_PROC, nullptr,
"js_ident_ctx created (ident_depth %d)\n", ident_depth);
js_normalizer = new JSNormalizer(*js_ident_ctx, norm_depth,
max_template_nesting, max_bracket_depth);
- update_allocations(JSNormalizer::size());
debug_logf(4, http_trace, TRACE_JS_PROC, nullptr,
"js_normalizer created (norm_depth %zd, max_template_nesting %d)\n",
if (!js_normalizer)
return;
- update_deallocations(JSNormalizer::size());
delete js_normalizer;
js_normalizer = nullptr;
{
pipeline = new HttpTransaction*[MAX_PIPELINE];
HttpModule::increment_peg_counts(PEG_PIPELINED_FLOWS);
- update_allocations(sizeof(HttpTransaction*) * MAX_PIPELINE);
}
assert(!pipeline_overflow && !pipeline_underflow);
int new_back = (pipeline_back+1) % MAX_PIPELINE;
{
delete pipeline[k];
}
- if (pipeline != nullptr)
- update_deallocations(sizeof(HttpTransaction*) * MAX_PIPELINE);
delete[] pipeline;
}
{
// We've already sent all data through detection so no need to reinspect. Just need to
// prep for trailers
- update_deallocations(partial_buffer_length[source_id]);
partial_buffer_length[source_id] = 0;
delete[] partial_buffer[source_id];
partial_buffer[source_id] = nullptr;
- update_deallocations(partial_detect_length[source_id]);
partial_detect_length[source_id] = 0;
delete[] partial_detect_buffer[source_id];
partial_detect_buffer[source_id] = nullptr;
~HttpFlowData() override;
static unsigned inspector_id;
static void init() { inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override;
friend class HttpBodyCutter;
friend class HttpInspect;
if (session_data->detect_depth_remaining[source_id] > 0)
{
delete[] partial_detect_buffer;
- session_data->update_deallocations(partial_detect_length);
assert(detect_length <= session_data->detect_depth_remaining[source_id]);
bookkeeping_regular_flush(partial_detect_length, partial_detect_buffer,
partial_js_detect_length, detect_length);
detect_data.set(detect_length, js_norm_body.start());
delete[] partial_detect_buffer;
- session_data->update_deallocations(partial_detect_length);
if (!session_data->partial_flush[source_id])
{
partial_detect_buffer = save_partial;
partial_detect_length = decompressed->length();
partial_js_detect_length = js_norm_body.length();
- session_data->update_allocations(partial_detect_length);
}
set_file_data(const_cast<uint8_t*>(detect_data.start()),
{
*infractions += INF_JS_PDU_MISS;
session_data->events[HttpCommon::SRC_SERVER]->create_event(EVENT_JS_PDU_MISS);
-
session_data->js_data_lost_once = true;
return;
}
using namespace HttpEnums;
using namespace snort;
-// Memory calculation:
-// Approximations based on assumptions:
-// - there will be a cookie
-// - http_header and http_cookie rule options will be required
-// - classic normalization on the headers will not be trivial
-// - individual normalized headers won't be a lot and 500 bytes will cover them
-//
-// Header infrastructure (header_line, header_name, header_name_id, header_value):
-// session_data_->num_head_lines[source_id_] * (3 * sizeof(Field) + sizeof(HeaderId))
-//
-// The entire headers consist of http_raw_header and http_raw_cookie. Because there is
-// a cookie it will be necessary to write out the full msg_text into these two buffers,
-// resulting in allocations totaling approximately msg_text.length(). These raw buffers
-// in turn will need to be normalized, requiring another msg_text.length().
-// Total cost: 2 * msg_text.length().
-//
-// Plus 500 bytes for normalized headers.
HttpMsgHeadShared::HttpMsgHeadShared(const uint8_t* buffer, const uint16_t buf_size, HttpFlowData* session_data_,
HttpCommon::SourceId source_id_, bool buf_owner, snort::Flow* flow_,
const HttpParaList* params_): HttpMsgSection(buffer, buf_size, session_data_, source_id_,
- buf_owner, flow_, params_), own_msg_buffer(buf_owner),
- extra_memory_allocations(session_data_->num_head_lines[source_id_] *
- (3 * sizeof(Field) + sizeof(HeaderId)) + 2 * msg_text.length() + 500)
-{
- if (own_msg_buffer)
- session_data->update_allocations(msg_text.length());
-
- session_data->update_allocations(extra_memory_allocations);
-}
+ buf_owner, flow_, params_), own_msg_buffer(buf_owner)
+{ }
HttpMsgHeadShared::~HttpMsgHeadShared()
{
list_ptr = list_ptr->next;
delete temp_ptr;
}
-
- if (own_msg_buffer)
- session_data->update_deallocations(msg_text.length());
-
- session_data->update_deallocations(extra_memory_allocations);
}
bool HttpMsgHeadShared::is_external_js()
bool file_cache_index_computed = false;
bool own_msg_buffer;
- const uint32_t extra_memory_allocations;
int js_external = HttpCommon::STAT_NOT_COMPUTE;
};
delete session_data->compress_stream[source_id];
session_data->compress_stream[source_id] = nullptr;
}
- else
- session_data->update_allocations(session_data->zlib_inflate_memory);
}
void HttpMsgHeader::setup_utf_decoding()
{
uri = new HttpUri(start_line.start() + first_end + 1, last_begin - first_end - 1,
method_id, params->uri_param, transaction->get_infractions(source_id),
- session_data->events[source_id], session_data);
+ session_data->events[source_id]);
}
else
{
uri_end--);
uri = new HttpUri(start_line.start() + uri_begin, uri_end - uri_begin + 1, method_id,
params->uri_param, transaction->get_infractions(source_id),
- session_data->events[source_id], session_data);
+ session_data->events[source_id]);
}
else
{
HttpCommon::SourceId source_id_, bool buf_owner, snort::Flow* flow_,
const HttpParaList* params_) : HttpMsgSection(buffer, buf_size, session_data_, source_id_,
buf_owner, flow_, params_), own_msg_buffer(buf_owner)
-{
- if (own_msg_buffer)
- session_data->update_allocations(msg_text.length());
-}
+{ }
HttpMsgStart::~HttpMsgStart()
-{
- if (own_msg_buffer)
- session_data->update_deallocations(msg_text.length());
-}
+{ }
void HttpMsgStart::analyze()
{
inflateEnd(compress_stream);
delete compress_stream;
compress_stream = nullptr;
- session_data->update_deallocations(session_data->zlib_inflate_memory);
}
return;
}
inflateEnd(compress_stream);
delete compress_stream;
compress_stream = nullptr;
- session_data->update_deallocations(session_data->zlib_inflate_memory);
// Since we failed to uncompress the data, fall through
}
}
memcpy(buffer, partial_buffer, partial_buffer_length);
session_data->section_offset[source_id] = partial_buffer_length;
delete[] partial_buffer;
- session_data->update_deallocations(partial_buffer_length);
partial_buffer_length = 0;
partial_buffer = nullptr;
}
partial_buffer = new uint8_t[buf_size];
memcpy(partial_buffer, buffer, buf_size);
partial_buffer_length = buf_size;
- session_data->update_allocations(partial_buffer_length);
}
partial_raw_bytes += total;
}
session_data->data_length[source_id],
session_data->accelerated_blocking[source_id],
my_inspector->script_finder,
- session_data->compression[source_id], session_data);
+ session_data->compression[source_id]);
case SEC_BODY_CHUNK:
return (HttpCutter*)new HttpBodyChunkCutter(
my_inspector->params->maximum_chunk_length,
session_data->accelerated_blocking[source_id],
my_inspector->script_finder,
- session_data->compression[source_id], session_data);
+ session_data->compression[source_id]);
case SEC_BODY_OLD:
return (HttpCutter*)new HttpBodyOldCutter(
session_data->accelerated_blocking[source_id],
my_inspector->script_finder,
- session_data->compression[source_id], session_data);
+ session_data->compression[source_id]);
case SEC_BODY_H2:
return (HttpCutter*)new HttpBodyH2Cutter(
session_data->data_length[source_id],
session_data->accelerated_blocking[source_id],
my_inspector->script_finder,
- session_data->compression[source_id], session_data);
+ session_data->compression[source_id]);
default:
assert(false);
return nullptr;
{
infractions[0] = nullptr;
infractions[1] = nullptr;
- session_data->update_allocations(transaction_memory_usage_estimate);
}
HttpTransaction::~HttpTransaction()
}
delete_section_list(body_list);
delete_section_list(discard_list);
- session_data->update_deallocations(transaction_memory_usage_estimate);
}
HttpTransaction* HttpTransaction::attach_my_transaction(HttpFlowData* session_data, SourceId
#include "http_common.h"
#include "http_enum.h"
-#include "http_flow_data.h"
#include "hash/hash_key_operations.h"
using namespace HttpCommon;
using namespace HttpEnums;
using namespace snort;
-HttpUri::HttpUri(const uint8_t* start, int32_t length, HttpEnums::MethodId method_id_,
- const HttpParaList::UriParam& uri_param_, HttpInfractions* infractions_,
- HttpEventGen* events_, HttpFlowData* session_data_) :
- uri(length, start), infractions(infractions_), events(events_), method_id(method_id_),
- uri_param(uri_param_), session_data(session_data_)
-{
- normalize();
- classic_norm.update_allocations(session_data);
-}
-
-HttpUri::~HttpUri()
-{
- classic_norm.update_deallocations(session_data);
-}
-
void HttpUri::parse_uri()
{
// Four basic types of HTTP URI
return host_norm;
}
-
#ifndef HTTP_URI_H
#define HTTP_URI_H
-#include "http_event.h"
-#include "http_field.h"
-#include "http_module.h"
#include "http_str_to_code.h"
+#include "http_module.h"
#include "http_uri_norm.h"
-
-class HttpFlowData;
+#include "http_field.h"
+#include "http_event.h"
static const int MAX_SCHEME_LENGTH = 36; // schemes longer than 36 characters are malformed
static const int LONG_SCHEME_LENGTH = 10; // schemes longer than 10 characters will alert
public:
HttpUri(const uint8_t* start, int32_t length, HttpEnums::MethodId method_id_,
const HttpParaList::UriParam& uri_param_, HttpInfractions* infractions_,
- HttpEventGen* events_, HttpFlowData* session_data_);
- ~HttpUri();
+ HttpEventGen* events_) :
+ uri(length, start), infractions(infractions_), events(events_), method_id(method_id_),
+ uri_param(uri_param_)
+ { normalize(); }
const Field& get_uri() const { return uri; }
HttpEnums::UriType get_uri_type() { return uri_type; }
const Field& get_scheme() { return scheme; }
HttpEnums::UriType uri_type = HttpEnums::URI__NOT_COMPUTE;
const HttpEnums::MethodId method_id;
const HttpParaList::UriParam& uri_param;
- HttpFlowData* const session_data;
void normalize();
void parse_uri();
HttpJsNorm::~HttpJsNorm() = default;
void HttpJsNorm::configure(){}
int64_t Parameter::get_int(char const*) { return 0; }
-void FlowData::update_allocations(size_t) {}
-void FlowData::update_deallocations(size_t) {}
TEST_GROUP(http_peg_count_test)
{
// Stubs whose sole purpose is to make the test code link
long HttpTestManager::print_amount {};
bool HttpTestManager::print_hex {};
-void FlowData::update_allocations(size_t) {}
-void FlowData::update_deallocations(size_t) {}
// Tests for get_next_code()
TEST_GROUP(get_next_code)
const bool HttpEnums::is_sp_tab_quote_dquote[256] {};
long HttpTestManager::print_amount {};
bool HttpTestManager::print_hex {};
-void FlowData::update_allocations(size_t) {}
-void FlowData::update_deallocations(size_t) {}
TEST_GROUP(norm_decimal_integer_test) {};
int DetectionEngine::queue_event(unsigned int, unsigned int) { return 0; }
fd_status_t File_Decomp_StopFree(fd_session_t*) { return File_Decomp_OK; }
uint32_t str_to_hash(const uint8_t *, size_t) { return 0; }
-void FlowData::update_allocations(size_t) {}
-void FlowData::update_deallocations(size_t) {}
FlowData* Flow::get_flow_data(uint32_t) const { return nullptr; }
}
HttpJsNorm::~HttpJsNorm() = default;
void HttpJsNorm::configure() {}
int64_t Parameter::get_int(char const*) { return 0; }
-void FlowData::update_allocations(size_t) {}
-void FlowData::update_deallocations(size_t) {}
TEST_GROUP(http_inspect_uri_norm)
{
static void init();
void reset()
- {
- ssn_data.session_data_reset();
- }
-
- size_t size_of() override
- {
- return sizeof(*this);
- }
+ { ssn_data.session_data_reset(); }
public:
static unsigned inspector_id;
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
IMAPData session;
Modbus packets without decoding the protocol with a series of "content" and
"byte test" options.
-The preprocessor only evaluates PAF-flushed PDUs. If the rule options don't
+The inspector only evaluates PAF-flushed PDUs. If the rule options don't
check for this, they'll fire on stale session data when the original packet
goes through before flushing.
ssn_data.flags = 0;
}
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
modbus_session_data_t ssn_data;
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
POPData session;
static void init()
{ inspector_id = FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
RpcSsnData session;
ssn_data.session_data_reset();
}
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
S7commplusSessionData ssn_data;
FreeSipData(&session);
assert(sip_stats.concurrent_sessions > 0);
sip_stats.concurrent_sessions--;
- memory::MemoryCap::update_deallocations(sizeof(SipFlowData));
}
static SIPData* SetNewSIPData(Packet* p)
{
SipFlowData* fd = new SipFlowData;
- memory::MemoryCap::update_allocations(sizeof(SipFlowData));
p->flow->set_flow_data(fd);
return &fd->session;
}
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
SIPData session;
{
Stream::ignore_flow(p, p->flow->pkt_type, p->get_ip_proto_next(), &mdataA->maddress,
mdataA->mport, &mdataB->maddress, mdataB->mport, SSN_DIR_BOTH, (new SipFlowData));
- memory::MemoryCap::update_allocations(sizeof(SipFlowData));
}
sip_stats.ignoreChannels++;
mdataA = mdataA->nextM;
sip_stats.dialogs++;
dialog = (SIP_DialogData*)snort_calloc(sizeof(SIP_DialogData));
- memory::MemoryCap::update_allocations(sizeof(SIP_DialogData));
// Add to the head
dialog->nextD = currDialog;
}
sip_freeMediaList(currDialog->mediaSessions);
snort_free(currDialog);
- memory::MemoryCap::update_deallocations(sizeof(SIP_DialogData));
+
if ( dList->num_dialogs > 0)
dList->num_dialogs--;
+
return true;
}
nextNode = curNode->nextD;
sip_freeMediaList(curNode->mediaSessions);
snort_free(curNode);
- memory::MemoryCap::update_deallocations(sizeof(SIP_DialogData));
curNode = nextNode;
}
}
// Create a media session
msg->mediaSession = (SIP_MediaSession*)snort_calloc(sizeof(SIP_MediaSession));
- memory::MemoryCap::update_allocations(sizeof(SIP_MediaSession));
const char* start = buff;
/*
return SIP_PARSE_ERROR;
mdata = (SIP_MediaData*)snort_calloc(sizeof(SIP_MediaData));
- memory::MemoryCap::update_allocations(sizeof(SIP_MediaData));
mdata->mport = (uint16_t)SnortStrtoul(spaceIndex + 1, &next, 10);
if ((nullptr != next)&&('/'==next[0]))
{
nextNode = curNode->nextM;
snort_free(curNode);
- memory::MemoryCap::update_deallocations(sizeof(SIP_MediaData));
curNode = nextNode;
}
if (nullptr != mediaSession)
{
snort_free(mediaSession);
- memory::MemoryCap::update_deallocations(sizeof(SIP_MediaSession));
}
}
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
SMTPData session;
static void init()
{ inspector_id = snort::FlowData::create_flow_data_id(); }
- size_t size_of() override
- { return sizeof(*this); }
-
public:
static unsigned inspector_id;
SSHData session;
static void init()
{ assign_ssl_inspector_id(snort::FlowData::create_flow_data_id()); }
- size_t size_of() override
- { return sizeof(*this); }
-
SSLData& get_session() override
{ return session; }
{ CountType::SUM, "idle_prunes", " sessions pruned due to timeout" },
{ CountType::SUM, "excess_prunes", "sessions pruned due to excess" },
{ CountType::SUM, "uni_prunes", "uni sessions pruned" },
- { CountType::SUM, "preemptive_prunes", "sessions pruned during preemptive pruning" },
+ { CountType::SUM, "preemptive_prunes", "sessions pruned during preemptive pruning (deprecated)" },
{ CountType::SUM, "memcap_prunes", "sessions pruned due to memcap" },
{ CountType::SUM, "ha_prunes", "sessions pruned by high availability sync" },
{ CountType::SUM, "stale_prunes", "sessions pruned due to stale connection" },
stream_base_stats.timeout_prunes = flow_con->get_prunes(PruneReason::IDLE);
stream_base_stats.excess_prunes = flow_con->get_prunes(PruneReason::EXCESS);
stream_base_stats.uni_prunes = flow_con->get_prunes(PruneReason::UNI);
- stream_base_stats.preemptive_prunes = flow_con->get_prunes(PruneReason::PREEMPTIVE);
+ stream_base_stats.preemptive_prunes = flow_con->get_prunes(PruneReason::MEMCAP);
stream_base_stats.memcap_prunes = flow_con->get_prunes(PruneReason::MEMCAP);
stream_base_stats.ha_prunes = flow_con->get_prunes(PruneReason::HA);
stream_base_stats.stale_prunes = flow_con->get_prunes(PruneReason::STALE);
//-------------------------------------------------------------------------
FileSession::FileSession(Flow* f) : Session(f)
-{ memory::MemoryCap::update_allocations(sizeof(*this)); }
+{ }
FileSession::~FileSession()
-{ memory::MemoryCap::update_deallocations(sizeof(*this)); }
+{ }
bool FileSession::setup(Packet*)
{
//-------------------------------------------------------------------------
IcmpSession::IcmpSession(Flow* f) : Session(f)
-{ memory::MemoryCap::update_allocations(sizeof(*this)); }
+{ }
IcmpSession::~IcmpSession()
-{ memory::MemoryCap::update_deallocations(sizeof(*this)); }
+{ }
bool IcmpSession::setup(Packet*)
{
{
delete[] fptr;
ip_stats.nodes_released++;
- memory::MemoryCap::update_deallocations(sizeof(*this) + flen);
}
uint8_t* data = nullptr; /* ptr to adjusted start position */
inline void init(uint16_t flen, const uint8_t* fptr, int ord)
{
assert(flen > 0);
- memory::MemoryCap::update_allocations(sizeof(*this) + flen);
this->flen = flen;
this->fptr = new uint8_t[flen];
//-------------------------------------------------------------------------
IpSession::IpSession(Flow* f) : Session(f)
-{ memory::MemoryCap::update_allocations(sizeof(*this)); }
+{ }
IpSession::~IpSession()
-{ memory::MemoryCap::update_deallocations(sizeof(*this)); }
+{ }
void IpSession::clear()
{
TcpStreamTracker::release_held_packets(cur_time, max_remove);
}
-void Stream::prune_flows()
+bool Stream::prune_flows()
{
- if ( flow_con && !FlowCache::is_pruning_in_progress())
- flow_con->prune_one(PruneReason::MEMCAP, false);
+ if ( !flow_con )
+ return false;
+
+ return flow_con->prune_one(PruneReason::MEMCAP, false);
}
//-------------------------------------------------------------------------
static void purge_flows();
static void handle_timeouts(bool idle);
- static void prune_flows();
+ static bool prune_flows();
static bool expected_flow(Flow*, Packet*);
// Looks in the flow cache for flow session with specified key and returns
{
TcpSegmentNode* tsn = reserved;
reserved = reserved->next;
- memory::MemoryCap::update_deallocations(sizeof(*tsn) + tsn->size);
tcpStats.mem_in_use -= tsn->size;
snort_free(tsn);
}
#endif
{
size_t size = sizeof(*tsn) + len;
- memory::MemoryCap::update_allocations(size);
tsn = (TcpSegmentNode*)snort_alloc(size);
tsn->size = len;
tcpStats.mem_in_use += len;
else
#endif
{
- memory::MemoryCap::update_deallocations(sizeof(*this) + size);
tcpStats.mem_in_use -= size;
snort_free(this);
}
client.session = this;
server.session = this;
tcpStats.instantiated++;
-
- memory::MemoryCap::update_allocations(sizeof(*this));
}
TcpSession::~TcpSession()
{
clear_session(true, false, false);
- memory::MemoryCap::update_deallocations(sizeof(*this));
}
bool TcpSession::setup(Packet*)
if ( held_packet != null_iterator )
return false;
- // Temporarily increase memcap until message is finalized in case
- // DAQ makes a copy of the data buffer.
- memory::MemoryCap::update_allocations(daq_msg_get_data_len(p->daq_msg));
-
held_packet = hpq->append(p->daq_msg, p->ptrs.tcph->seq(), *this);
held_pkt_seq = p->ptrs.tcph->seq();
if ( held_packet != null_iterator )
{
DAQ_Msg_h msg = held_packet->get_daq_msg();
- uint32_t msglen = daq_msg_get_data_len(msg);
if ( cp->active->packet_was_dropped() )
{
tcp_session->held_packet_dir = SSN_DIR_NONE;
}
- memory::MemoryCap::update_deallocations(msglen);
-
hpq->erase(held_packet);
held_packet = null_iterator;
tcpStats.current_packets_held--;
if ( held_packet != null_iterator )
{
DAQ_Msg_h msg = held_packet->get_daq_msg();
- uint32_t msglen = daq_msg_get_data_len(msg);
if ( (flow->session_state & STREAM_STATE_BLOCK_PENDING) ||
(flow->ssn_state.session_flags & SSNFLAG_BLOCK) )
tcpStats.held_packets_passed++;
}
- memory::MemoryCap::update_deallocations(msglen);
-
hpq->erase(held_packet);
held_packet = null_iterator;
tcpStats.current_packets_held--;
//-------------------------------------------------------------------------
UdpSession::UdpSession(Flow* f) : Session(f)
-{ memory::MemoryCap::update_allocations(sizeof(*this)); }
+{ }
UdpSession::~UdpSession()
-{ memory::MemoryCap::update_deallocations(sizeof(*this)); }
+{ }
bool UdpSession::setup(Packet* p)
{
This directory contains the implementation of user session tracking and
processing functions. When the source for a flow provides a TCP payload,
-e.g. a socket connection, then the base Stream preprocessor delegates
+e.g. a socket connection, then the base Stream inspector delegates
handling of the packets on that flow to this module.
The StreamUser class is implemented as a subclass of Inspector and provides
unsigned bucket = (n > BUCKET) ? n : BUCKET;
unsigned size = sizeof(UserSegment) + bucket -1;
- memory::MemoryCap::update_allocations(size);
UserSegment* us = (UserSegment*)snort_alloc(size);
us->size = size;
void UserSegment::term(UserSegment* us)
{
- memory::MemoryCap::update_deallocations(us->size);
snort_free(us);
}
//-------------------------------------------------------------------------
UserSession::UserSession(Flow* f) : Session(f)
-{ memory::MemoryCap::update_allocations(sizeof(*this)); }
+{ }
UserSession::~UserSession()
-{ memory::MemoryCap::update_deallocations(sizeof(*this)); }
+{ }
bool UserSession::setup(Packet*)
{
#include "helpers/process.h"
#include "log/messages.h"
#include "main/snort_config.h"
+#include "memory/memory_cap.h"
#include "managers/module_manager.h"
#include "packet_io/active.h"
#include "packet_io/sfdaq.h"
void PrintStatistics()
{
DropStats();
+ memory::MemoryCap::print(SnortConfig::log_verbose(), false);
timing_stats();
// FIXIT-L can do flag saving with RAII (much cleaner)