ip_len - raw.len, ip_len, raw.len);
codec_event(codec, DECODE_IPV4_DGRAM_GT_CAPLEN);
+ // FIXIT-L we should decode this layer if possible instead of stopping now
+ // ip6 etc may have similar issues
return false;
}
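
A minimal sketch of the guard this hunk sits in; the function and parameter names are illustrative, not the actual codec interface. An IPv4 header that claims more payload than was captured raises the event and, per the FIXIT above, currently aborts decoding rather than continuing with the truncated data:

static bool ip_len_ok(uint32_t ip_len, uint32_t caplen)
{
    // header claims ip_len bytes but the capture only has caplen
    if ( ip_len > caplen )
        return false;   // the DECODE_IPV4_DGRAM_GT_CAPLEN path above

    return true;
}
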
#if 0
Trace TRACE_NAME(detection);
-static THREAD_LOCAL unsigned s_events = 0;
static THREAD_LOCAL Ring<unsigned>* offload_ids = nullptr;
void DetectionEngine::thread_init()
if ( sfeventq_add(pq, en) )
return -1;
- s_events++;
return 0;
}
if ( sfeventq_add(pq, en) )
return -1;
- s_events++;
return 0;
}
return 0; // not enabled
}
- if ( s_events > 0 )
- s_events--;
-
fpLogEvent(en->rtn, en->otn, (Packet*)user);
sfthreshold_reset();
return 0;
}
-void DetectionEngine::reset_counts()
-{
- pc.log_limit += s_events;
- s_events = 0;
-}
-
void DetectionEngine::reset(Packet* p)
{
SF_EVENTQ* pq = p->context->equeue;
- sfeventq_reset(pq);
- reset_counts();
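+ // adds that failed due to queue overflow now count against log_limit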
+ pc.log_limit += sfeventq_reset(pq);
}
static int queue_event(unsigned gid, unsigned sid, RuleType = RULE_TYPE__NONE);
static int log_events(Packet*);
-
static void reset(Packet*);
- static void reset_counts();
static void disable_all(Packet*);
static bool all_disabled(Packet*);
if ( p->ptrs.ip_api.is_valid() )
{
filterEvent = sfthreshold_test(
- otn->sigInfo.generator,
- otn->sigInfo.id,
+ otn->sigInfo.generator, otn->sigInfo.id,
p->ptrs.ip_api.get_src(), p->ptrs.ip_api.get_dst(),
p->pkth->ts.tv_sec);
}
cleared.clear();
filterEvent = sfthreshold_test(
- otn->sigInfo.generator,
- otn->sigInfo.id,
- &cleared, &cleared,
- p->pkth->ts.tv_sec);
+ otn->sigInfo.generator, otn->sigInfo.id,
+ &cleared, &cleared, p->pkth->ts.tv_sec);
}
if ( (filterEvent < 0) || (filterEvent > 0 && !override) )
#include "sfeventq.h"
+#include <cassert>
#include "utils/util.h"
/*
-** NAME
-** sfeventq_new::
-*/
-/**
** Initialize the event queue. Provide the max number of nodes that this
** queue will support, the number of top nodes to log in the queue, and the
** size of the event structure that the user will fill in.
-**
-** @return integer
-**
-** @retval -1 failure
-** @retval 0 success
*/
SF_EVENTQ* sfeventq_new(int max_nodes, int log_nodes, int event_size)
{
- SF_EVENTQ* eq;
-
if ((max_nodes <= 0) || (log_nodes <= 0) || (event_size <= 0))
return NULL;
- eq = (SF_EVENTQ*)snort_calloc(sizeof(SF_EVENTQ));
+ SF_EVENTQ* eq = (SF_EVENTQ*)snort_calloc(sizeof(SF_EVENTQ));
/* Initialize the memory for the nodes that we are going to use. */
eq->node_mem = (SF_EVENTQ_NODE*)snort_calloc(max_nodes, sizeof(SF_EVENTQ_NODE));
eq->event_size = event_size;
eq->cur_nodes = 0;
eq->cur_events = 0;
+ eq->fails = 0;
+
eq->reserve_event = (char*)(&eq->event_mem[max_nodes * eq->event_size]);
return eq;
}
/*
-** NAME
-** sfeventq_event_alloc::
-*/
-/**
** Allocate the memory for an event to add to the event queue. This
** function is meant to be called first, the event structure filled in,
** and then added to the queue. While you can allocate several times before
}
/*
-** NAME
-** sfeventq_reset::
-*/
-/**
** Resets the event queue. We also set the reserve event back
** to the last event in the queue.
-**
-** @return void
*/
-void sfeventq_reset(SF_EVENTQ* eq)
+unsigned sfeventq_reset(SF_EVENTQ* eq)
{
+ unsigned fails = eq->fails;
+ eq->fails = 0;
eq->head = NULL;
eq->cur_nodes = 0;
eq->cur_events = 0;
eq->reserve_event = (char*)(&eq->event_mem[eq->max_nodes * eq->event_size]);
+ return fails;
}
-/*
-** NAME
-** sfeventq_free::
-*/
-/**
-** Cleanup the event queue.
-**
-** @return none
-**
-*/
void sfeventq_free(SF_EVENTQ* eq)
{
if (eq == NULL)
}
/*
-** NAME
-** get_eventq_node::
-*/
-/**
** This function returns a ptr to the node to use. We allocate the last
** event node if we have exhausted the event queue. Before we allocate
** the last node, we determine if the incoming event has a higher
if (eq->cur_nodes >= eq->max_nodes)
return NULL;
- /*
- ** We grab the next node from the node memory.
- */
+ // We grab the next node from the node memory.
return &eq->node_mem[eq->cur_nodes++];
}
/*
-** NAME
-** sfeventq_add:
-*/
-/**
** Add this event to the queue using the supplied ordering
** function. If the queue is exhausted, then we compare the
** event to be added with the last event, and decide whether
*/
int sfeventq_add(SF_EVENTQ* eq, void* event)
{
- SF_EVENTQ_NODE* node;
-
- if (!event)
- return -1;
+ assert(event);
/*
** If get_eventq_node() returns NULL, the incoming event is lower in
** priority than the last ranked event, so we just drop it.
*/
- node = get_eventq_node(eq, event);
- if (!node)
+ SF_EVENTQ_NODE* node = get_eventq_node(eq, event);
+
+ if ( !node )
+ {
+ ++eq->fails;
return -1;
+ }
node->event = event;
node->next = NULL;
node->prev = NULL;
- /*
- ** This is the first node
- */
if (eq->cur_nodes == 1)
{
+ // This is the first node
eq->head = eq->last = node;
return 0;
}
- /*
- ** This means we are the last node.
- */
+ // This means we are the last node.
node->prev = eq->last;
eq->last->next = node;
}
/*
-** NAME
-** sfeventq_action::
-*/
-/**
** Call the supplied user action function on the highest priority
** events.
**
*/
int cur_nodes;
int cur_events;
+ unsigned fails;
};
SF_EVENTQ* sfeventq_new(int max_nodes, int log_nodes, int event_size);
void* sfeventq_event_alloc(SF_EVENTQ*);
-void sfeventq_reset(SF_EVENTQ*);
+unsigned sfeventq_reset(SF_EVENTQ*); // returns fail count since last reset
int sfeventq_add(SF_EVENTQ*, void* event);
int sfeventq_action(SF_EVENTQ*, int (* action_func)(void* event, void* user), void* user);
void sfeventq_free(SF_EVENTQ*);
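
Taken together, the declarations above are the queue's whole public surface. A hedged usage sketch under those signatures; the event payload struct, the callback, and the ids are illustrative, not part of the API:

#include "sfeventq.h"

struct MyEvent { unsigned gid, sid; };           // illustrative payload

static int log_one(void* event, void*)
{
    // invoked on the highest priority events by sfeventq_action()
    MyEvent* e = (MyEvent*)event;
    (void)e;
    return 0;
}

static void example()
{
    // queue up to 8 events per packet, log the top 3
    SF_EVENTQ* q = sfeventq_new(8, 3, sizeof(MyEvent));

    if ( MyEvent* e = (MyEvent*)sfeventq_event_alloc(q) )
    {
        e->gid = 1; e->sid = 1;                  // illustrative ids
        if ( sfeventq_add(q, e) < 0 )
        { }                                      // dropped: bumps eq->fails
    }

    sfeventq_action(q, log_one, nullptr);

    // reset now reports the adds that failed since the last reset;
    // DetectionEngine::reset() folds that into pc.log_limit
    unsigned dropped = sfeventq_reset(q);
    (void)dropped;
    sfeventq_free(q);
}
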
#include "flow.h"
+#include "detection/detection_engine.h"
#include "flow/ha.h"
#include "flow/session.h"
#include "ips_options/ips_flowbits.h"
void Flow::reset(bool do_cleanup)
{
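+ // set up a detection context so events raised during session cleanup
+ // (e.g. flush-time alerts) have a packet to log against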
+ DetectionEngine::onload(this);
+ DetectionEngine::set_packet();
+ DetectionEngine de;
+
if ( session )
{
if ( do_cleanup )
#include "flow/flow_cache.h"
-#include "detection/detection_engine.h"
#include "flow/ha.h"
#include "hash/zhash.h"
#include "helpers/flag_context.h"
int FlowCache::release(Flow* flow, PruneReason reason, bool do_cleanup)
{
- DetectionEngine::onload(flow);
flow->reset(do_cleanup);
prune_stats.update(reason);
return remove(flow);
virtual void restart(Packet*) { }
virtual void precheck(Packet*) { }
virtual void clear() = 0;
- virtual void cleanup() { clear(); }
+ virtual void cleanup(Packet* = nullptr) { clear(); }
virtual bool add_alert(Packet*, uint32_t /*gid*/, uint32_t /*sid*/) { return false; }
virtual bool check_alerted(Packet*, uint32_t /*gid*/, uint32_t /*sid*/) { return false; }
public:
virtual ~HttpEventGen() = default;
void reset() { events_generated = 0; }
+
virtual void create_event(HttpEnums::EventSid sid)
{
assert(((int)sid > 0) && ((int)sid <= MAX));
#include "flow/flow_control.h"
#include "flow/prune_stats.h"
-#include "protocols/packet.h"
#include "managers/inspector_manager.h"
#include "profiler/profiler_defs.h"
+#include "protocols/packet.h"
#include "stream_ha.h"
#include "stream_module.h"
return true;
}
-void TcpStreamSession::cleanup()
+void TcpStreamSession::cleanup(Packet* p)
{
- clear_session( true, true, false );
+ clear_session(true, true, false, p);
}
void TcpStreamSession::clear()
bool setup(Packet*) override;
void clear() override;
- void cleanup() override;
+ void cleanup(Packet* = nullptr) override;
void set_splitter(bool, StreamSplitter*) override;
StreamSplitter* get_splitter(bool) override;
bool is_sequenced(uint8_t /*dir*/) override;
if (flow->session_state & STREAM_STATE_CLOSED)
{
assert(flow_con);
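+ // let the session flush with the current packet before the flow goes away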
+ flow->session->cleanup(p);
flow_con->delete_flow(flow, PruneReason::NONE);
p->flow = nullptr;
}
if ( !flow_con )
return;
- // FIXIT-H stream tcp needs to do this and prep pkt to handle
- // shutdown alerts while rebuilding (during flush before a
- // rebuilt packet is available)
- DetectionEngine::set_packet();
- DetectionEngine de;
- // this is a hack to work around the above issue
- DAQ_PktHdr_t* ph = (DAQ_PktHdr_t*)de.get_packet()->pkth;
- memset(ph, 0, sizeof(*ph));
-
flow_con->purge_flows(PktType::IP);
flow_con->purge_flows(PktType::ICMP);
flow_con->purge_flows(PktType::TCP);
void Stream::timeout_flows(time_t cur_time)
{
- if ( flow_con )
- // FIXIT-M batch here or loop vs looping over idle?
- flow_con->timeout_flows(cur_time);
+ if ( !flow_con )
+ return;
+
+ // FIXIT-M batch here or loop vs looping over idle?
+ flow_con->timeout_flows(cur_time);
}
void Stream::prune_flows()
{
- if ( flow_con )
- flow_con->prune_one(PruneReason::MEMCAP, false);
+ if ( !flow_con )
+ return;
+
+ flow_con->prune_one(PruneReason::MEMCAP, false);
}
bool Stream::expected_flow(Flow* f, Packet* p)
#include "profiler/profiler.h"
#include "detection/detection_engine.h"
#include "protocols/packet_manager.h"
+#include "time/packet_time.h"
#include "tcp_module.h"
#include "tcp_normalizer.h"
return flush_to_seq(bytes, p, dir);
}
-void TcpReassembler::final_flush(Packet* p, PegCount& peg, uint32_t dir)
+void TcpReassembler::final_flush(Packet* p, uint32_t dir)
{
- if ( !p )
- peg++;
-
tracker->set_tf_flags(TF_FORCE_FLUSH);
if ( flush_stream(p, dir) )
tracker->clear_tf_flags(TF_FORCE_FLUSH);
}
+static Packet* set_packet(Flow* flow, uint32_t flags, bool c2s)
+{
+ Packet* p = DetectionEngine::get_current_packet();
+ p->reset();
+
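+ // fabricate a fresh DAQ header for the pseudo packet and stamp it
+ // with the current packet time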
+ DAQ_PktHdr_t* ph = (DAQ_PktHdr_t*)p->pkth;
+ memset(ph, 0, sizeof(*ph));
+ packet_gettimeofday(&ph->ts);
+
+ p->ptrs.set_pkt_type(PktType::PDU);
+ p->proto_bits |= PROTO_BIT__TCP;
+ p->flow = flow;
+ p->packet_flags = flags;
+
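+ // orient the pseudo packet in the flush direction using the flow's endpoints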
+ if ( c2s )
+ {
+ p->ptrs.ip_api.set(flow->client_ip, flow->server_ip);
+ p->ptrs.sp = flow->client_port;
+ p->ptrs.dp = flow->server_port;
+ }
+ else
+ {
+ p->ptrs.ip_api.set(flow->server_ip, flow->client_ip);
+ p->ptrs.sp = flow->server_port;
+ p->ptrs.dp = flow->client_port;
+ }
+ return p;
+}
+
void TcpReassembler::flush_queued_segments(Flow* flow, bool clear, Packet* p)
{
+ bool data = p or seglist.head;
+
+ if ( !p )
+ {
+ // this packet is required if we call finish and/or final_flush
+ p = set_packet(flow, packet_dir, server_side);
+
+ if ( server_side )
+ tcpStats.s5tcp2++;
+ else
+ tcpStats.s5tcp1++;
+ }
+
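+ // flush only during teardown, once paf is initialized and the splitter
+ // (if any) reports a finished PDU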
bool pending = clear and paf_initialized(&tracker->paf_state)
and (!tracker->splitter or tracker->splitter->finish(flow) );
- if ((pending and (p or seglist.head) and !(flow->ssn_state.ignore_direction & ignore_dir)))
+ if ( pending and data and !(flow->ssn_state.ignore_direction & ignore_dir) )
{
- if (server_side)
- final_flush(p, tcpStats.s5tcp2, packet_dir);
- else
- final_flush(p, tcpStats.s5tcp1, packet_dir);
+ final_flush(p, packet_dir);
}
}
int flush_to_seq(uint32_t bytes, Packet*, uint32_t pkt_flags);
uint32_t get_q_footprint();
uint32_t get_q_sequenced();
- void final_flush(Packet*, PegCount&, uint32_t dir);
+ void final_flush(Packet*, uint32_t dir);
uint32_t get_reverse_packet_dir(const Packet*);
uint32_t get_forward_packet_dir(const Packet*);
int32_t flush_pdu_ips(uint32_t*);
void TcpSession::restart(Packet*){ }
void TcpSession::precheck(Packet*){ }
void TcpSession::clear(){ }
-void TcpSession::cleanup(){ }
+void TcpSession::cleanup(Packet*){ }
bool TcpSession::add_alert(Packet*, unsigned int, unsigned int){ return true; }
bool TcpSession::check_alerted(Packet*, unsigned int, unsigned int){ return true; }
int TcpSession::update_alert(Packet*, unsigned int, unsigned int, unsigned int, unsigned int){ return 0; }
PacketManager::dump_stats();
- // ensure proper counting of log_limit
- DetectionEngine::reset_counts();
-
LogLabel("Module Statistics");
const char* exclude = "daq snort";
ModuleManager::dump_stats(snort_conf, exclude);