#include "packet_io/active.h"
#include "packet_io/packet_tracer.h"
#include "stream/base/stream_module.h"
-#include "stream/tcp/tcp_stream_session.h"
+#include "stream/tcp/tcp_session.h"
#include "stream/tcp/tcp_trace.h"
#include "time/packet_time.h"
#include "trace/trace_api.h"
<< dst_ip << "/" << dst_port;
if (flow.session)
{
- TcpStreamSession* tcp_session = static_cast<TcpStreamSession*>(flow.session);
+ TcpSession* tcp_session = static_cast<TcpSession*>(flow.session);
proto << " state client " << stream_tcp_state_to_str(tcp_session->client)
<< " server " << stream_tcp_state_to_str(tcp_session->server);
}
if ( ignore )
{
- flow->ssn_state.ignore_direction = ignore;
+ flow->ssn_state.ignore_direction = SSN_DIR_BOTH;
DetectionEngine::disable_all(p);
}
}
#include "dce_http_proxy_module.h"
-#include "stream/tcp/tcp_stream_session.h"
+#include "managers/inspector_manager.h"
+#include "stream/tcp/tcp_session.h"
#include "dce_http_proxy_splitter.h"
{
if ( !p->test_session_flags(SSNFLAG_ABORT_CLIENT | SSNFLAG_ABORT_SERVER) )
{
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
DceHttpProxySplitter* c2s_splitter =
(DceHttpProxySplitter*)(tcp_session->get_splitter(true));
#include "dce_http_server_module.h"
-#include "stream/tcp/tcp_stream_session.h"
+#include "managers/inspector_manager.h"
+#include "stream/tcp/tcp_session.h"
#include "dce_http_server_splitter.h"
{
if ( !p->test_session_flags(SSNFLAG_ABORT_SERVER) )
{
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
DceHttpServerSplitter* splitter =
(DceHttpServerSplitter*)(tcp_session->get_splitter(false));
set (STREAM_INCLUDES
paf.h
+ pafng.h
stream.h
stream_splitter.h
)
flush_bucket.h
paf.cc
paf_stats.h
+ pafng.cc
)
install (FILES ${STREAM_INCLUDES}
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// pafng.cc author davis mcpherson davmcphe@cisco.com
+// based on paf.cc author Russ Combs <rcombs@sourcefire.com>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pafng.h"
+
+#include "detection/detection_engine.h"
+#include "protocols/packet.h"
+#include "protocols/tcp.h"
+
+using namespace snort;
+
+//--------------------------------------------------------------------
+// private state
+//--------------------------------------------------------------------
+
+
+#define PAF_LIMIT_FUZZ 1500
+
+// 255 is max pseudo-random flush point; eth mtu ensures that maximum flushes
+// are not trimmed which throws off the tracking total in stream5_paf.c
+// max paf max = max datagram - eth mtu - 255 = 63780
+#define MAX_PAF_MAX (65535 - PAF_LIMIT_FUZZ - 255)
+
+extern THREAD_LOCAL snort::ProfileStats pafPerfStats;
+
+//--------------------------------------------------------------------
+
+// Compute how many bytes to flush for the pending flush type in px.ft and
+// update *flags accordingly. Returns the byte count, or -1 (wrapped into
+// uint32_t) when nothing should be flushed. Side effects: maintains fpt
+// (current flush point) and tot (bytes flushed so far in this PDU).
+uint32_t ProtocolAwareFlusher::paf_flush (const PafAux& px, uint32_t* flags)
+{
+    uint32_t at = 0;
+    *flags &= ~(PKT_PDU_HEAD | PKT_PDU_TAIL);
+
+    switch ( px.ft )
+    {
+    case FT_NOP:  // no flush requested
+        return -1;
+
+    case FT_SFP:  // paf abort: clear all flags, flush nothing
+        *flags = 0;
+        return -1;
+
+    case FT_PAF:  // flush exactly to the splitter-determined flush point
+        at = fpt;
+        *flags |= PKT_PDU_TAIL;
+        break;
+
+    case FT_LIMIT:  // flush up to the limit; carry the remainder in fpt
+        if (fpt > px.len)
+        {
+            at = px.len;
+            fpt -= px.len;
+        }
+        else
+        {
+            at = fpt;
+            fpt = px.len - fpt; // number of characters scanned but not flushing
+        }
+        break;
+
+    // use of px.len is suboptimal here because the actual amount
+    // flushed is determined later and can differ in certain cases
+    // such as exceeding s5_pkt->max_dsize. the actual amount
+    // flushed would ideally be applied to fpt later. for
+    // now we try to circumvent such cases so we track correctly.
+    //
+    // FIXIT-L max_dsize should no longer be exceeded since it excludes headers.
+    case FT_MAX:  // flush everything queued; fpt keeps any excess flush point
+        at = px.len;
+        if ( fpt > px.len )
+            fpt -= px.len;
+        else
+            fpt = 0;
+        break;
+    }
+
+    if ( !at || !px.len )
+        return -1;
+
+    // safety - prevent seq + at < seq
+    if ( at > 0x7FFFFFFF )
+        at = 0x7FFFFFFF;
+
+    if ( !tot )
+        *flags |= PKT_PDU_HEAD;  // nothing flushed yet, so this starts a PDU
+
+    if ( *flags & PKT_PDU_TAIL )
+        tot = 0;    // PDU completed; reset the running total
+    else
+        tot += at;  // partial flush; accumulate toward the PDU total
+
+    return at;
+}
+
+//--------------------------------------------------------------------
+
+// Run the splitter's scan() over the given data. Records the returned
+// status in 'state' and the (segment-relative) flush point in 'fpt'.
+// Returns true when scan produced a flush point that lies within the
+// queued bytes (px.idx is advanced to it); returns false on ABORT/STOP
+// or when more data is needed (px.idx advances to end of queued bytes).
+bool ProtocolAwareFlusher::paf_callback (PafAux& px, Packet* pkt, const uint8_t* data,
+    uint32_t len, uint32_t flags)
+{
+    fpt = 0;
+    state = splitter->scan(pkt, data, len, flags, &fpt);
+
+    if ( state == StreamSplitter::ABORT || state == StreamSplitter::STOP )
+        return false;
+
+    if ( state != StreamSplitter::SEARCH )
+    {
+        fpt += px.idx;  // convert to an offset from the start of queued bytes
+        if ( fpt <= px.len )
+        {
+            px.idx = fpt;
+            return true;
+        }
+    }
+    px.idx = px.len;  // scanned everything queued without reaching a flush point
+    return false;
+}
+
+//--------------------------------------------------------------------
+
+// One step of the paf state machine. Dispatches on the current splitter
+// status and may set px.ft to request a flush. Returns true when the
+// caller (paf_check) should continue evaluating with the remaining data,
+// false when evaluation of this segment is done.
+bool ProtocolAwareFlusher::paf_eval(PafAux& px, Packet* pkt, uint32_t flags,
+    const uint8_t* data, uint32_t len)
+{
+    switch ( state )
+    {
+    case StreamSplitter::SEARCH:
+        // still looking for a flush point; scan any unscanned queued bytes
+        if ( px.len > px.idx )
+            return paf_callback(px, pkt, data, len, flags);
+
+        return false;
+
+    case StreamSplitter::FLUSH:
+        // a flush point is known; flush once enough bytes are queued
+        if ( px.len >= fpt )
+        {
+            px.ft = FT_PAF;
+            state = StreamSplitter::SEARCH;
+            return true;
+        }
+        if ( px.len >= splitter->max(pkt->flow) )
+        {
+            // hit the configured max before reaching the flush point
+            px.ft = FT_MAX;
+            return false;
+        }
+        return false;
+
+    case StreamSplitter::LIMIT:
+        // if we are within PAF_LIMIT_FUZZ character of paf_max ...
+        if ( px.len + PAF_LIMIT_FUZZ >= splitter->max(pkt->flow))
+        {
+            px.ft = FT_LIMIT;
+            state = StreamSplitter::LIMITED;
+            return false;
+        }
+        state = StreamSplitter::SEARCH;
+        return false;
+
+    case StreamSplitter::SKIP:
+        // skip forward to the flush point before resuming the scan
+        if ( px.len > fpt )
+        {
+            if ( fpt > px.idx )
+            {
+                uint32_t delta = fpt - px.idx;
+                if ( delta > len )
+                    return false;
+
+                data += delta;
+                len -= delta;
+            }
+            px.idx = fpt;
+            return paf_callback(px, pkt, data, len, flags);
+        }
+        return false;
+
+    case StreamSplitter::LIMITED:
+        // increment position by previously scanned bytes. set in paf_flush
+        state = StreamSplitter::SEARCH;
+        px.idx += fpt;
+        fpt = 0;
+        return true;
+
+    default:
+        // StreamSplitter::ABORT || StreamSplitter::START
+        break;
+    }
+
+    // unreachable/aborted state: signal paf abort to the caller
+    px.ft = FT_SFP;
+    return false;
+}
+
+//--------------------------------------------------------------------
+
+// Entry point, called on each in-order segment. Advances the stream cursor
+// (seq_num), drives paf_eval() over the new bytes, and returns the number
+// of bytes to flush, or -1 when no flush is indicated. 'total' is the
+// number of queued bytes including this segment; 'seq' is the segment's
+// TCP sequence number; *flags receives PKT_PDU_HEAD/PKT_PDU_TAIL updates.
+int32_t ProtocolAwareFlusher::paf_check (Packet* pkt, const uint8_t* data, uint32_t len,
+    uint32_t total, uint32_t seq, uint32_t* flags)
+{
+    Profile profile(pafPerfStats); // cppcheck-suppress unreadVariable
+    PafAux px;
+
+    if ( !paf_initialized() )
+    {
+        // first call for this direction: sync the cursor to this segment
+        seq_num = pos = seq;
+        fpt = tot = 0;
+        state = StreamSplitter::SEARCH;
+    }
+    else if ( SEQ_GT(seq, seq_num) )
+    {
+        // if seq jumped we have a gap. Flush any queued data, then abort
+        px.len = total - len;
+
+        if ( px.len )
+        {
+            fpt = 0;
+            px.ft = FT_MAX;
+            state = StreamSplitter::ABORT;
+            return paf_flush(px, flags);
+        }
+        *flags = 0;
+        state = StreamSplitter::ABORT;
+        return -1;
+    }
+    else if ( SEQ_LEQ(seq + len, seq_num) )
+    {
+        // segment lies entirely behind the cursor; nothing new to scan
+        return -1;
+    }
+    else if ( SEQ_LT(seq, seq_num) )
+    {
+        // partial overlap with already-scanned data; skip the scanned prefix
+        uint32_t shift = seq_num - seq;
+        data += shift;
+        len -= shift;
+    }
+
+    seq_num += len;
+    px.idx = total - len;  // offset of this segment within the queued bytes
+
+    // if 'total' is greater than the maximum paf_max AND 'total' is greater
+    // than paf_max bytes (i.e. after we have finished analyzing the
+    // current segment, total bytes analyzed will be greater than the
+    // configured paf_max == splitter->max(), we must ensure a flush
+    // occurs at the paf_max byte. So, we manually set the data's length and
+    // total queued bytes (px.len) to guarantee that at most paf_max bytes will
+    // be analyzed and flushed since the last flush point. It should also be
+    // noted that we perform the check here rather in in paf_flush() to
+    // avoid scanning the same data twice. The first scan would analyze the
+    // entire segment and the second scan would analyze this segments
+    // unflushed data.
+    if ( total >= MAX_PAF_MAX && total > splitter->max(pkt->flow) )
+    {
+        px.len = MAX_PAF_MAX;
+        len = len + px.len - total;  // clamp this segment's scan length to the cap
+    }
+    else
+        px.len = total;
+
+    do
+    {
+        px.ft = FT_NOP;
+        uint32_t idx = px.idx;
+
+        const bool cont = paf_eval(px, pkt, *flags, data, len);
+
+        if ( px.ft != FT_NOP )
+        {
+            // paf_eval requested a flush; compute the count and jump the cursor
+            int32_t fp = paf_flush(px, flags);
+            paf_jump(fp);
+            return fp;
+        }
+
+        if ( !cont )
+            break;
+
+        // skip past the bytes paf_eval consumed this iteration
+        if ( px.idx > idx )
+        {
+            uint32_t shift = px.idx - idx;
+            if ( shift > len )
+                shift = len;
+            data += shift;
+            len -= shift;
+        }
+    }
+    while ( true );
+
+    if ( state == StreamSplitter::ABORT )
+        *flags = 0;
+
+    else if ( (state != StreamSplitter::FLUSH) && (px.len > splitter->max(pkt->flow)) )
+    {
+        // queued bytes exceed the configured max without a flush point: force one
+        px.ft = FT_MAX;
+        uint32_t fp = paf_flush(px, flags);
+        paf_jump(fp);
+        return fp;
+    }
+
+    return -1;
+}
+
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+//--------------------------------------------------------------------
+// protocol aware flushing stuff
+// pafng.h author davis mcpherson davmcphe@cisco.com
+//--------------------------------------------------------------------
+
+#ifndef PAFNG_H
+#define PAFNG_H
+
+#include "main/snort_types.h"
+#include "main/thread.h"
+#include "profiler/profiler_defs.h"
+#include "stream/stream_splitter.h"
+
+namespace snort
+{
+struct Packet;
+}
+
+// Flush disposition selected by paf_eval() and applied by paf_flush().
+enum FlushType
+{
+    FT_NOP,    // no flush
+    FT_SFP,    // abort paf
+    FT_PAF,    // flush to paf pt when len >= paf
+    FT_LIMIT,  // flush to paf pt, don't update flags
+    FT_MAX     // flush len when len >= max
+};
+
+// Per-call scratch state shared by paf_eval() and paf_flush().
+struct PafAux
+{
+    FlushType ft;  // pending flush action for this evaluation pass
+    uint32_t len;  // total bytes queued
+    uint32_t idx;  // offset from start of queued bytes
+};
+
+// Per-direction protocol aware flushing state machine. Wraps a
+// StreamSplitter and tracks the scan position and flush points across
+// successive paf_check() calls.
+class ProtocolAwareFlusher
+{
+public:
+    ProtocolAwareFlusher() { }
+    ~ProtocolAwareFlusher() { }
+
+    // attach a splitter and reset to the START (uninitialized) state
+    SO_PUBLIC void paf_setup(snort::StreamSplitter* ss)
+    {
+        splitter = ss;
+        state = snort::StreamSplitter::START;
+    }
+
+    // return to START; the next paf_check() re-syncs to its segment
+    void paf_reset ()
+    { state = snort::StreamSplitter::START; }
+
+    // mark this direction aborted (paf_active() becomes false)
+    void paf_clear ()
+    { state = snort::StreamSplitter::ABORT; }
+
+    // current stream cursor (sequence number scanned up to)
+    uint32_t paf_position ()
+    { return seq_num; }
+
+    // nonzero once the state machine has moved past START
+    SO_PUBLIC uint32_t paf_initialized ()
+    { return ( state != snort::StreamSplitter::START ); }
+
+    // sync cursor and flush state to the given sequence number
+    SO_PUBLIC void paf_initialize(uint32_t seq)
+    {
+        seq_num = pos = seq;
+        fpt = tot = 0;
+        state = snort::StreamSplitter::SEARCH;
+    }
+
+    // nonzero until the splitter aborts
+    uint32_t paf_active ()
+    { return ( state != snort::StreamSplitter::ABORT ); }
+
+    // advance the flush position (and cursor) by n flushed bytes
+    void paf_jump(uint32_t n)
+    {
+        pos += n;
+        seq_num = pos;
+    }
+
+    // called on each in order segment
+    SO_PUBLIC int32_t paf_check(snort::Packet* p, const uint8_t* data, uint32_t len,
+        uint32_t total, uint32_t seqnum, uint32_t* flags);
+
+    uint32_t seq_num = 0; // stream cursor
+    uint32_t pos = 0; // last flush position
+    uint32_t fpt = 0; // current flush point
+    uint32_t tot = 0; // total bytes flushed
+    snort::StreamSplitter::Status state = snort::StreamSplitter::START; // current scan state
+
+private:
+    uint32_t paf_flush (const PafAux& px, uint32_t* flags);
+    bool paf_callback (PafAux&, snort::Packet*, const uint8_t* data, uint32_t len, uint32_t flags);
+    bool paf_eval (PafAux&, snort::Packet*, uint32_t flags, const uint8_t* data, uint32_t len);
+
+    snort::StreamSplitter* splitter = nullptr;  // non-owning; set via paf_setup()
+};
+
+#endif
+
#include "utils/util.h"
#include "tcp/tcp_session.h"
-#include "tcp/tcp_stream_session.h"
#include "tcp/tcp_stream_tracker.h"
using namespace snort;
{
assert(flow and flow->session and flow->pkt_type == PktType::TCP);
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
tcp_session->start_proxy();
}
return flow->session->disable_reassembly(flow);
}
-char Stream::get_reassembly_direction(Flow* flow)
-{
- assert(flow && flow->session);
- return flow->session->get_reassembly_direction();
-}
-
bool Stream::is_stream_sequenced(Flow* flow, uint8_t dir)
{
assert(flow && flow->session);
{
assert(flow and flow->session and flow->pkt_type == PktType::TCP);
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
return tcp_session->get_mss(to_server);
}
{
assert(flow and flow->session and flow->pkt_type == PktType::TCP);
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
return tcp_session->get_tcp_options_len(to_server);
}
{
assert(flow and flow->session and flow->pkt_type == PktType::TCP);
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
return tcp_session->can_set_no_ack();
}
{
assert(flow and flow->session and flow->pkt_type == PktType::TCP);
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
return tcp_session->set_no_ack(on_off);
}
if ( flow->pkt_type == PktType::TCP )
{
if ( to_server )
- ((TcpStreamSession*)flow->session)->server.perform_partial_flush();
+ ((TcpSession*)flow->session)->server.perform_partial_flush();
else
- ((TcpStreamSession*)flow->session)->client.perform_partial_flush();
+ ((TcpSession*)flow->session)->client.perform_partial_flush();
}
}
if (!flow or !flow->session or !(flow->pkt_type == PktType::TCP))
return false;
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
if (tcp_session->held_packet_dir == SSN_DIR_NONE)
return false;
TEST_CASE("Stream API", "[stream_api][stream]")
{
// initialization code here
+ TcpNormalizerFactory::initialize();
Flow* flow = new Flow;
SECTION("set/get ignore direction")
}
delete flow;
+ TcpNormalizerFactory::term();
}
#endif
Flow*, Packet* p, uint32_t gid, uint32_t sid,
uint32_t eventId, uint32_t eventSecond);
+
static void disable_reassembly(Flow*);
- static char get_reassembly_direction(Flow*);
// Returns true if stream data for the flow is in sequence, otherwise return false.
static bool is_stream_sequenced(Flow*, uint8_t dir);
held_packet_queue.h
ips_stream_reassemble.cc
ips_stream_size.cc
- segment_overlap_editor.cc
- segment_overlap_editor.h
stream_tcp.cc
stream_tcp.h
+ tcp_alerts.cc
+ tcp_alerts.h
tcp_defs.h
tcp_event_logger.cc
tcp_event_logger.h
tcp_normalizer.h
tcp_normalizers.cc
tcp_normalizers.h
+ tcp_overlap_resolver.cc
+ tcp_overlap_resolver.h
+ tcp_reassembler_ids.cc
+ tcp_reassembler_ids.h
+ tcp_reassembler_ips.cc
+ tcp_reassembler_ips.h
tcp_reassembler.cc
tcp_reassembler.h
- tcp_reassemblers.cc
- tcp_reassemblers.h
+ tcp_reassembly_segments.cc
+ tcp_reassembly_segments.h
tcp_segment_descriptor.cc
tcp_segment_descriptor.h
tcp_segment_node.cc
tcp_state_time_wait.h
tcp_stream_config.cc
tcp_stream_config.h
- tcp_stream_session.cc
- tcp_stream_session.h
tcp_stream_tracker.cc
tcp_stream_tracker.h
tcp_trace.cc
if (!pkt->flow || !pkt->ptrs.tcph)
return NO_MATCH;
+ Flow* flow = (Flow*)pkt->flow;
+
+ if ( !srod.enable ) /* Turn it off */
+ {
+ if ( srod.direction & SSN_DIR_FROM_SERVER )
+ Stream::set_splitter(flow, true);
+
+ if ( srod.direction & SSN_DIR_FROM_CLIENT )
+ Stream::set_splitter(flow, false);
+ }
+ else
+ {
+ // FIXIT-M PAF need to instantiate service splitter?
+ // FIXIT-M PAF need to check for ips / on-data
+ if ( srod.direction & SSN_DIR_FROM_SERVER )
+ Stream::set_splitter(flow, true, new AtomSplitter(true));
+
+ if ( srod.direction & SSN_DIR_FROM_CLIENT )
+ Stream::set_splitter(flow, false, new AtomSplitter(false));
+ }
+
+ if (srod.fastpath)
{
- Flow* lwssn = (Flow*)pkt->flow;
- TcpSession* tcpssn = (TcpSession*)lwssn->session;
-
- if ( !srod.enable ) /* Turn it off */
- {
- if ( srod.direction & SSN_DIR_FROM_SERVER )
- {
- tcpssn->server.set_flush_policy(STREAM_FLPOLICY_IGNORE);
- Stream::set_splitter(lwssn, true);
- }
-
- if ( srod.direction & SSN_DIR_FROM_CLIENT )
- {
- tcpssn->client.set_flush_policy(STREAM_FLPOLICY_IGNORE);
- Stream::set_splitter(lwssn, false);
- }
- }
- else
- {
- // FIXIT-M PAF need to instantiate service splitter?
- // FIXIT-M PAF need to check for ips / on-data
- if ( srod.direction & SSN_DIR_FROM_SERVER )
- {
- tcpssn->server.set_flush_policy(STREAM_FLPOLICY_ON_ACK);
- Stream::set_splitter(lwssn, true, new AtomSplitter(true));
- }
-
- if ( srod.direction & SSN_DIR_FROM_CLIENT )
- {
- tcpssn->client.set_flush_policy(STREAM_FLPOLICY_ON_ACK);
- Stream::set_splitter(lwssn, false, new AtomSplitter(false));
- }
- }
-
- if (srod.fastpath)
- {
- /* Turn off inspection */
- lwssn->ssn_state.ignore_direction |= srod.direction;
- DetectionEngine::disable_all(pkt);
-
- /* TBD: Set TF_FORCE_FLUSH ? */
- }
+ /* Turn off inspection */
+ flow->ssn_state.ignore_direction |= srod.direction;
+ DetectionEngine::disable_all(pkt);
+
+ /* TBD: Set TF_FORCE_FLUSH ? */
}
if (srod.alert)
ReassembleModule* reassembler = ( ReassembleModule* )ips_stream_reassemble->mod_ctor();
REQUIRE( reassembler != nullptr );
+ TcpNormalizerFactory::initialize();
+
Flow* flow = new Flow;
Packet* pkt = get_syn_packet(flow);
Cursor cursor(pkt);
}
#endif
release_packet(pkt);
+ TcpNormalizerFactory::term();
delete flow;
ips_stream_reassemble->mod_dtor(reassembler);
}
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// segment_overlap_editor.cc author davis mcpherson <davmcphe@cisco.com>
-// Created on: Oct 11, 2015
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include "segment_overlap_editor.h"
-
-#include "detection/detection_engine.h"
-#include "log/messages.h"
-
-#include "tcp_module.h"
-#include "tcp_normalizers.h"
-#include "tcp_session.h"
-
-using namespace snort;
-
-void SegmentOverlapState::init_sos(TcpSession* ssn, StreamPolicy pol)
-{
- session = ssn;
- reassembly_policy = pol;
-
- seglist.reset();
-
- seglist_base_seq = 0;
- seg_count = 0;
- seg_bytes_total = 0;
- seg_bytes_logical = 0;
- total_bytes_queued = 0;
- total_segs_queued = 0;
- overlap_count = 0;
-
- tsd = nullptr;
- left = nullptr;
- right = nullptr;
- rdata = nullptr;
-
- seq = 0;
- seq_end = 0;
- len = 0;
- overlap = 0;
- slide = 0;
- trunc_len = 0;
- rsize = 0;
- rseq = 0;
- keep_segment = true;
-
- tcp_ips_data = Normalize_GetMode(NORM_TCP_IPS);
-}
-
-void SegmentOverlapState::init_soe(
- TcpSegmentDescriptor& tsd, TcpSegmentNode* left, TcpSegmentNode* right)
-{
- this->tsd = &tsd;
- this->left = left;
- this->right = right;
-
- seq = tsd.get_seq();
- seq_end = tsd.get_end_seq();
- len = tsd.get_len();
-
- overlap = 0;
- slide = 0;
- trunc_len = 0;
-
- rdata = tsd.get_pkt()->data;
- rsize = tsd.get_len();
- rseq = tsd.get_seq();
-
- keep_segment = true;
-}
-
-bool SegmentOverlapEditor::is_segment_retransmit(
- TcpReassemblerState& trs, bool* full_retransmit)
-{
- // Don't want to count retransmits as overlaps or do anything
- // else with them. Account for retransmits of multiple PDUs
- // in one segment.
- bool* pb = (trs.sos.rseq == trs.sos.tsd->get_seq()) ? full_retransmit : nullptr;
-
- if ( trs.sos.right->is_retransmit(trs.sos.rdata, trs.sos.rsize,
- trs.sos.rseq, trs.sos.right->i_len, pb) )
- {
- trs.sos.tsd->set_retransmit_flag();
-
- if ( !(*full_retransmit) )
- {
- trs.sos.rdata += trs.sos.right->i_len;
- trs.sos.rsize -= trs.sos.right->i_len;
- trs.sos.rseq += trs.sos.right->i_len;
- trs.sos.seq += trs.sos.right->i_len;
- trs.sos.left = trs.sos.right;
- trs.sos.right = trs.sos.right->next;
- }
- else
- trs.sos.rsize = 0;
-
- if ( trs.sos.rsize == 0 )
- {
- // All data was retransmitted
- snort::DetectionEngine::disable_content(trs.sos.tsd->get_pkt());
- trs.sos.keep_segment = false;
- }
-
- return true;
- }
-
- return false;
-}
-
-void SegmentOverlapEditor::eval_left(TcpReassemblerState& trs)
-{
- if ( trs.sos.left )
- insert_left_overlap(trs);
-}
-
-void SegmentOverlapEditor::eval_right(TcpReassemblerState& trs)
-{
- while ( trs.sos.right && SEQ_LT(trs.sos.right->i_seq, trs.sos.seq_end) )
- {
- trs.sos.trunc_len = 0;
-
- assert(SEQ_LEQ(trs.sos.seq, trs.sos.right->i_seq));
- trs.sos.overlap = ( int )( trs.sos.seq_end - trs.sos.right->i_seq );
-
- // Treat sequence number overlap as a retransmission,
- // only check right side since left side happens rarely
- trs.sos.session->flow->call_handlers(trs.sos.tsd->get_pkt(), false);
- if ( trs.sos.overlap < trs.sos.right->i_len )
- {
- if ( trs.sos.right->is_retransmit(trs.sos.rdata, trs.sos.rsize,
- trs.sos.rseq, trs.sos.right->i_len, nullptr) )
- {
- // All data was retransmitted
- trs.sos.tsd->set_retransmit_flag();
- snort::DetectionEngine::disable_content(trs.sos.tsd->get_pkt());
- trs.sos.keep_segment = false;
- tcpStats.full_retransmits++;
- }
- else
- {
- tcpStats.overlaps++;
- trs.sos.overlap_count++;
- insert_right_overlap(trs);
- }
-
- break;
- }
- else // Full overlap
- {
- bool full_retransmit = false;
- // Don't want to count retransmits as overlaps or do anything
- // else with them. Account for retransmits of multiple PDUs
- // in one segment.
- if ( is_segment_retransmit(trs, &full_retransmit) )
- {
- if ( full_retransmit )
- {
- tcpStats.full_retransmits++;
- break;
- }
-
- continue;
- }
-
- tcpStats.overlaps++;
- trs.sos.overlap_count++;
- insert_full_overlap(trs);
-
- if ( trs.sos.keep_segment == false )
- return;
- }
- }
-}
-
-void SegmentOverlapEditor::drop_old_segment(TcpReassemblerState& trs)
-{
- TcpSegmentNode* drop_seg = trs.sos.right;
- trs.sos.right = trs.sos.right->next;
- delete_reassembly_segment(trs, drop_seg);
-}
-
-void SegmentOverlapEditor::left_overlap_keep_first(TcpReassemblerState& trs)
-{
- // NOTE that overlap will always be less than left->size since
- // seq is always greater than left->seq
- assert(SEQ_GT(trs.sos.seq, trs.sos.left->i_seq));
-
- trs.sos.overlap = trs.sos.left->i_seq + trs.sos.left->i_len - trs.sos.seq;
-
- if ( trs.sos.len < trs.sos.overlap )
- trs.sos.overlap = trs.sos.len;
-
- if ( trs.sos.overlap > 0 )
- {
- tcpStats.overlaps++;
- trs.sos.overlap_count++;
-
- if ( SEQ_GT(trs.sos.left->i_seq + trs.sos.left->i_len, trs.sos.seq_end) )
- {
- if (trs.sos.tcp_ips_data == NORM_MODE_ON)
- {
- unsigned offset = trs.sos.tsd->get_seq() - (trs.sos.left->i_seq - trs.sos.left->o_offset);
- trs.sos.tsd->rewrite_payload(0, trs.sos.left->data + offset);
- }
- norm_stats[PC_TCP_IPS_DATA][trs.sos.tcp_ips_data]++;
- }
- else
- {
- if ( trs.sos.tcp_ips_data == NORM_MODE_ON )
- {
- unsigned offset = trs.sos.tsd->get_seq() - (trs.sos.left->i_seq - trs.sos.left->o_offset);
- unsigned length = trs.sos.left->i_seq + trs.sos.left->i_len - trs.sos.tsd->get_seq();
- trs.sos.tsd->rewrite_payload(0, trs.sos.left->data + offset, length);
- }
-
- norm_stats[PC_TCP_IPS_DATA][trs.sos.tcp_ips_data]++;
- }
-
- trs.sos.seq += trs.sos.overlap;
- }
-}
-
-void SegmentOverlapEditor::left_overlap_trim_first(TcpReassemblerState& trs)
-{
- assert(SEQ_GT(trs.sos.seq, trs.sos.left->i_seq));
-
- trs.sos.overlap = trs.sos.left->i_seq + trs.sos.left->i_len - trs.sos.seq;
-
- if ( trs.sos.overlap > 0 )
- {
- tcpStats.overlaps++;
- trs.sos.overlap_count++;
-
- if ( SEQ_GEQ(trs.sos.left->i_seq + trs.sos.left->i_len, trs.sos.seq + trs.sos.len) )
- {
- // existing packet overlaps new on both sides. Drop the new data.
- trs.sos.seq += trs.sos.len;
- }
- else
- {
- /* Otherwise, trim the old data accordingly */
- trs.sos.left->c_len -= ( int16_t )trs.sos.overlap;
- trs.sos.left->i_len -= ( int16_t )trs.sos.overlap;
- trs.sos.seg_bytes_logical -= trs.sos.overlap;
- }
- }
-}
-
-void SegmentOverlapEditor::left_overlap_keep_last(TcpReassemblerState& trs)
-{
- assert(SEQ_GT(trs.sos.seq, trs.sos.left->i_seq));
-
- trs.sos.overlap = trs.sos.left->i_seq + trs.sos.left->i_len - trs.sos.seq;
-
- if ( trs.sos.overlap > 0 )
- {
- tcpStats.overlaps++;
- trs.sos.overlap_count++;
-
- /* True "Last" policy" */
- if ( SEQ_GT(trs.sos.left->i_seq + trs.sos.left->i_len, trs.sos.seq + trs.sos.len) )
- {
- /* New data is overlapped on both sides by existing data. Existing data needs to be
- * split and the new data inserted in the middle.
- * Need to duplicate left. Adjust that seq by + (seq + len) and
- * size by - (seq + len - left->i_seq).
- */
- dup_reassembly_segment(trs, trs.sos.left, &trs.sos.right);
-
- trs.sos.left->c_len -= (int16_t)trs.sos.overlap;
- trs.sos.left->i_len -= (int16_t)trs.sos.overlap;
-
- trs.sos.right->i_seq = trs.sos.seq + trs.sos.len;
- trs.sos.right->c_seq = trs.sos.right->i_seq;
- uint16_t delta = (int16_t)(trs.sos.right->i_seq - trs.sos.left->i_seq);
- trs.sos.right->c_len -= delta;
- trs.sos.right->i_len -= delta;
- trs.sos.right->offset += delta;
- trs.sos.right->o_offset += delta;
-
- trs.sos.seg_bytes_logical -= trs.sos.len;
- }
- else
- {
- trs.sos.left->c_len -= (int16_t)trs.sos.overlap;
- trs.sos.left->i_len -= (int16_t)trs.sos.overlap;
- trs.sos.seg_bytes_logical -= trs.sos.overlap;
- }
- }
-}
-
-void SegmentOverlapEditor::right_overlap_truncate_existing(TcpReassemblerState& trs)
-{
- if ( SEQ_EQ(trs.sos.right->i_seq, trs.sos.seq) &&
- ( trs.sos.reassembly_policy != StreamPolicy::OS_LAST ) )
- {
- trs.sos.seq += trs.sos.overlap;
- }
- else
- {
- /* partial overlap */
- trs.sos.right->i_seq += trs.sos.overlap;
- trs.sos.right->c_seq = trs.sos.right->i_seq;
- trs.sos.right->offset += trs.sos.overlap;
- trs.sos.right->o_offset += trs.sos.overlap;
- trs.sos.right->c_len -= (int16_t)trs.sos.overlap;
- trs.sos.right->i_len -= ( int16_t )trs.sos.overlap;
- trs.sos.seg_bytes_logical -= trs.sos.overlap;
- trs.sos.total_bytes_queued -= trs.sos.overlap;
- }
-}
-
-void SegmentOverlapEditor::right_overlap_truncate_new(TcpReassemblerState& trs)
-{
- if (trs.sos.tcp_ips_data == NORM_MODE_ON)
- {
- unsigned offset = trs.sos.right->i_seq - trs.sos.tsd->get_seq();
- unsigned length = trs.sos.tsd->get_seq() + trs.sos.tsd->get_len() - trs.sos.right->i_seq;
- trs.sos.tsd->rewrite_payload(offset, trs.sos.right->data + trs.sos.right->o_offset, length);
- }
-
- norm_stats[PC_TCP_IPS_DATA][trs.sos.tcp_ips_data]++;
- trs.sos.trunc_len = trs.sos.overlap;
-}
-
-// REASSEMBLY_POLICY_FIRST:
-// REASSEMBLY_POLICY_VISTA:
-void SegmentOverlapEditor::full_right_overlap_truncate_new(TcpReassemblerState& trs)
-{
-
- if ( trs.sos.tcp_ips_data == NORM_MODE_ON )
- {
- unsigned offset = trs.sos.right->i_seq - trs.sos.tsd->get_seq();
-
- if ( !offset && zwp_data_mismatch(trs, *trs.sos.tsd, trs.sos.right->i_len))
- {
- trs.tracker->normalizer.session_blocker(*trs.sos.tsd);
- trs.sos.keep_segment = false;
- return;
- }
-
- trs.sos.tsd->rewrite_payload(offset, trs.sos.right->data + trs.sos.right->o_offset, trs.sos.right->i_len);
- }
-
- norm_stats[PC_TCP_IPS_DATA][trs.sos.tcp_ips_data]++;
-
- if ( SEQ_EQ(trs.sos.right->i_seq, trs.sos.seq) )
- {
- /* Overlap is greater than or equal to right->size
- * slide gets set before insertion */
- trs.sos.seq += trs.sos.right->i_len;
- trs.sos.left = trs.sos.right;
- trs.sos.right = trs.sos.right->next;
- }
- else
- {
- // seq is less than right->i_seq, set trunc length and slide
- // and insert chunk before current right segment...
- trs.sos.trunc_len = trs.sos.overlap;
- trs.sos.slide = trs.sos.seq - trs.sos.tsd->get_seq();
- add_reassembly_segment(trs, *trs.sos.tsd, trs.sos.len, trs.sos.slide,
- trs.sos.trunc_len, trs.sos.seq, trs.sos.left);
-
- // Set seq to end of right since overlap was greater than or equal to right->size and
- // inserted seq has been truncated to beginning of right and reset trunc length to 0
- // since we may fall out of loop if next right is null
- trs.sos.seq = trs.sos.right->i_seq + trs.sos.right->i_len;
- trs.sos.left = trs.sos.right;
- trs.sos.right = trs.sos.right->next;
- trs.sos.trunc_len = 0;
- }
-}
-
-// REASSEMBLY_POLICY_WINDOWS:
-// REASSEMBLY_POLICY_WINDOWS2K3:
-// REASSEMBLY_POLICY_BSD:
-// REASSEMBLY_POLICY_MACOS:
-void SegmentOverlapEditor::full_right_overlap_os1(TcpReassemblerState& trs)
-{
- if ( SEQ_GEQ(trs.sos.seq_end, trs.sos.right->i_seq + trs.sos.right->i_len) and
- SEQ_LT(trs.sos.seq, trs.sos.right->i_seq) )
- {
- drop_old_segment(trs);
- }
- else
- full_right_overlap_truncate_new(trs);
-}
-
-// REASSEMBLY_POLICY_LINUX:
-// REASSEMBLY_POLICY_HPUX10:
-// REASSEMBLY_POLICY_IRIX:
-void SegmentOverlapEditor::full_right_overlap_os2(TcpReassemblerState& trs)
-{
- if ( SEQ_GEQ(trs.sos.seq_end, trs.sos.right->i_seq + trs.sos.right->i_len) and
- SEQ_LT(trs.sos.seq, trs.sos.right->i_seq) )
- {
- drop_old_segment(trs);
- }
- else if ( SEQ_GT(trs.sos.seq_end, trs.sos.right->i_seq + trs.sos.right->i_len) and
- SEQ_EQ(trs.sos.seq, trs.sos.right->i_seq) )
- {
- drop_old_segment(trs);
- }
- else
- full_right_overlap_truncate_new(trs);
-}
-
-// REASSEMBLY_POLICY_HPUX11:
-// REASSEMBLY_POLICY_SOLARIS:
-void SegmentOverlapEditor::full_right_overlap_os3(TcpReassemblerState& trs)
-{
- // If this packet is wholly overlapping and the same size as a previous one and we have not
- // received the one immediately preceding, we take the FIRST.
- if ( SEQ_EQ(trs.sos.right->i_seq, trs.sos.seq) && (trs.sos.right->i_len == trs.sos.len)
- && (trs.sos.left && !SEQ_EQ(trs.sos.left->i_seq + trs.sos.left->i_len, trs.sos.seq)) )
- {
- trs.sos.trunc_len = trs.sos.right->i_len;
- trs.sos.rdata += trs.sos.right->i_len;
- trs.sos.rsize -= trs.sos.right->i_len;
- trs.sos.rseq += trs.sos.right->i_len;
- trs.sos.seq += trs.sos.right->i_len;
- trs.sos.left = trs.sos.right;
- trs.sos.right = trs.sos.right->next;
- }
- else
- drop_old_segment(trs);
-}
-
-// REASSEMBLY_POLICY_OLD_LINUX:
-// REASSEMBLY_POLICY_LAST:
-void SegmentOverlapEditor::full_right_overlap_os4(TcpReassemblerState& trs)
-{ drop_old_segment(trs); }
-
-void SegmentOverlapEditor::full_right_overlap_os5(TcpReassemblerState& trs)
-{
- full_right_overlap_truncate_new(trs);
-}
-
-bool SegmentOverlapEditor::zwp_data_mismatch(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd, uint32_t overlap)
-{
- if ( overlap == MAX_ZERO_WIN_PROBE_LEN
- and trs.sos.right->i_seq == trs.tracker->normalizer.get_zwp_seq()
- and (trs.sos.right->data[0] != tsd.get_pkt()->data[0]) )
- {
- return tsd.is_nap_policy_inline();
- }
-
- return false;
-}
-
-void SegmentOverlapEditor::print(TcpReassemblerState& trs)
-{
- LogMessage(" seglist_base_seq: %X\n", trs.sos.seglist_base_seq);
- LogMessage(" seglist head: %p\n", (void*)trs.sos.seglist.head);
- LogMessage(" seglist tail: %p\n", (void*)trs.sos.seglist.tail);
- LogMessage(" seglist current: %p\n", (void*)trs.sos.seglist.cur_rseg);
- LogMessage(" seg_count: %d\n", trs.sos.seg_count);
- LogMessage(" seg_bytes_total: %d\n", trs.sos.seg_bytes_total);
- LogMessage(" seg_bytes_logical: %d\n", trs.sos.seg_bytes_logical);
-}
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// segment_overlap_editor.h author davis mcpherson <davmcphe@cisco.com>
-// Created on: Oct 11, 2015
-
-#ifndef SEGMENT_OVERLAP_EDITOR_H
-#define SEGMENT_OVERLAP_EDITOR_H
-
-#include <vector>
-
-#include "normalize/norm_stats.h"
-#include "stream/paf.h"
-#include "stream/stream.h"
-#include "tcp_segment_node.h"
-
-class TcpSession;
-class TcpStreamTracker;
-
-struct SegmentOverlapState
-{
- TcpSession* session;
- TcpSegmentDescriptor* tsd;
- TcpSegmentNode* left;
- TcpSegmentNode* right;
- const uint8_t* rdata;
-
- TcpSegmentList seglist;
- uint32_t seglist_base_seq; /* seq of first queued segment */
- uint32_t seg_count; /* number of current queued segments */
- uint32_t seg_bytes_total; /* total bytes currently queued */
- uint32_t seg_bytes_logical; /* logical bytes queued (total - overlaps) */
- uint32_t total_bytes_queued; /* total bytes queued (life of session) */
- uint32_t total_segs_queued; /* number of segments queued (life) */
- uint32_t overlap_count; /* overlaps encountered */
-
- uint32_t seq;
- uint32_t seq_end;
- uint32_t rseq;
-
- int32_t overlap;
- int32_t slide;
- int32_t trunc_len;
-
- uint16_t len;
- uint16_t rsize;
- int8_t tcp_ips_data;
- StreamPolicy reassembly_policy;
-
- bool keep_segment;
-
- ~SegmentOverlapState()
- { seglist.reset(); }
-
- void init_sos(TcpSession*, StreamPolicy);
- void init_soe(TcpSegmentDescriptor& tsd, TcpSegmentNode* left, TcpSegmentNode* right);
-};
-
-struct StreamAlertInfo : snort::AlertInfo
-{
- StreamAlertInfo(uint32_t gid_, uint32_t sid_, uint32_t seq_num_ = 0, uint32_t id_ = 0, uint32_t ts_ = 0)
- : snort::AlertInfo(gid_, sid_, id_, ts_), seq(seq_num_)
- {}
-
- uint32_t seq;
-};
-
-struct TcpReassemblerState
-{
- SegmentOverlapState sos;
- TcpStreamTracker* tracker;
- uint32_t flush_count; // number of flushed queued segments
- uint32_t xtradata_mask; // extra data available to log
- std::vector<StreamAlertInfo> alerts;
- uint8_t ignore_dir;
- uint8_t packet_dir;
- bool server_side;
- PAF_State paf_state;
-};
-
-class SegmentOverlapEditor
-{
-protected:
- SegmentOverlapEditor() = default;
- virtual ~SegmentOverlapEditor() = default;
-
- void eval_left(TcpReassemblerState&);
- void eval_right(TcpReassemblerState&);
-
- virtual bool is_segment_retransmit(TcpReassemblerState&, bool*);
- virtual void drop_old_segment(TcpReassemblerState&);
- virtual bool zwp_data_mismatch(TcpReassemblerState&, TcpSegmentDescriptor&, uint32_t);
-
- virtual void left_overlap_keep_first(TcpReassemblerState&);
- virtual void left_overlap_trim_first(TcpReassemblerState&);
- virtual void left_overlap_keep_last(TcpReassemblerState&);
- virtual void right_overlap_truncate_existing(TcpReassemblerState&);
- virtual void right_overlap_truncate_new(TcpReassemblerState&);
- virtual void full_right_overlap_truncate_new(TcpReassemblerState&);
- virtual void full_right_overlap_os1(TcpReassemblerState&);
- virtual void full_right_overlap_os2(TcpReassemblerState&);
- virtual void full_right_overlap_os3(TcpReassemblerState&);
- virtual void full_right_overlap_os4(TcpReassemblerState&);
- virtual void full_right_overlap_os5(TcpReassemblerState&);
-
- virtual void insert_left_overlap(TcpReassemblerState&) = 0;
- virtual void insert_right_overlap(TcpReassemblerState&) = 0;
- virtual void insert_full_overlap(TcpReassemblerState&) = 0;
-
- virtual void add_reassembly_segment(
- TcpReassemblerState&, TcpSegmentDescriptor&, uint16_t, uint32_t,
- uint32_t, uint32_t, TcpSegmentNode*) = 0;
-
- virtual void dup_reassembly_segment(TcpReassemblerState&, TcpSegmentNode*, TcpSegmentNode**) = 0;
- virtual int delete_reassembly_segment(TcpReassemblerState&, TcpSegmentNode*) = 0;
- virtual void print(TcpReassemblerState&);
-};
-
-#endif
#include "tcp_ha.h"
#include "tcp_module.h"
+#include "tcp_overlap_resolver.h"
#include "tcp_session.h"
-#include "tcp_reassemblers.h"
#include "tcp_state_machine.h"
using namespace snort;
static void stream_tcp_pinit()
{
TcpStateMachine::initialize();
- TcpReassemblerFactory::initialize();
+ TcpOverlapResolverFactory::initialize();
TcpNormalizerFactory::initialize();
}
static void stream_tcp_pterm()
{
TcpStateMachine::term();
- TcpReassemblerFactory::term();
+ TcpOverlapResolverFactory::term();
TcpNormalizerFactory::term();
}
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_alerts.cc author davis mcpherson <davmcphe@cisco.com>
+// Created on: Nov 7, 2023
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cassert>
+
+#include "tcp_alerts.h"
+
+#include "detection/context_switcher.h"
+
+#include "tcp_session.h"
+
+using namespace snort;
+
+// Post-detection callback for ACK'd (non-IPS) flushing: the packet ACKs data
+// the peer sent, so purge alerts on the tracker opposite the packet's origin
+// (a packet from the server acknowledges client data, and vice versa).
+static void purge_alerts_callback_ackd(IpsContext *c)
+{
+    TcpSession *session = (TcpSession*) c->packet->flow->session;
+
+    if (c->packet->is_from_server())
+        session->client.tcp_alerts.purge_alerts(c->packet->flow);
+    else
+        session->server.tcp_alerts.purge_alerts(c->packet->flow);
+}
+
+// Post-detection callback for IPS-mode flushing: the flushed PDU is the
+// sender's own data, so purge alerts on the same side the packet came from
+// (intentionally mirrored relative to the ACK'd-mode callback above).
+static void purge_alerts_callback_ips(IpsContext *c)
+{
+    TcpSession *session = (TcpSession*) c->packet->flow->session;
+
+    if (c->packet->is_from_server())
+        session->server.tcp_alerts.purge_alerts(c->packet->flow);
+    else
+        session->client.tcp_alerts.purge_alerts(c->packet->flow);
+}
+
+// Record a gid/sid alert against this direction's reassembled stream,
+// deduplicating via check_alerted(). Always returns true (caller contract:
+// the alert is tracked, whether newly added or already present).
+bool TcpAlerts::add_alert(uint32_t gid, uint32_t sid)
+{
+    // Sanity bound: can never track more distinct alerts than rules loaded.
+    assert(
+        alerts.size()
+        <= (uint32_t )(get_ips_policy()->rules_loaded + get_ips_policy()->rules_shared));
+
+    if (!this->check_alerted(gid, sid))
+        alerts.emplace_back(gid, sid);
+
+    return true;
+}
+
+// True if a gid/sid alert has already been recorded (linear scan; the list
+// is bounded by the loaded rule count and purged at flush time).
+bool TcpAlerts::check_alerted(uint32_t gid, uint32_t sid)
+{
+    return std::any_of(alerts.cbegin(), alerts.cend(), [gid, sid](const StreamAlertInfo &alert)
+    { return alert.gid == gid && alert.sid == sid;});
+}
+
+// Attach the logged event id/second to a previously recorded gid/sid alert
+// so extra data can be correlated with it later. Returns 0 on success,
+// -1 if no matching alert is found.
+int TcpAlerts::update_alert(uint32_t gid, uint32_t sid, uint32_t event_id, uint32_t event_second)
+{
+    // FIXIT-M comparison of seq_num is wrong, compare value is always 0, should be seq_num of wire packet
+    uint32_t seq_num = 0;
+
+    auto it = std::find_if(alerts.begin(), alerts.end(),
+        [gid, sid, seq_num](const StreamAlertInfo &alert)
+        { return alert.gid == gid && alert.sid == sid && SEQ_EQ(alert.seq, seq_num);});
+    if (it != alerts.end())
+    {
+        (*it).event_id = event_id;
+        (*it).event_second = event_second;
+        return 0;
+    }
+
+    return -1;
+}
+
+// Log any pending extra data for each recorded alert, then clear the list.
+// Suspended flows keep their alerts so logging can complete when resumed.
+void TcpAlerts::purge_alerts(Flow* flow)
+{
+    for (auto &alert : alerts)
+        Stream::log_extra_data(flow, xtradata_mask, alert);
+
+    if (!flow->is_suspended())
+        alerts.clear();
+}
+
+// Defer the purge until detection on the last PDU completes, picking the
+// callback whose direction logic matches the flush mode (IPS vs ACK'd).
+void TcpAlerts::purge_alerts(Packet& last_pdu, bool ips_enabled)
+{
+    if ( ips_enabled )
+        last_pdu.context->register_post_callback(purge_alerts_callback_ips);
+    else
+        last_pdu.context->register_post_callback(purge_alerts_callback_ackd);
+}
+
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2014-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_alerts.h author davis mcpherson <davmcphe@cisco.com>
+// Created on: Jul 31, 2015
+
+#ifndef TCP_ALERTS_H
+#define TCP_ALERTS_H
+
+#include <cstdint>
+
+#include "protocols/packet.h"
+#include "stream/stream.h"
+
+struct StreamAlertInfo: snort::AlertInfo
+{
+ StreamAlertInfo(uint32_t gid_, uint32_t sid_, uint32_t seq_num_ = 0, uint32_t id_ = 0,
+ uint32_t ts_ = 0) : snort::AlertInfo(gid_, sid_, id_, ts_), seq(seq_num_)
+ { }
+
+ uint32_t seq;
+};
+
+class TcpAlerts
+{
+public:
+ TcpAlerts() = default;
+
+ void clear()
+ {
+ xtradata_mask = 0;
+ alerts.clear();
+ }
+
+ bool add_alert(uint32_t gid, uint32_t sid);
+ bool check_alerted(uint32_t gid, uint32_t sid);
+ int update_alert(uint32_t gid, uint32_t sid, uint32_t event_id, uint32_t event_second);
+ void purge_alerts(snort::Flow* flow);
+ void purge_alerts(snort::Packet& last_pdu, bool ips_enabled);
+
+ void set_xtradata_mask(uint32_t mask)
+ {
+ xtradata_mask = mask;
+ }
+
+ uint32_t get_xtradata_mask() const
+ {
+ return xtradata_mask;
+ }
+
+private:
+
+ uint32_t xtradata_mask = 0; // extra data available to log
+ std::vector<StreamAlertInfo> alerts;
+
+};
+
+#endif
#define PAWS_WINDOW 60
#define PAWS_24DAYS 2073600 /* 24 days in seconds */
-#define STREAM_UNALIGNED 0
-#define STREAM_ALIGNED 1
-
#define STREAM_DEFAULT_MAX_SMALL_SEG_SIZE 0 /* disabled */
#define STREAM_DEFAULT_CONSEC_SMALL_SEGS 0 /* disabled */
#include "tcp_normalizer.h"
-#include "stream/stream.h"
+#include "detection/detection_engine.h"
#include "packet_io/packet_tracer.h"
+#include "stream/stream.h"
+#include "trace/trace.h"
#include "trace/trace_api.h"
-#include "tcp_module.h"
-#include "tcp_stream_session.h"
+
+#include "tcp_session.h"
#include "tcp_stream_tracker.h"
using namespace snort;
tcpStats.zero_win_probes++;
set_zwp_seq(tns, seq);
log_drop_reason(tns, tsd, inline_mode, "stream",
- "Normalizer: Maximum Zero Window Probe length supported at a time is 1 byte\n");
+ "Normalizer: Maximum Zero Window Probe length supported at a time is 1 byte\n");
trim_win_payload(tns, tsd, MAX_ZERO_WIN_PROBE_LEN, inline_mode);
}
}
#include "normalize/norm_stats.h"
#include "protocols/tcp_options.h"
-class TcpStreamSession;
+class TcpSession;
class TcpStreamTracker;
class TcpSegmentDescriptor;
class TcpNormalizer;
struct TcpNormalizerState
{
- TcpStreamSession* session = nullptr;
+ TcpSession* session = nullptr;
TcpStreamTracker* tracker = nullptr;
TcpStreamTracker* peer_tracker = nullptr;
TcpNormalizer* prev_norm = nullptr;
virtual int handle_repeated_syn(State&, TcpSegmentDescriptor&) = 0;
virtual uint16_t set_urg_offset(State&, const snort::tcp::TCPHdr* tcph, uint16_t dsize);
virtual void set_zwp_seq(State&, uint32_t seq);
- virtual void log_drop_reason(State&, const TcpSegmentDescriptor&, bool inline_mode, const char *issuer, const std::string& log);
+ virtual void log_drop_reason(State&, const TcpSegmentDescriptor&, bool inline_mode,
+ const char *issuer, const std::string& log);
virtual bool is_keep_alive_probe(State&, const TcpSegmentDescriptor&);
static void reset_stats();
#include "tcp_module.h"
#include "tcp_segment_descriptor.h"
-#include "tcp_stream_session.h"
+#include "tcp_session.h"
#include "tcp_stream_tracker.h"
using namespace snort;
static inline int handle_repeated_syn_mswin(
TcpStreamTracker* talker, TcpStreamTracker* listener,
- const TcpSegmentDescriptor& tsd, TcpStreamSession* session)
+ const TcpSegmentDescriptor& tsd, TcpSession* session)
{
/* Windows has some strange behavior here. If the sequence of the reset is the
* next expected sequence, it Resets. Otherwise it ignores the 2nd SYN.
}
static inline int handle_repeated_syn_bsd(
- TcpStreamTracker* talker, const TcpSegmentDescriptor& tsd, TcpStreamSession* session)
+ TcpStreamTracker* talker, const TcpSegmentDescriptor& tsd, TcpSession* session)
{
/* If its not a retransmission of the actual SYN... RESET */
if ( !SEQ_EQ(tsd.get_seq(), talker->get_iss()) )
return NORM_OK;
}
-bool TcpNormalizerProxy::validate_rst(
- TcpNormalizerState&, TcpSegmentDescriptor&)
-{
- return true;
-}
+bool TcpNormalizerProxy::validate_rst(TcpNormalizerState&, TcpSegmentDescriptor&)
+{ return true; }
-int TcpNormalizerProxy::handle_paws(
- TcpNormalizerState&, TcpSegmentDescriptor&)
-{
- return ACTION_NOTHING;
-}
+int TcpNormalizerProxy::handle_paws(TcpNormalizerState&, TcpSegmentDescriptor&)
+{ return ACTION_NOTHING; }
-int TcpNormalizerProxy::handle_repeated_syn(
- TcpNormalizerState&, TcpSegmentDescriptor&)
-{
- return ACTION_NOTHING;
-}
+int TcpNormalizerProxy::handle_repeated_syn(TcpNormalizerState&, TcpSegmentDescriptor&)
+{ return ACTION_NOTHING; }
TcpNormalizer::NormStatus TcpNormalizerMissed3whs::apply_normalizations(
TcpNormalizerState&, TcpSegmentDescriptor&, uint32_t, bool)
return true;
}
-int TcpNormalizerMissed3whs::handle_paws(
- TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
-{
- UNUSED(tsd);
- UNUSED(tns);
- return ACTION_NOTHING;
-}
+int TcpNormalizerMissed3whs::handle_paws(TcpNormalizerState&, TcpSegmentDescriptor&)
+{ return ACTION_NOTHING; }
int TcpNormalizerMissed3whs::handle_repeated_syn(
- TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
+ TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
{
return tns.prev_norm->handle_repeated_syn(tns, tsd);
}
-void TcpNormalizerPolicy::init(StreamPolicy os, TcpStreamSession* ssn, TcpStreamTracker* trk, TcpStreamTracker* peer)
+void TcpNormalizerPolicy::init(StreamPolicy os, TcpSession* ssn, TcpStreamTracker* trk, TcpStreamTracker* peer)
{
if ( os == StreamPolicy::MISSED_3WHS and os == tns.os_policy)
tns.prev_norm = TcpNormalizerFactory::get_instance(StreamPolicy::OS_DEFAULT);
#include "stream/tcp/tcp_normalizer.h"
-class TcpStreamSession;
-class TcpStreamSession;
+class TcpSession;
+class TcpSession;
class TcpNormalizerFactory
{
TcpNormalizerPolicy() = default;
~TcpNormalizerPolicy() = default;
- void init(StreamPolicy os, TcpStreamSession* ssn, TcpStreamTracker* trk, TcpStreamTracker* peer);
+ void init(StreamPolicy os, TcpSession* ssn, TcpStreamTracker* trk, TcpStreamTracker* peer);
void reset()
{ init(StreamPolicy::OS_DEFAULT, nullptr, nullptr, nullptr); }
void set_zwp_seq(uint32_t seq)
{ return norm->set_zwp_seq(tns, seq); }
- void log_drop_reason(const TcpSegmentDescriptor& tsd, bool inline_mode, const char *issuer, const std::string& log)
+ void log_drop_reason(const TcpSegmentDescriptor& tsd, bool inline_mode,
+ const char *issuer, const std::string& log)
{ return norm->log_drop_reason(tns, tsd, inline_mode, issuer, log); }
bool is_keep_alive_probe(const TcpSegmentDescriptor& tsd)
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_overlap_resolver.cc author davis mcpherson <davmcphe@cisco.com>
+// Created on: Oct 11, 2015
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tcp_overlap_resolver.h"
+
+#include "detection/detection_engine.h"
+
+#include "tcp_module.h"
+#include "tcp_normalizers.h"
+#include "tcp_segment_node.h"
+#include "tcp_session.h"
+
+using namespace snort;
+
+// Bind this overlap-resolution state to a tracker's segment list and latch
+// the configured TCP IPS normalization mode for the life of the state object.
+TcpOverlapState::TcpOverlapState(TcpReassemblySegments& seglist)
+    : seglist(seglist)
+{
+    tcp_ips_data = Normalize_GetMode(NORM_TCP_IPS);
+}
+
+// Prepare per-segment overlap state for the segment described by tsd:
+// position 'left'/'right' around the new segment's starting sequence and
+// reset the working fields (overlap/slide/trunc_len and the r* cursor).
+void TcpOverlapState::init(TcpSegmentDescriptor& tsd)
+{
+    int32_t dist_head = 0, dist_tail = 0;
+
+    // Absolute sequence distance from the new segment to the list head and
+    // tail - used only to pick the cheaper end to start the linear search from.
+    if ( seglist.head && seglist.tail )
+    {
+        if ( SEQ_GT(tsd.get_seq(), seglist.head->start_seq()) )
+            dist_head = tsd.get_seq() - seglist.head->start_seq();
+        else
+            dist_head = seglist.head->start_seq() - tsd.get_seq();
+
+        if ( SEQ_GT(tsd.get_seq(), seglist.tail->start_seq()) )
+            dist_tail = tsd.get_seq() - seglist.tail->start_seq();
+        else
+            dist_tail = seglist.tail->start_seq() - tsd.get_seq();
+    }
+
+    // Walk toward the insertion point from whichever end is closer; on exit
+    // 'left' is the last node before tsd's seq and 'right' the first at/after it.
+    left = right = nullptr;
+    if ( dist_head < dist_tail )
+    {
+        right = seglist.head;
+        while ( right and SEQ_LT(right->start_seq(), tsd.get_seq()) )
+        {
+            left = right;
+            right = right->next;
+        }
+    }
+    else
+    {
+        left = seglist.tail;
+        while ( left and SEQ_GEQ(left->start_seq(), tsd.get_seq()) )
+        {
+            right = left;
+            left = left->prev;
+        }
+    }
+
+    this->tsd = &tsd;
+    seq = tsd.get_seq();
+    seq_end = tsd.get_end_seq();
+    len = tsd.get_len();
+
+    // Working adjustments applied as the overlap handlers consume the segment.
+    overlap = 0;
+    slide = 0;
+    trunc_len = 0;
+
+    // Read cursor over the new segment's payload (advanced across multiple
+    // existing nodes when one wire segment spans several queued PDUs).
+    rdata = tsd.get_pkt()->data;
+    rsize = tsd.get_len();
+    rseq = tsd.get_seq();
+
+    keep_segment = true;
+}
+
+// Returns true when the current 'right' node's bytes are a retransmission of
+// the new segment's data at the read cursor; advances the cursor past the
+// matched node (or zeroes it on a full retransmit) and disables detection
+// content when nothing new remains.
+// NOTE(review): full_retransmit is dereferenced unconditionally below, so
+// callers must pass a valid pointer (eval_right does) - confirm no caller
+// can pass nullptr.
+bool TcpOverlapResolver::is_segment_retransmit(TcpOverlapState& tos, bool* full_retransmit)
+{
+    // Don't want to count retransmits as overlaps or do anything
+    // else with them. Account for retransmits of multiple PDUs
+    // in one segment.
+    bool* pb = (tos.rseq == tos.tsd->get_seq()) ? full_retransmit : nullptr;
+
+    if ( tos.right->is_retransmit(tos.rdata, tos.rsize,
+        tos.rseq, tos.right->length, pb) )
+    {
+        tos.tsd->set_retransmit_flag();
+
+        if ( !(*full_retransmit) )
+        {
+            // Partial match: step the cursor past this node and keep scanning.
+            tos.rdata += tos.right->length;
+            tos.rsize -= tos.right->length;
+            tos.rseq += tos.right->length;
+            tos.slide += tos.right->length;
+            tos.left = tos.right;
+            tos.right = tos.right->next;
+        }
+        else
+            tos.rsize = 0;
+
+        if ( tos.rsize == 0 )
+        {
+            // All data was retransmitted
+            snort::DetectionEngine::disable_content(tos.tsd->get_pkt());
+            tos.keep_segment = false;
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+// Resolve any overlap with the node immediately left of the insertion point;
+// the policy subclass decides how via insert_left_overlap().
+void TcpOverlapResolver::eval_left(TcpOverlapState& tos)
+{
+    if ( tos.left )
+        insert_left_overlap(tos);
+}
+
+void TcpOverlapResolver::eval_right(TcpOverlapState& tos)
+{
+ while ( tos.right && SEQ_LT(tos.right->start_seq(), tos.seq_end) )
+ {
+ tos.trunc_len = 0;
+
+ assert(SEQ_LEQ(tos.slide_seq(), tos.right->start_seq()));
+ tos.overlap = ( int )( tos.seq_end - tos.right->start_seq() );
+
+ // Treat sequence number overlap as a retransmission,
+ // only check right side since left side happens rarely
+ tos.seglist.session->flow->call_handlers(tos.tsd->get_pkt(), false);
+ if ( tos.overlap < tos.right->length )
+ {
+ if ( tos.right->is_retransmit(tos.rdata, tos.rsize,
+ tos.rseq, tos.right->length, nullptr) )
+ {
+ // All data was retransmitted
+ tos.tsd->set_retransmit_flag();
+ snort::DetectionEngine::disable_content(tos.tsd->get_pkt());
+ tos.keep_segment = false;
+ tcpStats.full_retransmits++;
+ }
+ else
+ {
+ tcpStats.overlaps++;
+ tos.seglist.overlap_count++;
+ insert_right_overlap(tos);
+ }
+
+ break;
+ }
+ else // Full overlap
+ {
+ bool full_retransmit = false;
+ // Don't want to count retransmits as overlaps or do anything
+ // else with them. Account for retransmits of multiple PDUs
+ // in one segment.
+ if ( is_segment_retransmit(tos, &full_retransmit) )
+ {
+ if ( full_retransmit )
+ {
+ tcpStats.full_retransmits++;
+ break;
+ }
+ continue;
+ }
+
+ tcpStats.overlaps++;
+ tos.seglist.overlap_count++;
+ insert_full_overlap(tos);
+
+ if ( !tos.keep_segment )
+ return;
+ }
+ }
+}
+
+// Unlink and delete the currently-overlapped 'right' node, advancing the
+// cursor to its successor (the new segment's data supersedes it).
+void TcpOverlapResolver::drop_old_segment(TcpOverlapState& tos)
+{
+    TcpSegmentNode* drop_seg = tos.right;
+    tos.right = tos.right->next;
+    tos.seglist.delete_reassembly_segment(drop_seg);
+}
+
+// Left-overlap handler for FIRST-style policies: the already-queued (left)
+// bytes win; in inline-normalization mode the new packet's payload is
+// rewritten to match them, then 'slide' skips the duplicated prefix.
+void TcpOverlapResolver::left_overlap_keep_first(TcpOverlapState& tos)
+{
+    // NOTE that overlap will always be less than left->size since
+    // seq is always greater than left->seq
+    assert(SEQ_GT(tos.seq, tos.left->start_seq()));
+
+    tos.overlap = tos.left->next_seq() - tos.seq;
+    if ( tos.len < tos.overlap )
+        tos.overlap = tos.len;
+
+    if ( tos.overlap > 0 )
+    {
+        tcpStats.overlaps++;
+        tos.seglist.overlap_count++;
+
+        // Branches differ only in rewrite length: full payload when 'left'
+        // extends past the new segment's end, otherwise just the overlap.
+        // (norm_stats is bumped either way, indexed by the active norm mode.)
+        if ( SEQ_GT(tos.left->next_seq(), tos.seq_end) )
+        {
+            if (tos.tcp_ips_data == NORM_MODE_ON)
+            {
+                unsigned offset = tos.tsd->get_seq() - tos.left->start_seq();
+                tos.tsd->rewrite_payload(0, tos.left->payload() + offset);
+            }
+            norm_stats[PC_TCP_IPS_DATA][tos.tcp_ips_data]++;
+        }
+        else
+        {
+            if ( tos.tcp_ips_data == NORM_MODE_ON )
+            {
+                unsigned offset = tos.tsd->get_seq() - tos.left->start_seq();
+                unsigned length = tos.left->next_seq() - tos.tsd->get_seq();
+                tos.tsd->rewrite_payload(0, tos.left->payload() + offset, length);
+            }
+
+            norm_stats[PC_TCP_IPS_DATA][tos.tcp_ips_data]++;
+        }
+
+        tos.slide = tos.overlap;
+    }
+}
+
+// Left-overlap handler that trims the OLD data (Solaris/HPUX11 style): if the
+// existing node spans the whole new segment, drop the new data via 'slide';
+// otherwise shorten the existing node so the new bytes take precedence.
+void TcpOverlapResolver::left_overlap_trim_first(TcpOverlapState& tos)
+{
+    assert(SEQ_GT(tos.seq, tos.left->start_seq()));
+
+    tos.overlap = tos.left->next_seq() - tos.seq;
+    if ( tos.overlap > 0 )
+    {
+        tcpStats.overlaps++;
+        tos.seglist.overlap_count++;
+
+        if ( SEQ_GEQ(tos.left->next_seq(), tos.seq + tos.len) )
+        {
+            // existing packet overlaps new on both sides. Drop the new data.
+            tos.slide += tos.len;
+        }
+        else
+        {
+            /* Otherwise, trim the old data accordingly */
+            tos.left->length -= ( int16_t )tos.overlap;
+            tos.seglist.seg_bytes_logical -= tos.overlap;
+        }
+    }
+}
+
+// Left-overlap handler for LAST-style policies: the NEW segment's bytes win.
+// Trim the overlapped tail of the existing node; if the existing node fully
+// spans the new segment, split it and insert the new data in the middle.
+void TcpOverlapResolver::left_overlap_keep_last(TcpOverlapState& tos)
+{
+    // Use the start_seq() accessor like every other overlap handler in this
+    // file - the refactored TcpSegmentNode has no raw 'seq' member, so the
+    // old 'tos.left->seq' spelling would not compile in debug builds.
+    assert(SEQ_GT(tos.seq, tos.left->start_seq()));
+
+    tos.overlap = tos.left->next_seq() - tos.seq;
+    if ( tos.overlap > 0 )
+    {
+        tcpStats.overlaps++;
+        tos.seglist.overlap_count++;
+
+        /* True "Last" policy" */
+        if (SEQ_GT(tos.left->next_seq(), tos.seq + tos.len) )
+        {
+            /* New data is overlapped on both sides by existing data. Existing data needs to be
+             * split and the new data inserted in the middle.
+             * Need to duplicate left. Adjust that seq by + (seq + len) and
+             * size by - (seq + len - left->start_seq()).
+             */
+            tos.seglist.dup_reassembly_segment(tos.left, &tos.right);
+
+            // left keeps the bytes before the new segment; the duplicate
+            // (now 'right') keeps the bytes after it, via offset/length.
+            tos.left->length -= tos.overlap;
+            uint16_t delta = tos.seq_end - tos.left->start_seq();
+            tos.right->length -= delta;
+            tos.right->offset += delta;
+            tos.seglist.seg_bytes_logical -= tos.len;
+        }
+        else
+        {
+            tos.left->length -= (int16_t)tos.overlap;
+            tos.seglist.seg_bytes_logical -= tos.overlap;
+        }
+    }
+}
+
+// Partial right overlap resolved in favor of the NEW data: if the new bytes
+// start exactly at 'right', just slide past the duplicate; otherwise chop the
+// overlapped prefix off the existing node (offset/length adjustment).
+void TcpOverlapResolver::right_overlap_truncate_existing(TcpOverlapState& tos)
+{
+    if ( SEQ_EQ(tos.right->start_seq(), tos.slide_seq()) )
+    {
+        tos.slide += tos.overlap;
+    }
+    else
+    {
+        /* partial overlap */
+        tos.right->offset += tos.overlap;
+        tos.right->length -= ( int16_t )tos.overlap;
+        tos.seglist.seg_bytes_logical -= tos.overlap;
+    }
+}
+
+// Partial right overlap resolved in favor of the EXISTING data: in inline
+// mode rewrite the new packet's overlapped tail to match what is queued,
+// then truncate the new segment by the overlap amount before insertion.
+void TcpOverlapResolver::right_overlap_truncate_new(TcpOverlapState& tos)
+{
+    if (tos.tcp_ips_data == NORM_MODE_ON)
+    {
+        unsigned offset = tos.right->start_seq() - tos.tsd->get_seq();
+        unsigned length = tos.tsd->get_seq() + tos.tsd->get_len() - tos.right->start_seq();
+        tos.tsd->rewrite_payload(offset, tos.right->payload(), length);
+    }
+
+    norm_stats[PC_TCP_IPS_DATA][tos.tcp_ips_data]++;
+    tos.trunc_len = tos.overlap;
+}
+
+// REASSEMBLY_POLICY_FIRST:
+// REASSEMBLY_POLICY_VISTA:
+// Full overlap of an existing node, resolved keeping the existing (FIRST)
+// bytes: rewrite the new payload to match, then either slide past the node
+// or split-insert the leading new bytes before it. May block the session on
+// a zero-window-probe data mismatch in inline mode.
+void TcpOverlapResolver::full_right_overlap_truncate_new(TcpOverlapState& tos)
+{
+
+    if ( tos.tcp_ips_data == NORM_MODE_ON )
+    {
+        unsigned offset = tos.right->start_seq() - tos.tsd->get_seq();
+        // A ZWP retransmit carrying different data is evasion - block it.
+        if ( !offset && zwp_data_mismatch(tos, *tos.tsd, tos.right->length))
+        {
+            tos.seglist.tracker->normalizer.session_blocker(*tos.tsd);
+            tos.keep_segment = false;
+            return;
+        }
+
+        tos.tsd->rewrite_payload(offset, tos.right->payload(), tos.right->length);
+    }
+
+    norm_stats[PC_TCP_IPS_DATA][tos.tcp_ips_data]++;
+
+    if ( SEQ_EQ(tos.right->start_seq(), tos.slide_seq()) )
+    {
+        // Overlap is greater than or equal to right->size slide gets set before insertion
+        tos.slide += tos.right->length;
+        tos.left = tos.right;
+        tos.right = tos.right->next;
+    }
+    else
+    {
+        // seq is less than right->start_seq(), set trunc length and slide
+        // and insert chunk before current right segment...
+        tos.trunc_len = tos.overlap;
+        tos.seglist.add_reassembly_segment(*tos.tsd, tos.len, tos.slide,
+            tos.trunc_len, tos.seq, tos.left);
+
+        // adjust slide and trunc_len and move to next node to the right...
+        tos.slide += tos.right->next_seq() - tos.slide_seq();
+        tos.trunc_len = 0;
+        tos.left = tos.right;
+        tos.right = tos.right->next;
+    }
+}
+
+// REASSEMBLY_POLICY_WINDOWS:
+// REASSEMBLY_POLICY_WINDOWS2K3:
+// REASSEMBLY_POLICY_BSD:
+// REASSEMBLY_POLICY_MACOS:
+void TcpOverlapResolver::full_right_overlap_os1(TcpOverlapState& tos)
+{
+ if ( SEQ_GEQ(tos.seq_end, tos.right->next_seq()) and
+ SEQ_LT(tos.slide_seq(), tos.right->start_seq()) )
+ {
+ drop_old_segment(tos);
+ }
+ else
+ full_right_overlap_truncate_new(tos);
+}
+
+// REASSEMBLY_POLICY_LINUX:
+// REASSEMBLY_POLICY_HPUX10:
+// REASSEMBLY_POLICY_IRIX:
+void TcpOverlapResolver::full_right_overlap_os2(TcpOverlapState& tos)
+{
+ if ( SEQ_GEQ(tos.seq_end, tos.right->next_seq()) and
+ SEQ_LT(tos.slide_seq(), tos.right->start_seq()) )
+ {
+ drop_old_segment(tos);
+ }
+ else if ( SEQ_GT(tos.seq_end, tos.right->next_seq()) and
+ SEQ_EQ(tos.slide_seq(), tos.right->start_seq()) )
+ {
+ drop_old_segment(tos);
+ }
+ else
+ full_right_overlap_truncate_new(tos);
+}
+
+// REASSEMBLY_POLICY_HPUX11:
+// REASSEMBLY_POLICY_SOLARIS:
+void TcpOverlapResolver::full_right_overlap_os3(TcpOverlapState& tos)
+{
+    // If this packet is wholly overlapping and the same size as a previous one and we have not
+    // received the one immediately preceding, we take the FIRST.
+    if ( SEQ_EQ(tos.right->start_seq(), tos.seq) && (tos.right->length == tos.len)
+        && (tos.left && !SEQ_EQ(tos.left->next_seq(), tos.seq)) )
+    {
+        // NOTE(review): the pre-refactor version of this branch advanced the
+        // working seq and set trunc_len from the old node's length directly;
+        // here that is delegated to right_overlap_truncate_new (which also
+        // handles the inline payload rewrite). Confirm slide/trunc accounting
+        // is equivalent for the equal-size full-overlap case.
+        right_overlap_truncate_new(tos);
+
+        tos.rdata += tos.right->length;
+        tos.rsize -= tos.right->length;
+        tos.rseq += tos.right->length;
+        tos.left = tos.right;
+        tos.right = tos.right->next;
+    }
+    else
+        drop_old_segment(tos);
+}
+
+// REASSEMBLY_POLICY_OLD_LINUX:
+// REASSEMBLY_POLICY_LAST:
+void TcpOverlapResolver::full_right_overlap_os4(TcpOverlapState& tos)
+{ drop_old_segment(tos); }
+
+void TcpOverlapResolver::full_right_overlap_os5(TcpOverlapState& tos)
+{
+ full_right_overlap_truncate_new(tos);
+}
+
+// In inline NAP mode, detect a retransmitted zero-window probe whose single
+// data byte differs from the one already queued (potential evasion).
+// NOTE(review): this compares right->data[0] while the rest of the refactor
+// reads queued bytes through payload() (which accounts for 'offset'); the
+// ZWP node is a 1-byte segment so offset is presumably 0 here - confirm,
+// or switch to payload()[0] for consistency.
+bool TcpOverlapResolver::zwp_data_mismatch(TcpOverlapState& tos, TcpSegmentDescriptor& tsd, uint32_t overlap)
+{
+    if ( overlap == MAX_ZERO_WIN_PROBE_LEN
+        and tos.right->start_seq() == tos.seglist.tracker->normalizer.get_zwp_seq()
+        and (tos.right->data[0] != tsd.get_pkt()->data[0]) )
+    {
+        return tsd.is_nap_policy_inline();
+    }
+
+    return false;
+}
+
+class TcpOverlapResolverFirst : public TcpOverlapResolver
+{
+public:
+ TcpOverlapResolverFirst()
+ { overlap_policy = StreamPolicy::OS_FIRST; }
+
+private:
+ void insert_left_overlap(TcpOverlapState& tos) override
+ { left_overlap_keep_first(tos); }
+
+ void insert_right_overlap(TcpOverlapState& tos) override
+ { right_overlap_truncate_new(tos); }
+
+ void insert_full_overlap(TcpOverlapState& tos) override
+ { full_right_overlap_os5(tos); }
+};
+
+class TcpOverlapResolverLast : public TcpOverlapResolver
+{
+public:
+ TcpOverlapResolverLast()
+ { overlap_policy = StreamPolicy::OS_LAST; }
+
+private:
+ void right_overlap_truncate_existing(TcpOverlapState& tos) override
+ {
+ tos.right->offset += tos.overlap;
+ tos.right->length -= ( int16_t )tos.overlap;
+ tos.seglist.seg_bytes_logical -= tos.overlap;
+ }
+
+ void insert_left_overlap(TcpOverlapState& tos) override
+ { left_overlap_keep_last(tos); }
+
+ void insert_right_overlap(TcpOverlapState& tos) override
+ { right_overlap_truncate_existing(tos); }
+
+ void insert_full_overlap(TcpOverlapState& tos) override
+ { full_right_overlap_os4(tos); }
+};
+
+class TcpOverlapResolverLinux : public TcpOverlapResolver
+{
+public:
+ TcpOverlapResolverLinux()
+ { overlap_policy = StreamPolicy::OS_LINUX; }
+
+private:
+ void insert_left_overlap(TcpOverlapState& tos) override
+ { left_overlap_keep_first(tos); }
+
+ void insert_right_overlap(TcpOverlapState& tos) override
+ { right_overlap_truncate_existing(tos); }
+
+ void insert_full_overlap(TcpOverlapState& tos) override
+ { full_right_overlap_os2(tos); }
+};
+
+class TcpOverlapResolverOldLinux : public TcpOverlapResolver
+{
+public:
+ TcpOverlapResolverOldLinux()
+ { overlap_policy = StreamPolicy::OS_OLD_LINUX; }
+
+private:
+ void insert_left_overlap(TcpOverlapState& tos) override
+ { left_overlap_keep_first(tos); }
+
+ void insert_right_overlap(TcpOverlapState& tos) override
+ { right_overlap_truncate_existing(tos); }
+
+ void insert_full_overlap(TcpOverlapState& tos) override
+ { full_right_overlap_os4(tos); }
+};
+
+class TcpOverlapResolverBSD : public TcpOverlapResolver
+{
+public:
+ TcpOverlapResolverBSD()
+ { overlap_policy = StreamPolicy::OS_BSD; }
+
+private:
+ void insert_left_overlap(TcpOverlapState& tos) override
+ { left_overlap_keep_first(tos); }
+
+ void insert_right_overlap(TcpOverlapState& tos) override
+ { right_overlap_truncate_existing(tos); }
+
+ void insert_full_overlap(TcpOverlapState& tos) override
+ { full_right_overlap_os1(tos); }
+};
+
+// OS_MACOS policy: identical handler selection to the BSD resolver
+// (keep first on left, truncate existing on right, os1 on full overlaps).
+class TcpOverlapResolverMacOS : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverMacOS()
+    { overlap_policy = StreamPolicy::OS_MACOS; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_existing(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os1(tos); }
+};
+
+// OS_SOLARIS policy: trim the existing (first) segment on left overlaps,
+// truncate the new segment on right overlaps, os3 for full overlaps.
+class TcpOverlapResolverSolaris : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverSolaris()
+    { overlap_policy = StreamPolicy::OS_SOLARIS; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_trim_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_new(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os3(tos); }
+};
+
+// OS_IRIX policy: same handler selection as Linux (keep first on left,
+// truncate existing on right, os2 on full overlaps).
+class TcpOverlapResolverIrix : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverIrix()
+    { overlap_policy = StreamPolicy::OS_IRIX; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_existing(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os2(tos); }
+};
+
+// OS_HPUX11 policy: same handler selection as Solaris (trim first on left,
+// truncate new on right, os3 on full overlaps).
+class TcpOverlapResolverHpux11 : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverHpux11()
+    { overlap_policy = StreamPolicy::OS_HPUX11; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_trim_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_new(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os3(tos); }
+};
+
+// OS_HPUX10 policy: same handler selection as Linux/Irix (keep first on
+// left, truncate existing on right, os2 on full overlaps).
+class TcpOverlapResolverHpux10 : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverHpux10()
+    { overlap_policy = StreamPolicy::OS_HPUX10; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_existing(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os2(tos); }
+};
+
+// OS_WINDOWS policy: keep first on left overlaps, truncate existing on
+// right overlaps, os1 on full overlaps.
+class TcpOverlapResolverWindows : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverWindows()
+    { overlap_policy = StreamPolicy::OS_WINDOWS; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_existing(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os1(tos); }
+};
+
+// OS_WINDOWS2K3 policy: same handler selection as Windows (keep first on
+// left, truncate existing on right, os1 on full overlaps).
+class TcpOverlapResolverWindows2K3 : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverWindows2K3()
+    { overlap_policy = StreamPolicy::OS_WINDOWS2K3; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_existing(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os1(tos); }
+};
+
+// OS_VISTA policy: keep first on left overlaps, truncate the new segment
+// on right overlaps, os5 on full overlaps.
+class TcpOverlapResolverVista : public TcpOverlapResolver
+{
+public:
+    TcpOverlapResolverVista()
+    { overlap_policy = StreamPolicy::OS_VISTA; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_new(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os5 (tos); }
+};
+
+// OS_PROXY policy: derives from TcpOverlapResolverFirst but overrides all
+// three hooks itself (keep first / truncate new / os5), so only inherited
+// protected helpers come from the base.
+class TcpOverlapResolverProxy : public TcpOverlapResolverFirst
+{
+public:
+    TcpOverlapResolverProxy()
+    { overlap_policy = StreamPolicy::OS_PROXY; }
+
+private:
+    void insert_left_overlap(TcpOverlapState& tos) override
+    { left_overlap_keep_first(tos); }
+
+    void insert_right_overlap(TcpOverlapState& tos) override
+    { right_overlap_truncate_new(tos); }
+
+    void insert_full_overlap(TcpOverlapState& tos) override
+    { full_right_overlap_os5(tos); }
+};
+
+// Registry of one resolver instance per StreamPolicy; static storage is
+// zero-initialized, so any slot not filled by initialize() stays null.
+TcpOverlapResolver* TcpOverlapResolverFactory::overlap_resolvers[StreamPolicy::OS_END_OF_LIST];
+
+// Allocate one resolver per supported OS policy (OS_FIRST..OS_PROXY).
+void TcpOverlapResolverFactory::initialize()
+{
+    overlap_resolvers[StreamPolicy::OS_FIRST] = new TcpOverlapResolverFirst;
+    overlap_resolvers[StreamPolicy::OS_LAST] = new TcpOverlapResolverLast;
+    overlap_resolvers[StreamPolicy::OS_LINUX] = new TcpOverlapResolverLinux;
+    overlap_resolvers[StreamPolicy::OS_OLD_LINUX] = new TcpOverlapResolverOldLinux;
+    overlap_resolvers[StreamPolicy::OS_BSD] = new TcpOverlapResolverBSD;
+    overlap_resolvers[StreamPolicy::OS_MACOS] = new TcpOverlapResolverMacOS;
+    overlap_resolvers[StreamPolicy::OS_SOLARIS] = new TcpOverlapResolverSolaris;
+    overlap_resolvers[StreamPolicy::OS_IRIX] = new TcpOverlapResolverIrix;
+    overlap_resolvers[StreamPolicy::OS_HPUX11] = new TcpOverlapResolverHpux11;
+    overlap_resolvers[StreamPolicy::OS_HPUX10] = new TcpOverlapResolverHpux10;
+    overlap_resolvers[StreamPolicy::OS_WINDOWS] = new TcpOverlapResolverWindows;
+    overlap_resolvers[StreamPolicy::OS_WINDOWS2K3] = new TcpOverlapResolverWindows2K3;
+    overlap_resolvers[StreamPolicy::OS_VISTA] = new TcpOverlapResolverVista;
+    overlap_resolvers[StreamPolicy::OS_PROXY] = new TcpOverlapResolverProxy;
+}
+
+// Delete every resolver created by initialize() (OS_FIRST..OS_PROXY).
+void TcpOverlapResolverFactory::term()
+{
+    for ( auto sp = StreamPolicy::OS_FIRST; sp <= StreamPolicy::OS_PROXY; sp++ )
+        delete overlap_resolvers[sp];
+}
+
+// Look up the resolver for os_policy. When TCP IPS normalization is on,
+// the OS_FIRST policy is used regardless of the configured policy.
+TcpOverlapResolver* TcpOverlapResolverFactory::get_instance(StreamPolicy os_policy)
+{
+    NormMode tcp_ips_data = Normalize_GetMode(NORM_TCP_IPS);
+    StreamPolicy sp = (tcp_ips_data == NORM_MODE_ON) ? StreamPolicy::OS_FIRST : os_policy;
+
+    assert( sp <= StreamPolicy::OS_PROXY );
+    return overlap_resolvers[sp];
+}
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_overlap_resolver.h author davis mcpherson <davmcphe@cisco.com>
+// Created on: Oct 11, 2015
+
+#ifndef TCP_OVERLAP_RESOLVER_H
+#define TCP_OVERLAP_RESOLVER_H
+
+#include <vector>
+
+#include "normalize/norm_stats.h"
+#include "stream/stream.h"
+
+#include "tcp_defs.h"
+
+class TcpReassemblySegments;
+class TcpSegmentDescriptor;
+class TcpSegmentNode;
+class TcpSession;
+class TcpStreamTracker;
+
+// Working state for resolving one incoming segment against the existing
+// seglist: the neighboring segments, the new segment's bounds, and the
+// overlap/slide/truncation amounts computed by the resolvers.
+class TcpOverlapState
+{
+public:
+    TcpOverlapState(TcpReassemblySegments& seglist);
+    ~TcpOverlapState() = default;
+
+    // (Re)initialize state from the descriptor of the incoming segment.
+    void init(TcpSegmentDescriptor&);
+
+    // First usable sequence number of the new data after any left-side slide.
+    uint32_t slide_seq() const
+    { return seq + slide; }
+
+    TcpReassemblySegments& seglist;
+    TcpSegmentDescriptor* tsd = nullptr;    // descriptor of the segment being inserted
+
+    TcpSegmentNode* left = nullptr;         // existing neighbor to the left
+    TcpSegmentNode* right = nullptr;        // existing neighbor to the right
+    const uint8_t* rdata = nullptr;         // presumably right neighbor's data — confirm in .cc
+
+    uint32_t seq = 0;                       // start seq of the new segment
+    uint32_t seq_end = 0;                   // end seq of the new segment
+    uint32_t rseq = 0;
+
+    int32_t overlap = 0;                    // overlapping byte count with a neighbor
+    int32_t slide = 0;                      // bytes dropped from the front of the new data
+    int32_t trunc_len = 0;                  // bytes truncated from the tail
+
+    uint16_t len = 0;
+    uint16_t rsize = 0;
+    int8_t tcp_ips_data = 0;                // cached NORM_TCP_IPS mode — confirm in .cc
+
+    bool keep_segment = true;               // cleared if resolution drops the new segment — confirm in .cc
+};
+
+// Base class for per-OS-policy overlap resolution. eval_left()/eval_right()
+// drive the common logic and delegate the policy-specific decisions to the
+// pure virtual insert_*_overlap() hooks; the protected helpers are the
+// shared handler implementations the subclasses choose from.
+class TcpOverlapResolver
+{
+public:
+    TcpOverlapResolver() = default;
+    virtual ~TcpOverlapResolver() = default;
+
+    void eval_left(TcpOverlapState&);
+    void eval_right(TcpOverlapState&);
+
+    StreamPolicy get_overlap_policy()
+    { return overlap_policy; }
+
+protected:
+    virtual bool is_segment_retransmit(TcpOverlapState&, bool*);
+    virtual void drop_old_segment(TcpOverlapState&);
+    virtual bool zwp_data_mismatch(TcpOverlapState&, TcpSegmentDescriptor&, uint32_t);
+
+    // Shared overlap handlers; subclasses map their insert_*_overlap()
+    // hooks onto one of these per their OS policy.
+    virtual void left_overlap_keep_first(TcpOverlapState&);
+    virtual void left_overlap_trim_first(TcpOverlapState&);
+    virtual void left_overlap_keep_last(TcpOverlapState&);
+    virtual void right_overlap_truncate_existing(TcpOverlapState&);
+    virtual void right_overlap_truncate_new(TcpOverlapState&);
+    virtual void full_right_overlap_truncate_new(TcpOverlapState&);
+    virtual void full_right_overlap_os1(TcpOverlapState&);
+    virtual void full_right_overlap_os2(TcpOverlapState&);
+    virtual void full_right_overlap_os3(TcpOverlapState&);
+    virtual void full_right_overlap_os4(TcpOverlapState&);
+    virtual void full_right_overlap_os5(TcpOverlapState&);
+
+    // Policy-specific selection points, implemented by each subclass.
+    virtual void insert_left_overlap(TcpOverlapState&) = 0;
+    virtual void insert_right_overlap(TcpOverlapState&) = 0;
+    virtual void insert_full_overlap(TcpOverlapState&) = 0;
+
+    StreamPolicy overlap_policy = StreamPolicy::OS_DEFAULT;
+};
+
+// Static-only factory holding one shared resolver instance per StreamPolicy;
+// initialize()/term() manage the registry's lifetime.
+class TcpOverlapResolverFactory
+{
+public:
+    static void initialize();
+    static void term();
+    static TcpOverlapResolver* get_instance(StreamPolicy);
+
+private:
+    TcpOverlapResolverFactory() = delete;   // not instantiable
+
+    static TcpOverlapResolver* overlap_resolvers[StreamPolicy::OS_END_OF_LIST];
+};
+
+#endif
#include "packet_io/packet_tracer.h"
#include "profiler/profiler.h"
#include "protocols/packet_manager.h"
+#include "stream/stream_splitter.h"
#include "time/packet_time.h"
#include "tcp_module.h"
#include "tcp_normalizers.h"
+#include "tcp_segment_node.h"
#include "tcp_session.h"
-#include "tcp_stream_tracker.h"
using namespace snort;
-static THREAD_LOCAL Packet* last_pdu = nullptr;
-
-static void purge_alerts_callback_ackd(IpsContext* c)
+// One-time setup for this direction's reassembler: bind the splitter,
+// prime PAF, seed the scan cursor from cur_rseg (or the seglist head),
+// and record direction-dependent ignore/packet flags.
+void TcpReassembler::init(bool server, StreamSplitter* ss)
{
-    TcpSession* session = (TcpSession*)c->packet->flow->session;
-
-    if ( c->packet->is_from_server() )
-        session->client.reassembler.purge_alerts();
+    splitter = ss;
+    paf.paf_setup(ss);
+    if ( seglist.cur_rseg )
+        seglist.cur_sseg = seglist.cur_rseg;
    else
-        session->server.reassembler.purge_alerts();
-}
+        seglist.cur_sseg = seglist.head;
-static void purge_alerts_callback_ips(IpsContext* c)
-{
-    TcpSession* session = (TcpSession*)c->packet->flow->session;
+    server_side = server;
-    if ( c->packet->is_from_server() )
-        session->server.reassembler.purge_alerts();
+    if ( server_side )
+    {
+        ignore_dir = SSN_DIR_FROM_CLIENT;
+        packet_dir = PKT_FROM_CLIENT;
+    }
    else
-        session->client.reassembler.purge_alerts();
-}
-
-bool TcpReassembler::is_segment_pending_flush(const TcpReassemblerState& trs) const
-{
-    return ( get_pending_segment_count(trs, 1) > 0 );
-}
-
-uint32_t TcpReassembler::get_pending_segment_count(const TcpReassemblerState& trs, unsigned max) const
-{
-    uint32_t n = trs.sos.seg_count - trs.flush_count;
-
-    if ( !n || max == 1 )
-        return n;
-
-    n = 0;
-    const TcpSegmentNode* tsn = trs.sos.seglist.head;
-    while ( tsn )
    {
-        if ( tsn->c_len && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base) )
-            n++;
-
-        if ( max && n == max )
-            return n;
-
-        tsn = tsn->next;
+        ignore_dir = SSN_DIR_FROM_SERVER;
+        packet_dir = PKT_FROM_SERVER;
    }
-
-    return n;
-}
-
-bool TcpReassembler::next_no_gap(const TcpSegmentNode& tsn)
-{
-    return tsn.next and (tsn.next->i_seq == tsn.i_seq + tsn.i_len);
-}
-
-bool TcpReassembler::next_no_gap_c(const TcpSegmentNode& tsn)
-{
-    return tsn.next and (tsn.next->c_seq == tsn.c_seq + tsn.c_len);
-}
-
-bool TcpReassembler::next_acked_no_gap_c(const TcpSegmentNode& tsn, const TcpReassemblerState& trs)
-{
-    return tsn.next and (tsn.next->c_seq == tsn.c_seq + tsn.c_len)
-        and SEQ_LT(tsn.next->c_seq, trs.tracker->r_win_base);
}
+// True when a FIN with a known sequence has been seen and tsn's data
+// reaches (or passes) that FIN sequence — i.e. no gap before the FIN.
-bool TcpReassembler::fin_no_gap(const TcpSegmentNode& tsn, const TcpReassemblerState& trs)
+bool TcpReassembler::fin_no_gap(const TcpSegmentNode& tsn)
{
-    return trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_SEEN
-        and SEQ_GEQ(tsn.i_seq + tsn.i_len, trs.tracker->get_fin_i_seq());
+    return tracker.fin_seq_status >= FIN_WITH_SEQ_SEEN
+        and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
}
+// Like fin_no_gap(), but additionally requires the FIN to have been acked.
-bool TcpReassembler::fin_acked_no_gap(const TcpSegmentNode& tsn, const TcpReassemblerState& trs)
+bool TcpReassembler::fin_acked_no_gap(const TcpSegmentNode& tsn)
{
-    return trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_ACKED
-        and SEQ_GEQ(tsn.i_seq + tsn.i_len, trs.tracker->get_fin_i_seq());
-}
-
-void TcpReassembler::update_next(TcpReassemblerState& trs, const TcpSegmentNode& tsn)
-{
-    trs.sos.seglist.cur_rseg = next_no_gap(tsn) ? tsn.next : nullptr;
-    if ( trs.sos.seglist.cur_rseg )
-        trs.sos.seglist.cur_rseg->c_seq = trs.sos.seglist.cur_rseg->i_seq;
+    return tracker.fin_seq_status >= FIN_WITH_SEQ_ACKED
+        and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
}
// If we are skipping seglist hole, update tsn so that we can purge
-void TcpReassembler::update_skipped_bytes(uint32_t remaining_bytes, TcpReassemblerState& trs)
+void TcpReassembler::update_skipped_bytes(uint32_t remaining_bytes)
{
    TcpSegmentNode* tsn;
-    while ( remaining_bytes and (tsn = trs.sos.seglist.cur_rseg) )
+    while ( remaining_bytes and (tsn = seglist.cur_rseg) )
    {
-        auto bytes_skipped = ( tsn->c_len <= remaining_bytes ) ? tsn->c_len : remaining_bytes;
+        // consume up to remaining_bytes from the current scan segment
+        auto bytes_skipped = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
        remaining_bytes -= bytes_skipped;
-        tsn->update_reassembly_cursor(bytes_skipped);
+        tsn->advance_cursor(bytes_skipped);
-        if ( !tsn->c_len )
+        if ( !tsn->unscanned() )
        {
-            trs.flush_count++;
-            update_next(trs, *tsn);
+            seglist.flush_count++;
+            seglist.update_next(tsn);
        }
    }
}
-int TcpReassembler::delete_reassembly_segment(TcpReassemblerState& trs, TcpSegmentNode* tsn)
-{
- int ret;
- assert(tsn);
-
- trs.sos.seglist.remove(tsn);
- trs.sos.seg_bytes_total -= tsn->i_len;
- trs.sos.seg_bytes_logical -= tsn->i_len;
- ret = tsn->i_len;
-
- if ( !tsn->c_len )
- {
- tcpStats.segs_used++;
- trs.flush_count--;
- }
-
- if ( trs.sos.seglist.cur_sseg == tsn )
- trs.sos.seglist.cur_sseg = tsn->next;
-
- if ( trs.sos.seglist.cur_rseg == tsn )
- update_next(trs, *tsn);
-
- tsn->term();
- trs.sos.seg_count--;
-
- return ret;
-}
-
-void TcpReassembler::queue_reassembly_segment(
- TcpReassemblerState& trs, TcpSegmentNode* prev, TcpSegmentNode* tsn)
-{
- trs.sos.seglist.insert(prev, tsn);
-
- if ( !trs.sos.seglist.cur_sseg )
- trs.sos.seglist.cur_sseg = tsn;
- else if ( SEQ_LT(tsn->c_seq, trs.sos.seglist.cur_sseg->c_seq) )
- {
- trs.sos.seglist.cur_sseg = tsn;
- if ( SEQ_LT(tsn->c_seq, trs.sos.seglist_base_seq) )
- trs.sos.seglist_base_seq = tsn->c_seq;
-
- if ( trs.sos.seglist.cur_rseg && SEQ_LT(tsn->c_seq, trs.sos.seglist.cur_rseg->c_seq) )
- trs.sos.seglist.cur_rseg = tsn;
- }
-
- trs.sos.seg_count++;
- trs.sos.seg_bytes_total += tsn->i_len;
- trs.sos.total_segs_queued++;
- tcpStats.segs_queued++;
-
- if ( trs.sos.seg_count > tcpStats.max_segs )
- tcpStats.max_segs = trs.sos.seg_count;
-
- if ( trs.sos.seg_bytes_total > tcpStats.max_bytes )
- tcpStats.max_bytes = trs.sos.seg_bytes_total;
-}
-
-bool TcpReassembler::is_segment_fasttrack(
- TcpReassemblerState&, TcpSegmentNode* tail, const TcpSegmentDescriptor& tsd)
-{
- if ( SEQ_EQ(tsd.get_seq(), tail->i_seq + tail->i_len) )
- return true;
-
- return false;
-}
-
-void TcpReassembler::add_reassembly_segment(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd, uint16_t len, uint32_t slide,
- uint32_t trunc_len, uint32_t seq, TcpSegmentNode* left)
-{
- const int32_t new_size = len - slide - trunc_len;
- assert(new_size >= 0);
-
- // if trimming will delete all data, don't insert this segment in the queue
- if ( new_size <= 0 )
- {
- tcpStats.payload_fully_trimmed++;
- trs.tracker->normalizer.trim_win_payload(tsd);
- return;
- }
-
- // FIXIT-L don't allocate overlapped part
- TcpSegmentNode* const tsn = TcpSegmentNode::init(tsd);
-
- tsn->offset = slide;
- tsn->o_offset = slide;
- tsn->c_len = (uint16_t)new_size;
- tsn->i_len = (uint16_t)new_size;
- tsn->i_seq = tsn->c_seq = seq;
- tsn->ts = tsd.get_timestamp();
-
- // FIXIT-M the urgent ptr handling is broken... urg_offset could be set here but currently
- // not actually referenced anywhere else. In 2.9.7 the FlushStream function did reference
- // this field but that code has been lost... urg ptr handling needs to be reviewed and fixed
- // tsn->urg_offset = trs.tracker->normalizer.set_urg_offset(tsd.get_tcph(), tsd.get_seg_len());
-
- queue_reassembly_segment(trs, left, tsn);
-
- trs.sos.seg_bytes_logical += tsn->c_len;
- trs.sos.total_bytes_queued += tsn->c_len;
- tsd.set_packet_flags(PKT_STREAM_INSERT);
-}
-
-void TcpReassembler::dup_reassembly_segment(
- TcpReassemblerState& trs, TcpSegmentNode* left, TcpSegmentNode** retSeg)
-{
- TcpSegmentNode* tsn = TcpSegmentNode::init(*left);
- tcpStats.segs_split++;
-
- // twiddle the values for overlaps
- tsn->c_len = left->c_len;
- tsn->i_seq = tsn->c_seq = left->i_seq;
- queue_reassembly_segment(trs, left, tsn);
-
- *retSeg = tsn;
-}
-
-bool TcpReassembler::add_alert(TcpReassemblerState& trs, uint32_t gid, uint32_t sid)
-{
- assert(trs.alerts.size() <=
- (uint32_t)(get_ips_policy()->rules_loaded + get_ips_policy()->rules_shared));
-
- if (!this->check_alerted(trs, gid, sid))
- trs.alerts.emplace_back(gid, sid);
-
- return true;
-}
-
-bool TcpReassembler::check_alerted(TcpReassemblerState& trs, uint32_t gid, uint32_t sid)
-{
- return std::any_of(trs.alerts.cbegin(), trs.alerts.cend(),
- [gid, sid](const StreamAlertInfo& alert){ return alert.gid == gid && alert.sid == sid; });
-}
-
-int TcpReassembler::update_alert(TcpReassemblerState& trs, uint32_t gid, uint32_t sid,
- uint32_t event_id, uint32_t event_second)
-{
- // FIXIT-M comparison of seq_num is wrong, compare value is always 0, should be seq_num of wire packet
- uint32_t seq_num = 0;
-
- auto it = std::find_if(trs.alerts.begin(), trs.alerts.end(),
- [gid, sid, seq_num](const StreamAlertInfo& alert)
- { return alert.gid == gid && alert.sid == sid && SEQ_EQ(alert.seq, seq_num); });
- if (it != trs.alerts.end())
- {
- (*it).event_id = event_id;
- (*it).event_second = event_second;
- return 0;
- }
-
- return -1;
-}
-
-void TcpReassembler::purge_alerts(TcpReassemblerState& trs)
-{
- Flow* flow = trs.sos.session->flow;
-
- for ( auto& alert : trs.alerts )
- Stream::log_extra_data(flow, trs.xtradata_mask, alert);
-
- if ( !flow->is_suspended() )
- trs.alerts.clear();
-}
-
+// Purge flushed segments up to flush_seq, then purge stream alerts — via
+// the last flushed PDU's context when one is pending, else via the flow.
-void TcpReassembler::purge_to_seq(TcpReassemblerState& trs, uint32_t flush_seq)
+void TcpReassembler::purge_to_seq(uint32_t flush_seq)
{
-    assert( trs.sos.seglist.head );
-    uint32_t last_ts = 0;
-
-    TcpSegmentNode* tsn = trs.sos.seglist.head;
-    while ( tsn && SEQ_LT(tsn->i_seq, flush_seq))
-    {
-        if ( tsn->c_len )
-            break;
-
-        TcpSegmentNode* dump_me = tsn;
-        tsn = tsn->next;
-        if (dump_me->ts > last_ts)
-            last_ts = dump_me->ts;
-
-        delete_reassembly_segment(trs, dump_me);
-    }
-
-    if ( SEQ_LT(trs.tracker->rcv_nxt, flush_seq) )
-        trs.tracker->rcv_nxt = flush_seq;
+    seglist.purge_flushed_segments(flush_seq);
    if ( last_pdu )
    {
-        if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
-            last_pdu->context->register_post_callback(purge_alerts_callback_ips);
-        else
-            last_pdu->context->register_post_callback(purge_alerts_callback_ackd);
-
+        tracker.tcp_alerts.purge_alerts(*last_pdu, tracker.normalizer.is_tcp_ips_enabled());
        last_pdu = nullptr;
    }
    else
-        purge_alerts(trs);
-
-    if ( trs.sos.seglist.head == nullptr )
-        trs.sos.seglist.tail = nullptr;
-
-    /* Update the "last" time stamp seen from the other side
-     * to be the most recent timestamp (largest) that was removed
-     * from the queue. This will ensure that as we go forward,
-     * last timestamp is the highest one that we had stored and
-     * purged and handle the case when packets arrive out of order,
-     * such as:
-     * P1: seq 10, length 10, timestamp 10
-     * P3: seq 30, length 10, timestamp 30
-     * P2: seq 20, length 10, timestamp 20
-     *
-     * Without doing it this way, the timestamp would be 20. With
-     * the next packet to arrive (P4, seq 40), the ts_last value
-     * wouldn't be updated for the talker in ProcessTcp() since that
-     * code specifically looks for the NEXT sequence number.
-     */
-    if ( last_ts )
-    {
-        if ( trs.server_side )
-        {
-            int32_t delta = last_ts - trs.sos.session->client.get_ts_last();
-            if ( delta > 0 )
-                trs.sos.session->client.set_ts_last(last_ts);
-        }
-        else
-        {
-            int32_t delta = last_ts - trs.sos.session->server.get_ts_last();
-            if ( delta > 0 )
-                trs.sos.session->server.set_ts_last(last_ts);
-        }
-    }
+        tracker.tcp_alerts.purge_alerts(seglist.session->flow);
}
// must only purge flushed and acked bytes we may flush partial segments
// part of a segment
// * FIXIT-L need flag to mark any reassembled packets that have a gap
// (if we reassemble such)
-void TcpReassembler::purge_flushed_ackd(TcpReassemblerState& trs)
+void TcpReassembler::purge_flushed_ackd()
{
-    TcpSegmentNode* tsn = trs.sos.seglist.head;
-    uint32_t seq;
-
-    if (!trs.sos.seglist.head)
+    if ( !seglist.head )
        return;
-    seq = trs.sos.seglist.head->i_seq;
-
-    while ( tsn && !tsn->c_len )
+    // walk fully-scanned segments, stopping at the first one not yet acked
+    uint32_t seq = seglist.head->start_seq();
+    TcpSegmentNode* tsn = seglist.head;
+    while ( tsn && !tsn->unscanned() )
    {
-        uint32_t end = tsn->i_seq + tsn->i_len;
+        uint32_t end = tsn->next_seq();
-        if ( SEQ_GT(end, trs.tracker->r_win_base) )
+        if ( SEQ_GT(end, tracker.r_win_base) )
            break;
        seq = end;
        tsn = tsn->next;
    }
-    if ( seq != trs.sos.seglist.head->i_seq )
-        purge_to_seq(trs, seq);
+    if ( !SEQ_EQ(seq, seglist.head->start_seq()) )
+        purge_to_seq(seq);
}
+// Debug aid, active only when stream_tcp's show_packets config flag is set.
-void TcpReassembler::show_rebuilt_packet(const TcpReassemblerState& trs, Packet* pkt)
+void TcpReassembler::show_rebuilt_packet(Packet* pkt)
{
-    if ( trs.sos.session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
+    if ( seglist.session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
    {
        // FIXIT-L setting conf here is required because this is called before context start
        pkt->context->conf = SnortConfig::get_conf();
    }
}
+// Copy up to flush_len bytes from cur_rseg onward into the PDU via the
+// splitter's reassemble(), advancing each segment's cursor; stops early on
+// a gap/SKIP or once the splitter returns a buffer. Returns bytes flushed.
-int TcpReassembler::flush_data_segments(TcpReassemblerState& trs, uint32_t flush_len, Packet* pdu)
+int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
{
    uint32_t flags = PKT_PDU_HEAD;
-    uint32_t to_seq = trs.sos.seglist.cur_rseg->c_seq + flush_len;
+
+    uint32_t to_seq = seglist.cur_rseg->scan_seq() + flush_len;
    uint32_t remaining_bytes = flush_len;
    uint32_t total_flushed = 0;
    while ( remaining_bytes )
    {
-        TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
-        unsigned bytes_to_copy = ( tsn->c_len <= remaining_bytes ) ? tsn->c_len : remaining_bytes;
+        TcpSegmentNode* tsn = seglist.cur_rseg;
+        unsigned bytes_to_copy = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
        remaining_bytes -= bytes_to_copy;
        if ( !remaining_bytes )
            flags |= PKT_PDU_TAIL;
        else
-            assert( bytes_to_copy >= tsn->c_len );
+            assert( bytes_to_copy >= tsn->unscanned() );
        unsigned bytes_copied = 0;
-        const StreamBuffer sb = trs.tracker->get_splitter()->reassemble(
-            trs.sos.session->flow, flush_len, total_flushed, tsn->payload(),
-            bytes_to_copy, flags, bytes_copied);
+        const StreamBuffer sb = splitter->reassemble(seglist.session->flow, flush_len, total_flushed,
+            tsn->paf_data(), bytes_to_copy, flags, bytes_copied);
        if ( sb.data )
        {
        }
        total_flushed += bytes_copied;
-        tsn->update_reassembly_cursor(bytes_copied);
+        tsn->advance_cursor(bytes_copied);
        flags = 0;
-        if ( !tsn->c_len )
+        if ( !tsn->unscanned() )
        {
-            trs.flush_count++;
-            update_next(trs, *tsn);
+            seglist.flush_count++;
+            seglist.update_next(tsn);
        }
        /* Check for a gap/missing packet */
        // FIXIT-L FIN may be in to_seq causing bogus gap counts.
-        if ( tsn->is_packet_missing(to_seq) or trs.paf_state.paf == StreamSplitter::SKIP )
+        if ( tsn->is_packet_missing(to_seq) or paf.state == StreamSplitter::SKIP )
        {
            // FIXIT-H // assert(false); find when this scenario happens
            // FIXIT-L this is suboptimal - better to exclude fin from to_seq
-            if ( !trs.tracker->is_fin_seq_set() or
-                SEQ_LEQ(to_seq, trs.tracker->get_fin_final_seq()) )
+            if ( !tracker.is_fin_seq_set() or
+                SEQ_LEQ(to_seq, tracker.get_fin_final_seq()) )
            {
-                trs.tracker->set_tf_flags(TF_MISSING_PKT);
+                tracker.set_tf_flags(TF_MISSING_PKT);
            }
            break;
        }
-        if ( sb.data || !trs.sos.seglist.cur_rseg )
+        if ( sb.data || !seglist.cur_rseg )
            break;
    }
-    if ( trs.paf_state.paf == StreamSplitter::SKIP )
-        update_skipped_bytes(remaining_bytes, trs);
+    if ( paf.state == StreamSplitter::SKIP )
+        update_skipped_bytes(remaining_bytes);
    return total_flushed;
}
-static inline bool both_splitters_aborted(Flow* flow)
-{
- uint32_t both_splitters_yoinked = (SSNFLAG_ABORT_CLIENT | SSNFLAG_ABORT_SERVER);
- return (flow->get_session_flags() & both_splitters_yoinked) == both_splitters_yoinked;
-}
-
// FIXIT-L consolidate encode format, update, and this into new function?
+// Tag the rebuilt pseudo-packet as a TCP PDU.
-void TcpReassembler::prep_pdu(
-    TcpReassemblerState&, Flow* flow, Packet* p, uint32_t pkt_flags, Packet* pdu)
+void TcpReassembler::prep_pdu(Flow* flow, Packet* p, uint32_t pkt_flags, Packet* pdu)
{
    pdu->ptrs.set_pkt_type(PktType::PDU);
    pdu->proto_bits |= PROTO_BIT__TCP;
    }
}
+// Build the pseudo-packet that will carry reassembled data: format a TCP
+// header from the wire packet, stamp the given timeval, and clear dsize.
-Packet* TcpReassembler::initialize_pdu(
-    TcpReassemblerState& trs, Packet* p, uint32_t pkt_flags, struct timeval tv)
+Packet* TcpReassembler::initialize_pdu(Packet* p, uint32_t pkt_flags, struct timeval tv)
{
    // partial flushes already set the pdu for http_inspect splitter processing
    Packet* pdu = p->was_set() ? p : DetectionEngine::set_next_packet(p);
    EncodeFlags enc_flags = 0;
    DAQ_PktHdr_t pkth;
-    trs.sos.session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
+    seglist.session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
    PacketManager::format_tcp(enc_flags, p, pdu, PSEUDO_PKT_TCP, &pkth, pkth.opaque);
-    prep_pdu(trs, trs.sos.session->flow, p, pkt_flags, pdu);
+    prep_pdu(seglist.session->flow, p, pkt_flags, pdu);
    assert(pdu->pkth == pdu->context->pkth);
    pdu->context->pkth->ts = tv;
    pdu->dsize = 0;
}
// flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
-int TcpReassembler::flush_to_seq(
-    TcpReassemblerState& trs, uint32_t bytes, Packet* p, uint32_t pkt_flags)
+int TcpReassembler::flush_to_seq(uint32_t bytes, Packet* p, uint32_t pkt_flags)
{
-    assert( p && trs.sos.seglist.cur_rseg);
+    assert( p && seglist.cur_rseg);
-    trs.tracker->clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
+    tracker.clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
-    TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
-    assert( trs.sos.seglist_base_seq == tsn->c_seq);
+    TcpSegmentNode* tsn = seglist.cur_rseg;
+    assert( seglist.seglist_base_seq == tsn->scan_seq());
-    Packet* pdu = initialize_pdu(trs, p, pkt_flags, tsn->tv);
-    int32_t flushed_bytes = flush_data_segments(trs, bytes, pdu);
+    Packet* pdu = initialize_pdu(p, pkt_flags, tsn->tv);
+    int32_t flushed_bytes = flush_data_segments(bytes, pdu);
    assert( flushed_bytes );
-    trs.sos.seglist_base_seq += flushed_bytes;
+    // advance the reassembly base past everything just flushed
+    seglist.seglist_base_seq += flushed_bytes;
    if ( pdu->data )
    {
    else
        pdu->packet_flags |= ( PKT_REBUILT_STREAM | PKT_STREAM_EST );
-        show_rebuilt_packet(trs, pdu);
+        show_rebuilt_packet(pdu);
        tcpStats.rebuilt_packets++;
        tcpStats.rebuilt_bytes += flushed_bytes;
    else
        last_pdu = nullptr;
-        trs.tracker->finalize_held_packet(p);
+        tracker.finalize_held_packet(p);
    }
    else
    {
    }
    // FIXIT-L abort should be by PAF callback only since recovery may be possible
    if ( trs.tracker->get_tf_flags() & TF_MISSING_PKT )
    {
-        trs.tracker->set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
-        trs.tracker->clear_tf_flags(TF_MISSING_PKT);
+        tracker.set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
+        tracker.clear_tf_flags(TF_MISSING_PKT);
        tcpStats.gaps++;
    }
    else
-        trs.tracker->clear_tf_flags(TF_MISSING_PREV_PKT);
+        tracker.clear_tf_flags(TF_MISSING_PREV_PKT);
    return flushed_bytes;
}
-// flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
+// Final zero-length flush: give the splitter a last chance to emit any
+// buffered data as a PDU and inspect it. Returns bytes produced.
-int TcpReassembler::do_zero_byte_flush(TcpReassemblerState& trs, Packet* p, uint32_t pkt_flags)
+int TcpReassembler::do_zero_byte_flush(Packet* p, uint32_t pkt_flags)
{
    unsigned bytes_copied = 0;
-    const StreamBuffer sb = trs.tracker->get_splitter()->reassemble(
-        trs.sos.session->flow, 0, 0, nullptr, 0, (PKT_PDU_HEAD | PKT_PDU_TAIL), bytes_copied);
+    const StreamBuffer sb = splitter->reassemble(seglist.session->flow, 0, 0,
+        nullptr, 0, (PKT_PDU_HEAD | PKT_PDU_TAIL), bytes_copied);
    if ( sb.data )
    {
-        Packet* pdu = initialize_pdu(trs, p, pkt_flags, p->pkth->ts);
+        Packet* pdu = initialize_pdu(p, pkt_flags, p->pkth->ts);
        /* setup the pseudopacket payload */
        pdu->data = sb.data;
        pdu->dsize = sb.length;
        pdu->packet_flags |= (PKT_REBUILT_STREAM | PKT_STREAM_EST | PKT_PDU_HEAD | PKT_PDU_TAIL);
-        trs.flush_count++;
-        show_rebuilt_packet(trs, pdu);
+        show_rebuilt_packet(pdu);
        DetectionEngine de;
        de.inspect(pdu);
    return bytes_copied;
}
-// get the footprint for the current trs.sos.seglist, the difference
+// get the footprint for the current seglist, the difference
// between our base sequence and the last ack'd sequence we received
+// Returns min(acked footprint, contiguously sequenced bytes).
-uint32_t TcpReassembler::get_q_footprint(TcpReassemblerState& trs)
+uint32_t TcpReassembler::get_q_footprint()
{
    int32_t footprint = 0;
    int32_t sequenced = 0;
-    if ( SEQ_GT(trs.tracker->r_win_base, trs.sos.seglist_base_seq) )
-        footprint = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
+    if ( SEQ_GT(tracker.r_win_base, seglist.seglist_base_seq) )
+        footprint = tracker.r_win_base - seglist.seglist_base_seq;
    if ( footprint )
-        sequenced = get_q_sequenced(trs);
+        sequenced = get_q_sequenced();
    return ( footprint > sequenced ) ? sequenced : footprint;
}
// FIXIT-P get_q_sequenced() performance could possibly be
-// boosted by tracking sequenced bytes as trs.sos.seglist is updated
+// boosted by tracking sequenced bytes as seglist is updated
// to avoid the while loop, etc. below.
+// Counts contiguous unscanned bytes from cur_rseg (capped by the
+// splitter's max) and rebases seglist_base_seq to the scan cursor.
-uint32_t TcpReassembler::get_q_sequenced(TcpReassemblerState& trs)
+uint32_t TcpReassembler::get_q_sequenced()
{
-    TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
+    TcpSegmentNode* tsn = seglist.cur_rseg;
    if ( !tsn )
    {
-        tsn = trs.sos.seglist.head;
+        tsn = seglist.head;
-        if ( !tsn || SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+        if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
            return 0;
-        trs.sos.seglist.cur_rseg = tsn;
+        seglist.cur_rseg = tsn;
    }
    uint32_t len = 0;
-    const uint32_t limit = trs.tracker->get_splitter()->max();
-    while ( len < limit and next_no_gap(*tsn) )
+    const uint32_t limit = splitter->max();
+    while ( len < limit and tsn->next_no_gap() )
    {
-        if ( !tsn->c_len )
-            trs.sos.seglist.cur_rseg = tsn->next;
+
+        if ( !tsn->unscanned() )
+            seglist.cur_rseg = tsn->next;
        else
-            len += tsn->c_len;
+            len += tsn->unscanned();
        tsn = tsn->next;
    }
-    if ( tsn->c_len )
-        len += tsn->c_len;
+    if ( tsn->unscanned() )
+        len += tsn->unscanned();
-    trs.sos.seglist_base_seq = trs.sos.seglist.cur_rseg->c_seq;
+    seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
    return len;
}
+// True when there is unscanned, contiguously sequenced data at the scan
+// cursor; also rebases seglist_base_seq to the cursor's scan seq.
-bool TcpReassembler::is_q_sequenced(TcpReassemblerState& trs)
+bool TcpReassembler::is_q_sequenced()
{
-    TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
+    TcpSegmentNode* tsn = seglist.cur_rseg;
    if ( !tsn )
    {
-        tsn = trs.sos.seglist.head;
-
-        if ( !tsn || SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+        tsn = seglist.head;
+        if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
            return false;
-        trs.sos.seglist.cur_rseg = tsn;
+        seglist.cur_rseg = tsn;
    }
-    while ( next_no_gap(*tsn) )
+    while ( tsn->next_no_gap() )
    {
-        if ( tsn->c_len )
+        if ( tsn->unscanned() )
            break;
-        tsn = trs.sos.seglist.cur_rseg = tsn->next;
-    }
-
-    trs.sos.seglist_base_seq = tsn->c_seq;
-
-    return (tsn->c_len != 0);
-}
-
-int TcpReassembler::flush_stream(
-    TcpReassemblerState& trs, Packet* p, uint32_t dir, bool final_flush)
-{
-    // this is not always redundant; stream_reassemble rule option causes trouble
-    if ( !trs.tracker->is_reassembly_enabled() )
-        return 0;
-
-    if ( trs.sos.session->flow->two_way_traffic()
-        or (trs.tracker->get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
-    {
-        uint32_t bytes = 0;
-
-        if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
-            bytes = get_q_sequenced(trs); // num bytes in pre-ack mode
-        else
-            bytes = get_q_footprint(trs); // num bytes in post-ack mode
-
-        if ( bytes )
-            return flush_to_seq(trs, bytes, p, dir);
+        tsn = seglist.cur_rseg = tsn->next;
    }
-    if ( final_flush )
-        return do_zero_byte_flush(trs, p, dir);
+    seglist.seglist_base_seq = tsn->scan_seq();
-    return 0;
+    return (tsn->unscanned() != 0);
}
+// Force-flush remaining data at session cleanup; purges flushed/acked
+// segments and bumps the per-side cleanup stats on success.
-void TcpReassembler::final_flush(TcpReassemblerState& trs, Packet* p, uint32_t dir)
+void TcpReassembler::final_flush(Packet* p, uint32_t dir)
{
-    trs.tracker->set_tf_flags(TF_FORCE_FLUSH);
+    tracker.set_tf_flags(TF_FORCE_FLUSH);
-    if ( flush_stream(trs, p, dir, true) )
+    if ( flush_stream(p, dir, true) )
    {
-        if ( trs.server_side )
+        if ( server_side )
            tcpStats.server_cleanups++;
        else
            tcpStats.client_cleanups++;
-        purge_flushed_ackd(trs);
+        purge_flushed_ackd();
    }
-    trs.tracker->clear_tf_flags(TF_FORCE_FLUSH);
+    tracker.clear_tf_flags(TF_FORCE_FLUSH);
}
static Packet* get_packet(Flow* flow, uint32_t flags, bool c2s)
return p;
}
-void TcpReassembler::finish_and_final_flush(
- TcpReassemblerState& trs, Flow* flow, bool clear, Packet* p)
-{
- bool pending = clear and paf_initialized(&trs.paf_state)
- and trs.tracker->splitter_finish(flow);
-
- if ( pending and !(flow->ssn_state.ignore_direction & trs.ignore_dir) )
- final_flush(trs, p, trs.packet_dir);
-}
-
-// Call this only from outside reassembly.
-void TcpReassembler::flush_queued_segments(
- TcpReassemblerState& trs, Flow* flow, bool clear, const Packet* p)
-{
- if ( p )
- {
- finish_and_final_flush(trs, flow, clear, const_cast<Packet*>(p));
- }
- else
- {
- Packet* pdu = get_packet(flow, trs.packet_dir, trs.server_side);
-
- bool pending = clear and paf_initialized(&trs.paf_state);
- if ( pending )
- {
- DetectionEngine de;
- pending = trs.tracker->splitter_finish(flow);
- }
-
- if ( pending and !(flow->ssn_state.ignore_direction & trs.ignore_dir) )
- final_flush(trs, pdu, trs.packet_dir);
- }
-}
-
-// see scan_data_post_ack() for details
-// the key difference is that we operate on forward moving data
-// because we don't wait until it is acknowledged
-int32_t TcpReassembler::scan_data_pre_ack(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
+bool TcpReassembler::splitter_finish(snort::Flow* flow)
{
- assert(trs.sos.session->flow == p->flow);
-
- int32_t ret_val = FINAL_FLUSH_HOLD;
-
- if ( SEQ_GT(trs.sos.seglist.head->c_seq, trs.sos.seglist_base_seq) )
- return ret_val;
-
- if ( !trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
-
- if ( !is_q_sequenced(trs) )
- return ret_val;
-
- TcpSegmentNode* tsn = trs.sos.seglist.cur_sseg;
- uint32_t total = tsn->c_seq - trs.sos.seglist_base_seq;
+ if (!splitter)
+ return true;
- ret_val = FINAL_FLUSH_OK;
- while ( tsn && *flags )
+ if (!splitter_finish_flag)
{
- total += tsn->c_len;
-
- uint32_t end = tsn->c_seq + tsn->c_len;
- uint32_t pos = paf_position(&trs.paf_state);
-
- if ( paf_initialized(&trs.paf_state) && SEQ_LEQ(end, pos) )
- {
- if ( !next_no_gap(*tsn) )
- {
- ret_val = FINAL_FLUSH_HOLD;
- break;
- }
-
- tsn = tsn->next;
- continue;
- }
-
- if ( next_no_gap_c(*tsn) )
- *flags |= PKT_MORE_TO_FLUSH;
- else
- *flags &= ~PKT_MORE_TO_FLUSH;
- int32_t flush_pt = paf_check(
- trs.tracker->get_splitter(), &trs.paf_state, p, tsn->payload(),
- tsn->c_len, total, tsn->c_seq, flags);
-
- if (flush_pt >= 0)
- {
- trs.sos.seglist.cur_sseg = tsn;
- update_rcv_nxt(trs, *tsn);
- return flush_pt;
- }
-
- if (!next_no_gap(*tsn) || (trs.paf_state.paf == StreamSplitter::STOP))
- {
- if ( !(next_no_gap(*tsn) || fin_no_gap(*tsn, trs)) )
- ret_val = FINAL_FLUSH_HOLD;
- break;
- }
-
- tsn = tsn->next;
+ splitter_finish_flag = true;
+ return splitter->finish(flow);
}
-
- trs.sos.seglist.cur_sseg = tsn;
-
- if ( tsn )
- update_rcv_nxt(trs, *tsn);
-
- return ret_val;
+ // there shouldn't be any un-flushed data beyond this point;
+ // returning false here discards it
+ return false;
}
-static inline void fallback(TcpStreamTracker& trk, bool server_side)
+void TcpReassembler::finish_and_final_flush(Flow* flow, bool clear, Packet* p)
{
-#ifndef NDEBUG
- StreamSplitter* splitter = trk.get_splitter();
- assert(splitter);
+ bool pending = clear and paf.paf_initialized() and splitter_finish(flow);
- // FIXIT-L: consolidate these 3
- bool to_server = splitter->to_server();
- assert(server_side == to_server && server_side == !trk.client_tracker);
-#endif
-
- trk.set_splitter(new AtomSplitter(server_side));
- tcpStats.partial_fallbacks++;
+ if ( pending and !(flow->ssn_state.ignore_direction & ignore_dir) )
+ final_flush(p, packet_dir);
}
-void TcpReassembler::fallback(TcpStreamTracker& tracker, bool server_side)
+// Call this only from outside reassembly.
+void TcpReassembler::flush_queued_segments(Flow* flow, bool clear, Packet* p)
{
- ::fallback(tracker, server_side);
-
- Flow* flow = tracker.session->flow;
- if ( server_side )
- flow->set_session_flags(SSNFLAG_ABORT_SERVER);
- else
- flow->set_session_flags(SSNFLAG_ABORT_CLIENT);
-
- if ( flow->gadget and both_splitters_aborted(flow) )
+ if ( p )
{
- flow->clear_gadget();
-
- if (flow->clouseau)
- flow->clear_clouseau();
-
- tcpStats.inspector_fallbacks++;
+ finish_and_final_flush(flow, clear, p);
}
-}
-
-bool TcpReassembler::segment_within_seglist_window(TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
-{
- if ( !trs.sos.seglist.head )
- return true;
-
- // Left side
- uint32_t start;
- if ( SEQ_LT(trs.sos.seglist_base_seq, trs.sos.seglist.head->i_seq) )
- start = trs.sos.seglist_base_seq;
else
- start = trs.sos.seglist.head->i_seq;
-
- if ( SEQ_LEQ(tsd.get_end_seq(), start) )
- return false;
-
- // Right side
- uint32_t end = (trs.sos.seglist.tail->i_seq + trs.sos.seglist.tail->i_len);
- if ( SEQ_GEQ(tsd.get_seq(), end) )
- return false;
-
- return true;
-}
-
-void TcpReassembler::check_first_segment_hole(TcpReassemblerState& trs)
-{
- if ( SEQ_LT(trs.sos.seglist_base_seq, trs.sos.seglist.head->i_seq) )
- {
- trs.sos.seglist_base_seq = trs.sos.seglist.head->i_seq;
- trs.tracker->rcv_nxt = trs.tracker->r_win_base;
- trs.paf_state.paf = StreamSplitter::START;
- }
-}
-
-void TcpReassembler::update_rcv_nxt(TcpReassemblerState& trs, TcpSegmentNode& tsn)
-{
- uint32_t temp = (tsn.i_seq + tsn.i_len);
-
- if ( SEQ_GT(temp, trs.tracker->rcv_nxt) )
- trs.tracker->rcv_nxt = temp;
-}
-
-bool TcpReassembler::has_seglist_hole(TcpReassemblerState& trs, TcpSegmentNode& tsn, PAF_State& ps,
- uint32_t& total, uint32_t& flags)
-{
- if ( !tsn.prev or SEQ_GEQ(tsn.prev->c_seq + tsn.prev->c_len, tsn.c_seq) or
- SEQ_GEQ(tsn.c_seq, trs.tracker->r_win_base) )
- {
- check_first_segment_hole(trs);
- return false;
- }
-
- // safety - prevent seq + total < seq
- if ( total > 0x7FFFFFFF )
- total = 0x7FFFFFFF;
-
- if ( !ps.tot )
- flags |= PKT_PDU_HEAD;
-
- return true;
-}
-
-void TcpReassembler::purge_segments_left_of_hole(TcpReassemblerState& trs, const TcpSegmentNode* end_tsn)
-{
- uint32_t packets_skipped = 0;
-
- TcpSegmentNode* cur_tsn = trs.sos.seglist.head;
- do
- {
- TcpSegmentNode* drop_tsn = cur_tsn;
- cur_tsn = cur_tsn->next;
- delete_reassembly_segment(trs, drop_tsn);
- ++packets_skipped;
- } while( cur_tsn and cur_tsn != end_tsn );
-
- if (PacketTracer::is_active())
- PacketTracer::log("Stream: Skipped %u packets before seglist hole)\n", packets_skipped);
-}
-
-void TcpReassembler::reset_asymmetric_flow_reassembly(TcpReassemblerState& trs)
-{
- TcpSegmentNode* tsn = trs.sos.seglist.head;
- // if there is a hole at the beginning, skip it...
- if ( SEQ_GT(tsn->i_seq, trs.sos.seglist_base_seq) )
{
- trs.sos.seglist_base_seq = tsn->i_seq;
- if (PacketTracer::is_active())
- PacketTracer::log("Stream: Skipped hole at beginning of the seglist\n");
- }
+ Packet* pdu = get_packet(flow, packet_dir, server_side);
- while ( tsn )
- {
- if ( tsn->next and SEQ_GT(tsn->next->i_seq, tsn->i_seq + tsn->i_len) )
+ bool pending = clear and paf.paf_initialized();
+ if ( pending )
{
- tsn = tsn->next;
- purge_segments_left_of_hole(trs, tsn);
- trs.sos.seglist_base_seq = trs.sos.seglist.head->i_seq;
+ DetectionEngine de;
+ pending = splitter_finish(flow);
}
- else
- tsn = tsn->next;
- }
- if ( trs.tracker->is_splitter_paf() )
- fallback(*trs.tracker, trs.server_side);
- else
- paf_reset(&trs.paf_state);
+ if ( pending and !(flow->ssn_state.ignore_direction & ignore_dir) )
+ final_flush(pdu, packet_dir);
+ }
}
-void TcpReassembler::skip_midstream_pickup_seglist_hole(TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
-{
- uint32_t ack = tsd.get_ack();
-
- TcpSegmentNode* tsn = trs.sos.seglist.head;
- while ( tsn )
- {
- if ( SEQ_GEQ( tsn->i_seq + tsn->i_len, ack) )
- break;
-
- if ( tsn->next and SEQ_GT(tsn->next->i_seq, tsn->i_seq + tsn->i_len) )
- {
- tsn = tsn->next;
- purge_segments_left_of_hole(trs, tsn);
- trs.sos.seglist_base_seq = trs.sos.seglist.head->i_seq;
- }
- else if ( !tsn->next and SEQ_LT(tsn->i_seq + tsn->i_len, ack) )
- {
- tsn = tsn->next;
- purge_segments_left_of_hole(trs, tsn);
- trs.sos.seglist_base_seq = ack;
- }
- else
- tsn = tsn->next;
- }
- tsn = trs.sos.seglist.head;
- if ( tsn )
+void TcpReassembler::check_first_segment_hole()
+{
+ if ( SEQ_LT(seglist.seglist_base_seq, seglist.head->start_seq()) )
{
- paf_initialize(&trs.paf_state, tsn->i_seq);
-
- while ( next_no_gap(*tsn) )
- tsn = tsn->next;
- trs.tracker->rcv_nxt = tsn->i_seq + tsn->i_len;
+ seglist.seglist_base_seq = seglist.head->start_seq();
+ seglist.advance_rcv_nxt();
+ paf.state = StreamSplitter::START;
}
- else
- trs.tracker->rcv_nxt = ack;
}
-bool TcpReassembler::flush_on_asymmetric_flow(const TcpReassemblerState &trs, uint32_t flushed, snort::Packet *p)
+uint32_t TcpReassembler::perform_partial_flush(Flow* flow, Packet*& p)
{
- bool asymmetric = flushed && trs.sos.seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
- if ( asymmetric )
- {
- TcpStreamTracker::TcpState peer = trs.tracker->session->get_peer_state(trs.tracker);
- asymmetric = ( peer == TcpStreamTracker::TCP_SYN_SENT || peer == TcpStreamTracker::TCP_SYN_RECV
- || peer == TcpStreamTracker::TCP_MID_STREAM_SENT );
- }
-
- return asymmetric;
+ p = get_packet(flow, packet_dir, server_side);
+ return perform_partial_flush(p);
}
-// iterate over trs.sos.seglist and scan all new acked bytes
-// - new means not yet scanned
-// - must use trs.sos.seglist data (not packet) since this packet may plug a
-// hole and enable paf scanning of following segments
-// - if we reach a flush point
-// - return bytes to flush if data available (must be acked)
-// - return zero if not yet received or received but not acked
-// - if we reach a skip point
-// - jump ahead and resume scanning any available data
-// - must stop if we reach a gap
-// - one segment may lead to multiple checks since
-// it may contain multiple encapsulated PDUs
-// - if we partially scan a segment we must save state so we
-// know where we left off and can resume scanning the remainder
-int32_t TcpReassembler::scan_data_post_ack(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
+// No error checking here, so the caller must ensure that p and p->flow are not null.
+uint32_t TcpReassembler::perform_partial_flush(Packet* p, uint32_t flushed)
{
- assert(trs.sos.session->flow == p->flow);
-
- int32_t ret_val = FINAL_FLUSH_HOLD;
-
- if ( !trs.sos.seglist.cur_sseg || SEQ_GEQ(trs.sos.seglist_base_seq, trs.tracker->r_win_base) )
- return ret_val ;
-
- if ( !trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
-
- StreamSplitter* splitter = trs.tracker->get_splitter();
-
- uint32_t total = 0;
- TcpSegmentNode* tsn = trs.sos.seglist.cur_sseg;
- if ( paf_initialized(&trs.paf_state) )
+ if ( splitter->init_partial_flush(p->flow) )
{
- uint32_t end_seq = tsn->c_seq + tsn->c_len;
- if ( SEQ_EQ(end_seq, paf_position(&trs.paf_state)) )
- {
- total = end_seq - trs.sos.seglist_base_seq;
- tsn = tsn->next;
- }
- else
- total = tsn->c_seq - trs.sos.seglist.cur_rseg->c_seq;
- }
-
- ret_val = FINAL_FLUSH_OK;
- while (tsn && *flags && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base))
- {
- // only flush acked data that fits in pdu reassembly buffer...
- uint32_t end = tsn->c_seq + tsn->c_len;
- uint32_t flush_len;
- int32_t flush_pt;
-
- if ( SEQ_GT(end, trs.tracker->r_win_base))
- flush_len = trs.tracker->r_win_base - tsn->c_seq;
- else
- flush_len = tsn->c_len;
-
- if ( next_acked_no_gap_c(*tsn, trs) )
- *flags |= PKT_MORE_TO_FLUSH;
- else
- *flags &= ~PKT_MORE_TO_FLUSH;
-
- if ( has_seglist_hole(trs, *tsn, trs.paf_state, total, *flags) )
- {
- if (!paf_initialized(&trs.paf_state))
- flush_pt = flush_len;
- else
- flush_pt = total;
-
- trs.paf_state.paf = StreamSplitter::SKIP;
- }
- else
- {
- total += flush_len;
- flush_pt = paf_check(splitter, &trs.paf_state, p, tsn->payload(),
- flush_len, total, tsn->c_seq, flags);
- }
-
- // Get splitter from tracker as paf check may change it.
- splitter = trs.tracker->get_splitter();
- trs.sos.seglist.cur_sseg = tsn;
-
- if ( flush_pt >= 0 )
- {
- trs.sos.seglist_base_seq = trs.sos.seglist.cur_rseg->c_seq;
- return flush_pt;
- }
-
- if (flush_len < tsn->c_len || (splitter->is_paf() and !next_no_gap(*tsn)) ||
- (trs.paf_state.paf == StreamSplitter::STOP))
+ flushed += flush_stream(p, packet_dir, false);
+ paf.paf_jump(flushed);
+ tcpStats.partial_flushes++;
+ tcpStats.partial_flush_bytes += flushed;
+ if ( seglist.seg_count )
{
- if ( !(next_no_gap(*tsn) || fin_acked_no_gap(*tsn, trs)) )
- ret_val = FINAL_FLUSH_HOLD;
- break;
+ purge_to_seq(seglist.head->start_seq() + flushed);
+ tracker.r_win_base = seglist.seglist_base_seq;
}
-
- tsn = tsn->next;
}
-
- return ret_val;
+ return flushed;
}
// we are on a FIN, the data has been scanned, it has no gaps,
// but somehow we are waiting for more data - do final flush here
// FIXIT-M this convoluted expression needs some refactoring to simplify
-bool TcpReassembler::final_flush_on_fin(const TcpReassemblerState &trs, int32_t flush_amt, Packet *p)
+bool TcpReassembler::final_flush_on_fin(int32_t flush_amt, Packet *p, FinSeqNumStatus fin_status)
{
- return trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_SEEN
+ return tracker.fin_seq_status >= fin_status
&& -1 <= flush_amt && flush_amt <= 0
- && trs.paf_state.paf == StreamSplitter::SEARCH
+ && paf.state == StreamSplitter::SEARCH
&& !p->flow->searching_for_service();
}
-int TcpReassembler::flush_on_data_policy(TcpReassemblerState& trs, Packet* p)
-{
- uint32_t flushed = 0;
- last_pdu = nullptr;
-
- switch ( trs.tracker->get_flush_policy() )
- {
- case STREAM_FLPOLICY_IGNORE:
- return 0;
-
- case STREAM_FLPOLICY_ON_ACK:
- break;
-
- case STREAM_FLPOLICY_ON_DATA:
- if ( trs.sos.seglist.head )
- {
- uint32_t flags;
- int32_t flush_amt;
- do
- {
- flags = trs.packet_dir;
- flush_amt = scan_data_pre_ack(trs, &flags, p);
- if ( flush_amt <= 0 )
- break;
-
- flushed += flush_to_seq(trs, flush_amt, p, flags);
- } while ( trs.sos.seglist.head and !p->flow->is_inspection_disabled() );
-
- if ( (trs.paf_state.paf == StreamSplitter::ABORT) && trs.tracker->is_splitter_paf() )
- {
- fallback(*trs.tracker, trs.server_side);
- return flush_on_data_policy(trs, p);
- }
- else if ( final_flush_on_fin(trs, flush_amt, p) )
- finish_and_final_flush(trs, p->flow, true, p);
- }
- break;
- }
-
- if ( !trs.sos.seglist.head )
- return flushed;
-
- if ( trs.tracker->is_retransmit_of_held_packet(p) )
- flushed = perform_partial_flush(trs, p, flushed);
-
- if ( flush_on_asymmetric_flow(trs, flushed, p) )
- {
- purge_to_seq(trs, trs.sos.seglist.head->i_seq + flushed);
- trs.tracker->r_win_base = trs.sos.seglist_base_seq;
- tcpStats.flush_on_asymmetric_flow++;
- }
-
- return flushed;
-}
-
-void TcpReassembler::skip_seglist_hole(TcpReassemblerState& trs, Packet* p, uint32_t flags,
- int32_t flush_amt)
-{
- if ( trs.tracker->is_splitter_paf() )
- {
- if ( flush_amt > 0 )
- update_skipped_bytes(flush_amt, trs);
- fallback(*trs.tracker, trs.server_side);
- }
- else
- {
- if ( flush_amt > 0 )
- flush_to_seq(trs, flush_amt, p, flags);
- trs.paf_state.paf = StreamSplitter::START;
- }
-
- if ( trs.sos.seglist.head )
- {
- if ( flush_amt > 0 )
- purge_to_seq(trs, trs.sos.seglist_base_seq + flush_amt);
- trs.sos.seglist_base_seq = trs.sos.seglist.head->c_seq;
- }
- else
- trs.sos.seglist_base_seq = trs.tracker->r_win_base;
-
- trs.tracker->rcv_nxt = trs.tracker->r_win_base;
- trs.sos.seglist.cur_rseg = trs.sos.seglist.head;
-}
-
-int TcpReassembler::flush_on_ack_policy(TcpReassemblerState& trs, Packet* p)
-{
- uint32_t flushed = 0;
- last_pdu = nullptr;
-
- switch ( trs.tracker->get_flush_policy() )
- {
- case STREAM_FLPOLICY_IGNORE:
- return 0;
-
- case STREAM_FLPOLICY_ON_ACK:
- {
- int32_t flush_amt;
- uint32_t flags;
-
- do
- {
- flags = trs.packet_dir;
- flush_amt = scan_data_post_ack(trs, &flags, p);
- if ( flush_amt <= 0 or trs.paf_state.paf == StreamSplitter::SKIP )
- break;
-
- // for consistency with other cases, should return total
- // but that breaks flushing pipelined pdus
- flushed += flush_to_seq(trs, flush_amt, p, flags);
- assert( flushed );
-
- // ideally we would purge just once after this loop but that throws off base
- if ( trs.sos.seglist.head )
- purge_to_seq(trs, trs.sos.seglist_base_seq);
- }
- while ( trs.sos.seglist.head and !p->flow->is_inspection_disabled() );
-
- if ( (trs.paf_state.paf == StreamSplitter::ABORT) && trs.tracker->is_splitter_paf() )
- {
- fallback(*trs.tracker, trs.server_side);
- return flush_on_ack_policy(trs, p);
- }
- else if ( trs.paf_state.paf == StreamSplitter::SKIP )
- {
- skip_seglist_hole(trs, p, flags, flush_amt);
- return flush_on_ack_policy(trs, p);
- }
- else if ( -1 <= flush_amt and flush_amt <= 0 and
- trs.paf_state.paf == StreamSplitter::SEARCH and
- trs.tracker->fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_ACKED and
- !p->flow->searching_for_service() )
- {
- // we are acknowledging a FIN, the data has been scanned, it has no gaps,
- // but somehow we are waiting for more data - do final flush here
- finish_and_final_flush(trs, p->flow, true, p);
- }
- }
- break;
-
- case STREAM_FLPOLICY_ON_DATA:
- purge_flushed_ackd(trs);
- break;
- }
-
- return flushed;
-}
-
-void TcpReassembler::purge_segment_list(TcpReassemblerState& trs)
-{
- trs.sos.seglist.reset();
- trs.sos.seg_count = 0;
- trs.sos.seg_bytes_total = 0;
- trs.sos.seg_bytes_logical = 0;
- trs.flush_count = 0;
-}
-
-void TcpReassembler::insert_segment_in_empty_seglist(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
-{
- uint32_t overlap = 0;
-
- if ( SEQ_GT(trs.sos.seglist_base_seq, tsd.get_seq()) )
- {
- overlap = trs.sos.seglist_base_seq - tsd.get_seq();
- if ( overlap >= tsd.get_len() )
- return;
- }
-
- add_reassembly_segment(
- trs, tsd, tsd.get_len(), overlap, 0, tsd.get_seq() + overlap, nullptr);
-}
-
-void TcpReassembler::init_overlap_editor(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
+bool TcpReassembler::flush_on_asymmetric_flow(uint32_t flushed, snort::Packet *p)
{
- TcpSegmentNode* left = nullptr, *right = nullptr, *tsn = nullptr;
- int32_t dist_head = 0, dist_tail = 0;
-
- if ( trs.sos.seglist.head && trs.sos.seglist.tail )
- {
- if ( SEQ_GT(tsd.get_seq(), trs.sos.seglist.head->i_seq) )
- dist_head = tsd.get_seq() - trs.sos.seglist.head->i_seq;
- else
- dist_head = trs.sos.seglist.head->i_seq - tsd.get_seq();
-
- if ( SEQ_GT(tsd.get_seq(), trs.sos.seglist.tail->i_seq) )
- dist_tail = tsd.get_seq() - trs.sos.seglist.tail->i_seq;
- else
- dist_tail = trs.sos.seglist.tail->i_seq - tsd.get_seq();
- }
-
- if ( SEQ_LEQ(dist_head, dist_tail) )
- {
- for ( tsn = trs.sos.seglist.head; tsn; tsn = tsn->next )
- {
- right = tsn;
- if ( SEQ_GEQ(right->i_seq, tsd.get_seq() ) )
- break;
-
- left = right;
- }
-
- if ( tsn == nullptr )
- right = nullptr;
- }
- else
- {
- for ( tsn = trs.sos.seglist.tail; tsn; tsn = tsn->prev )
- {
- left = tsn;
- if ( SEQ_LT(left->i_seq, tsd.get_seq() ) )
- break;
-
- right = left;
- }
-
- if (tsn == nullptr)
- left = nullptr;
- }
-
- trs.sos.init_soe(tsd, left, right);
-}
-
-void TcpReassembler::insert_segment_in_seglist(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
-{
- // NORM fast tracks are in sequence - no norms
- if ( trs.sos.seglist.tail && is_segment_fasttrack(trs, trs.sos.seglist.tail, tsd) )
- {
- /* segment fit cleanly at the end of the segment list */
- add_reassembly_segment(
- trs, tsd, tsd.get_len(), 0, 0, tsd.get_seq(), trs.sos.seglist.tail);
- return;
- }
-
- init_overlap_editor(trs, tsd);
- eval_left(trs);
- eval_right(trs);
-
- if ( trs.sos.keep_segment )
- {
- if ( !trs.sos.left and trs.sos.right and
- paf_initialized(&trs.paf_state) and SEQ_GT(trs.paf_state.pos, tsd.get_seq()) )
- {
- return;
- }
-
- // slide is current seq number - initial seq number unless all data
- // truncated from right, then slide is 0
- if ( trs.sos.len - trs.sos.trunc_len > 0 )
- trs.sos.slide = trs.sos.seq - tsd.get_seq();
- else
- trs.sos.slide = 0;
-
- add_reassembly_segment(
- trs, tsd, trs.sos.len, trs.sos.slide, trs.sos.trunc_len, trs.sos.seq, trs.sos.left);
- }
-}
-
-void TcpReassembler::queue_packet_for_reassembly(
- TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
-{
- if ( trs.sos.seg_count == 0 )
- {
- insert_segment_in_empty_seglist(trs, tsd);
- return;
- }
-
- if ( SEQ_GT(trs.tracker->r_win_base, tsd.get_seq() ) )
+ bool asymmetric = flushed && seglist.seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
+ if ( asymmetric )
{
- const int32_t offset = trs.tracker->r_win_base - tsd.get_seq();
-
- if ( offset < tsd.get_len() )
- {
- tsd.slide_segment_in_rcv_window(offset);
- insert_segment_in_seglist(trs, tsd);
- tsd.slide_segment_in_rcv_window(-offset);
- }
+ TcpStreamTracker::TcpState peer = tracker.session->get_peer_state(&tracker);
+ asymmetric = ( peer == TcpStreamTracker::TCP_SYN_SENT || peer == TcpStreamTracker::TCP_SYN_RECV
+ || peer == TcpStreamTracker::TCP_MID_STREAM_SENT );
}
- else
- insert_segment_in_seglist(trs, tsd);
-}
-uint32_t TcpReassembler::perform_partial_flush(TcpReassemblerState& trs, Flow* flow, Packet*& p)
-{
- p = get_packet(flow, trs.packet_dir, trs.server_side);
- return perform_partial_flush(trs, p);
-}
-
-// No error checking here, so the caller must ensure that p, p->flow and context
-// are not null.
-uint32_t TcpReassembler::perform_partial_flush(TcpReassemblerState& trs, Packet* p, uint32_t flushed)
-{
- if ( trs.tracker->get_splitter()->init_partial_flush(p->flow) )
- {
- flushed += flush_stream(trs, p, trs.packet_dir, false);
- paf_jump(&trs.paf_state, flushed);
- tcpStats.partial_flushes++;
- tcpStats.partial_flush_bytes += flushed;
- if ( trs.sos.seg_count )
- {
- purge_to_seq(trs, trs.sos.seglist.head->i_seq + flushed);
- trs.tracker->r_win_base = trs.sos.seglist_base_seq;
- }
- }
- return flushed;
+ return asymmetric;
}
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//--------------------------------------------------------------------------
-// tcp_reassembly.h author davis mcpherson <davmcphe@cisco.com>
+// tcp_reassembler.h author davis mcpherson <davmcphe@cisco.com>
// Created on: Jul 31, 2015
#ifndef TCP_REASSEMBLER_H
#define TCP_REASSEMBLER_H
+
+#include <cstdint>
+
+#include "flow/flow.h"
+#include "protocols/packet.h"
+#include "stream/pafng.h"
#include "stream/stream.h"
-#include "segment_overlap_editor.h"
+#include "tcp_reassembly_segments.h"
-class TcpReassembler : public SegmentOverlapEditor
+namespace snort
+{
+ class StreamSplitter;
+}
+
+class TcpSegmentDescriptor;
+class TcpSegmentNode;
+class TcpStreamTracker;
+enum FinSeqNumStatus : uint8_t;
+
+class TcpReassembler
{
public:
FINAL_FLUSH_OK = -1
};
- virtual void queue_packet_for_reassembly(TcpReassemblerState&, TcpSegmentDescriptor&);
- virtual void purge_segment_list(TcpReassemblerState&);
- virtual void purge_flushed_ackd(TcpReassemblerState&);
- virtual int flush_stream(
- TcpReassemblerState&, snort::Packet* p, uint32_t dir, bool final_flush = false);
- virtual void flush_queued_segments(
- TcpReassemblerState&, snort::Flow* flow, bool clear, const snort::Packet* = nullptr);
- void finish_and_final_flush(
- TcpReassemblerState&, snort::Flow* flow, bool clear, snort::Packet*);
- virtual bool is_segment_pending_flush(const TcpReassemblerState&) const;
- virtual int flush_on_data_policy(TcpReassemblerState&, snort::Packet*);
- virtual int flush_on_ack_policy(TcpReassemblerState&, snort::Packet*);
- virtual bool add_alert(TcpReassemblerState&, uint32_t gid, uint32_t sid);
- virtual bool check_alerted(TcpReassemblerState&, uint32_t gid, uint32_t sid);
- virtual int update_alert(TcpReassemblerState&, uint32_t gid, uint32_t sid,
- uint32_t event_id, uint32_t event_second);
- virtual void purge_alerts(TcpReassemblerState&);
- virtual bool segment_within_seglist_window(TcpReassemblerState&, TcpSegmentDescriptor&);
- void reset_asymmetric_flow_reassembly(TcpReassemblerState&);
- void skip_midstream_pickup_seglist_hole(TcpReassemblerState&, TcpSegmentDescriptor&);
- void initialize_paf(TcpReassemblerState& trs)
+ TcpReassembler(TcpStreamTracker& trk, TcpReassemblySegments& seglist)
+ : tracker(trk), seglist(seglist)
+ { }
+
+ virtual ~TcpReassembler()
+ { }
+
+ virtual void init(bool server, snort::StreamSplitter* ss);
+
+ virtual int eval_flush_policy_on_ack(snort::Packet*) = 0;
+ virtual int eval_flush_policy_on_data(snort::Packet*) = 0;
+ virtual int flush_stream(snort::Packet*, uint32_t dir, bool final_flush = false) = 0;
+ void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* = nullptr);
+ void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet*);
+ uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&);
+ void purge_flushed_ackd();
+
+ void release_splitter()
+ { splitter = nullptr; }
+
+ snort::StreamSplitter* get_splitter()
+ { return splitter; }
+
+ bool is_splitter_paf() const
+ { return splitter && splitter->is_paf(); }
+
+ bool segment_already_scanned(uint32_t seq)
+ {
+ if ( paf.paf_initialized() and SEQ_GT(paf.pos, seq) )
+ return true;
+ else
+ return false;
+ }
+
+ void initialize_paf()
{
// only initialize if we have a data segment queued
- if ( !trs.sos.seglist.head )
+ if ( !seglist.head )
return;
- if ( !paf_initialized(&trs.paf_state) or SEQ_GT(trs.paf_state.seq, trs.sos.seglist.head->i_seq) )
- paf_initialize(&trs.paf_state, trs.sos.seglist.head->i_seq);
+ if ( !paf.paf_initialized() or !SEQ_EQ(paf.seq_num, seglist.head->start_seq()) )
+ paf.paf_initialize(seglist.head->start_seq());
}
- uint32_t perform_partial_flush(TcpReassemblerState&, snort::Flow*, snort::Packet*&);
+ void reset_paf()
+ { paf.paf_reset(); }
+
+ void clear_paf()
+ { paf.paf_clear(); }
+
+ virtual FlushPolicy get_flush_policy() const = 0;
protected:
- TcpReassembler() = default;
-
- void add_reassembly_segment(
- TcpReassemblerState&, TcpSegmentDescriptor&, uint16_t len, uint32_t slide,
- uint32_t trunc, uint32_t seq, TcpSegmentNode* left) override;
-
- void dup_reassembly_segment(
- TcpReassemblerState&, TcpSegmentNode* left, TcpSegmentNode** retSeg) override;
- int delete_reassembly_segment(TcpReassemblerState&, TcpSegmentNode*) override;
- virtual void insert_segment_in_empty_seglist(TcpReassemblerState&, TcpSegmentDescriptor&);
- virtual void insert_segment_in_seglist(TcpReassemblerState&, TcpSegmentDescriptor&);
- virtual uint32_t get_pending_segment_count(const TcpReassemblerState&, unsigned max) const;
- int trim_delete_reassembly_segment(TcpReassemblerState&, TcpSegmentNode*, uint32_t flush_seq);
- void queue_reassembly_segment(TcpReassemblerState&, TcpSegmentNode* prev, TcpSegmentNode*);
- void init_overlap_editor(TcpReassemblerState&, TcpSegmentDescriptor&);
- bool is_segment_fasttrack
- (TcpReassemblerState&, TcpSegmentNode* tail, const TcpSegmentDescriptor&);
- void show_rebuilt_packet(const TcpReassemblerState&, snort::Packet*);
- int flush_data_segments(TcpReassemblerState&, uint32_t flush_len, snort::Packet* pdu);
- void prep_pdu(
- TcpReassemblerState&, snort::Flow*, snort::Packet*, uint32_t pkt_flags, snort::Packet*);
- snort::Packet* initialize_pdu(
- TcpReassemblerState&, snort::Packet*, uint32_t pkt_flags, struct timeval);
- int flush_to_seq(TcpReassemblerState&, uint32_t bytes, snort::Packet*, uint32_t pkt_flags);
- int do_zero_byte_flush(TcpReassemblerState&, snort::Packet*, uint32_t pkt_flags);
- uint32_t get_q_footprint(TcpReassemblerState&);
- uint32_t get_q_sequenced(TcpReassemblerState&);
- bool is_q_sequenced(TcpReassemblerState&);
- void final_flush(TcpReassemblerState&, snort::Packet*, uint32_t dir);
- uint32_t get_reverse_packet_dir(TcpReassemblerState&, const snort::Packet*);
- uint32_t get_forward_packet_dir(TcpReassemblerState&, const snort::Packet*);
- int32_t scan_data_pre_ack(TcpReassemblerState&, uint32_t*, snort::Packet*);
- void fallback(TcpStreamTracker&, bool server_side);
- int32_t scan_data_post_ack(TcpReassemblerState&, uint32_t* flags, snort::Packet*);
- void purge_to_seq(TcpReassemblerState&, uint32_t flush_seq);
- void purge_segments_left_of_hole(TcpReassemblerState&, const TcpSegmentNode*);
-
- bool next_no_gap(const TcpSegmentNode&);
- bool next_no_gap_c(const TcpSegmentNode&);
- bool next_acked_no_gap_c(const TcpSegmentNode&, const TcpReassemblerState&);
- bool fin_no_gap(const TcpSegmentNode&, const TcpReassemblerState&);
- bool fin_acked_no_gap(const TcpSegmentNode&, const TcpReassemblerState&);
- void update_next(TcpReassemblerState&, const TcpSegmentNode&);
- void update_skipped_bytes(uint32_t, TcpReassemblerState&);
- void check_first_segment_hole(TcpReassemblerState&);
- void update_rcv_nxt(TcpReassemblerState&, TcpSegmentNode&);
- bool has_seglist_hole(TcpReassemblerState&, TcpSegmentNode&, PAF_State&, uint32_t& total,
- uint32_t& flags);
- void skip_seglist_hole(TcpReassemblerState&, snort::Packet*, uint32_t flags,
- int32_t flush_amt);
-
- uint32_t perform_partial_flush(TcpReassemblerState&, snort::Packet*, uint32_t flushed = 0);
-
-private:
- bool final_flush_on_fin(const TcpReassemblerState&, int32_t flush_amt, snort::Packet*);
- bool flush_on_asymmetric_flow(const TcpReassemblerState &trs, uint32_t flushed, snort::Packet *p);
+ void show_rebuilt_packet(snort::Packet*);
+ int flush_data_segments(uint32_t flush_len, snort::Packet* pdu);
+ void prep_pdu(snort::Flow*, snort::Packet*, uint32_t pkt_flags, snort::Packet*);
+ snort::Packet* initialize_pdu(snort::Packet*, uint32_t pkt_flags, struct timeval);
+ int flush_to_seq(uint32_t bytes, snort::Packet*, uint32_t pkt_flags);
+ int do_zero_byte_flush(snort::Packet*, uint32_t pkt_flags);
+ uint32_t get_q_footprint();
+ uint32_t get_q_sequenced();
+ bool is_q_sequenced();
+ void final_flush(snort::Packet*, uint32_t dir);
+ bool splitter_finish(snort::Flow* flow);
+ void purge_to_seq(uint32_t flush_seq);
+
+ bool fin_no_gap(const TcpSegmentNode&);
+ bool fin_acked_no_gap(const TcpSegmentNode&);
+ void update_skipped_bytes(uint32_t);
+ void check_first_segment_hole();
+ uint32_t perform_partial_flush(snort::Packet*, uint32_t flushed = 0);
+ bool final_flush_on_fin(int32_t flush_amt, snort::Packet*, FinSeqNumStatus);
+ bool flush_on_asymmetric_flow(uint32_t flushed, snort::Packet *p);
+
+ ProtocolAwareFlusher paf;
+ TcpStreamTracker& tracker;
+ TcpReassemblySegments& seglist;
+ snort::StreamSplitter* splitter = nullptr;
+
+ snort::Packet* last_pdu = nullptr;
+ uint8_t ignore_dir = 0;
+ uint8_t packet_dir = 0;
+ bool server_side = true;
+ bool splitter_finish_flag = false;
+};
+
+class TcpReassemblerIgnore : public TcpReassembler
+{
+public:
+ TcpReassemblerIgnore(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ : TcpReassembler(trk, sl)
+ { }
+
+ void init(bool, snort::StreamSplitter*) override
+ { }
+
+ int eval_flush_policy_on_ack(snort::Packet*) override
+ { return 0; }
+
+ int eval_flush_policy_on_data(snort::Packet*) override
+ { return 0; }
+
+ int flush_stream(snort::Packet*, uint32_t, bool) override
+ { return 0; }
+
+ FlushPolicy get_flush_policy() const override
+ { return STREAM_FLPOLICY_IGNORE; }
};
#endif
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembler_ids.cc author davis mcpherson <davmcphe@cisco.com>
+// Created on: Jul 31, 2015
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tcp_reassembler_ids.h"
+
+#include <cassert>
+
+#include "detection/detection_engine.h"
+#include "log/log.h"
+#include "main/analyzer.h"
+#include "packet_io/active.h"
+#include "profiler/profiler.h"
+#include "protocols/packet_manager.h"
+#include "time/packet_time.h"
+
+#include "tcp_module.h"
+#include "tcp_normalizers.h"
+#include "tcp_segment_node.h"
+#include "tcp_session.h"
+#include "tcp_stream_tracker.h"
+
+using namespace snort;
+
+bool TcpReassemblerIds::has_seglist_hole(TcpSegmentNode& tsn, uint32_t& total, uint32_t& flags)
+{
+ if ( !tsn.prev or SEQ_GEQ(tsn.prev->scan_seq() + tsn.prev->unscanned(), tsn.scan_seq())
+ or SEQ_GEQ(tsn.scan_seq(), tracker.r_win_base) )
+ {
+ check_first_segment_hole();
+ return false;
+ }
+
+ // safety - prevent seq + total < seq
+ if ( total > 0x7FFFFFFF )
+ total = 0x7FFFFFFF;
+
+ if ( !paf.tot )
+ flags |= PKT_PDU_HEAD;
+
+ paf.state = StreamSplitter::SKIP;
+ return true;
+}
+
+void TcpReassemblerIds::skip_seglist_hole(Packet* p, uint32_t flags, int32_t flush_amt)
+{
+ if ( is_splitter_paf() )
+ {
+ if ( flush_amt > 0 )
+ update_skipped_bytes(flush_amt);
+ tracker.fallback();
+ }
+ else
+ {
+ if ( flush_amt > 0 )
+ flush_to_seq(flush_amt, p, flags);
+ paf.state = StreamSplitter::START;
+ }
+
+ if ( seglist.head )
+ {
+ if ( flush_amt > 0 )
+ purge_to_seq(seglist.seglist_base_seq + flush_amt);
+ seglist.seglist_base_seq = seglist.head->scan_seq();
+ }
+ else
+ seglist.seglist_base_seq = tracker.r_win_base; // FIXIT-H - do we need to set rcv_nxt here?
+
+ seglist.cur_rseg = seglist.head;
+ tracker.set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+}
+
+// iterate over seglist and scan all new acked bytes
+// - new means not yet scanned
+// - must use seglist data (not packet) since this packet may plug a
+// hole and enable paf scanning of following segments
+// - if we reach a flush point
+// - return bytes to flush if data available (must be acked)
+// - return zero if not yet received or received but not acked
+// - if we reach a skip point
+// - jump ahead and resume scanning any available data
+// - must stop if we reach a gap
+// - one segment may lead to multiple checks since
+// it may contain multiple encapsulated PDUs
+// - if we partially scan a segment we must save state so we
+// know where we left off and can resume scanning the remainder
+int32_t TcpReassemblerIds::scan_data_post_ack(uint32_t* flags, Packet* p)
+{
+ assert(seglist.session->flow == p->flow);
+
+ int32_t ret_val = FINAL_FLUSH_HOLD;
+
+ if ( !seglist.cur_sseg || SEQ_GEQ(seglist.seglist_base_seq, tracker.r_win_base) )
+        return ret_val;
+
+ if ( !seglist.cur_rseg )
+ seglist.cur_rseg = seglist.cur_sseg;
+
+ uint32_t total = 0;
+ TcpSegmentNode* tsn = seglist.cur_sseg;
+ if ( paf.paf_initialized() )
+ {
+ uint32_t end_seq = tsn->scan_seq() + tsn->unscanned();
+ if ( SEQ_EQ(end_seq, paf.paf_position()) )
+ {
+ total = end_seq - seglist.seglist_base_seq;
+ tsn = tsn->next;
+ }
+ else
+ total = tsn->scan_seq() - seglist.cur_rseg->scan_seq();
+ }
+
+ ret_val = FINAL_FLUSH_OK;
+ while (tsn && *flags && SEQ_LT(tsn->scan_seq(), tracker.r_win_base))
+ {
+ // only flush acked data that fits in pdu reassembly buffer...
+ uint32_t end = tsn->scan_seq() + tsn->unscanned();
+ uint32_t flush_len;
+ int32_t flush_pt;
+
+ if ( SEQ_GT(end, tracker.r_win_base))
+ flush_len = tracker.r_win_base - tsn->scan_seq();
+ else
+ flush_len = tsn->unscanned();
+
+ if ( tsn->next_acked_no_gap(tracker.r_win_base) )
+ *flags |= PKT_MORE_TO_FLUSH;
+ else
+ *flags &= ~PKT_MORE_TO_FLUSH;
+
+ if ( has_seglist_hole(*tsn, total, *flags) )
+ flush_pt = total;
+ else
+ {
+ total += flush_len;
+ flush_pt = paf.paf_check(p, tsn->paf_data(), flush_len, total, tsn->scan_seq(), flags);
+ }
+
+ // Get splitter from tracker as paf check may change it.
+ seglist.cur_sseg = tsn;
+
+ if ( flush_pt >= 0 )
+ {
+ seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
+ return flush_pt;
+ }
+
+ if (flush_len < tsn->unscanned() || (splitter->is_paf() and !tsn->next_no_gap()) ||
+ (paf.state == StreamSplitter::STOP))
+ {
+ if ( !(tsn->next_no_gap() || fin_acked_no_gap(*tsn)) )
+ ret_val = FINAL_FLUSH_HOLD;
+ break;
+ }
+
+ tsn = tsn->next;
+ }
+
+ return ret_val;
+}
+
+int TcpReassemblerIds::eval_flush_policy_on_ack(Packet* p)
+{
+ uint32_t flushed = 0;
+ int32_t flush_amt;
+ uint32_t flags;
+
+ last_pdu = nullptr;
+
+ do
+ {
+ flags = packet_dir;
+ flush_amt = scan_data_post_ack(&flags, p);
+ if ( flush_amt <= 0 or paf.state == StreamSplitter::SKIP )
+ break;
+
+ // for consistency with other cases, should return total
+ // but that breaks flushing pipelined pdus
+ flushed += flush_to_seq(flush_amt, p, flags);
+ assert( flushed );
+
+ // ideally we would purge just once after this loop but that throws off base
+ if ( seglist.head )
+ purge_to_seq(seglist.seglist_base_seq);
+ } while ( seglist.head and !p->flow->is_inspection_disabled() );
+
+ if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
+ {
+ tracker.fallback();
+ return eval_flush_policy_on_ack(p);
+ }
+ else if ( paf.state == StreamSplitter::SKIP )
+ {
+ skip_seglist_hole(p, flags, flush_amt);
+ return eval_flush_policy_on_ack(p);
+ }
+ else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_ACKED) )
+ finish_and_final_flush(p->flow, true, p);
+
+ return flushed;
+}
+
+int TcpReassemblerIds::eval_flush_policy_on_data(Packet* p)
+{
+ uint32_t flushed = 0;
+
+ if ( !seglist.head )
+ return flushed;
+
+ if ( tracker.is_retransmit_of_held_packet(p) )
+ flushed = perform_partial_flush(p, flushed);
+
+ if ( flush_on_asymmetric_flow(flushed, p) )
+ {
+ purge_to_seq(seglist.head->start_seq() + flushed);
+ tracker.r_win_base = seglist.seglist_base_seq;
+ tcpStats.flush_on_asymmetric_flow++;
+ }
+
+ return flushed;
+}
+
+int TcpReassemblerIds::flush_stream(Packet* p, uint32_t dir, bool final_flush)
+{
+ if ( seglist.session->flow->two_way_traffic()
+ or (tracker.get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
+ {
+ uint32_t bytes = get_q_footprint(); // num bytes in post-ack mode
+ if ( bytes )
+ return flush_to_seq(bytes, p, dir);
+ }
+
+ if ( final_flush )
+ return do_zero_byte_flush(p, dir);
+
+ return 0;
+}
+
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembler_ids.h author davis mcpherson <davmcphe@cisco.com>
+// Created on: Jul 31, 2015
+
+#ifndef TCP_REASSEMBLER_IDS_H
+#define TCP_REASSEMBLER_IDS_H
+
+#include <cstdint>
+
+#include "protocols/packet.h"
+#include "stream/stream.h"
+
+#include "tcp_reassembler.h"
+#include "tcp_reassembly_segments.h"
+
+class TcpSegmentDescriptor;
+class TcpSegmentNode;
+
+class TcpReassemblerIds : public TcpReassembler
+{
+public:
+
+
+ TcpReassemblerIds(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ : TcpReassembler(trk, sl)
+ { }
+
+ ~TcpReassemblerIds() override
+ { }
+
+ int eval_flush_policy_on_ack(snort::Packet*) override;
+ int eval_flush_policy_on_data(snort::Packet*) override;
+ int flush_stream(snort::Packet*, uint32_t dir, bool final_flush = false) override;
+
+ FlushPolicy get_flush_policy() const override
+ { return STREAM_FLPOLICY_ON_ACK; }
+
+private:
+ int32_t scan_data_post_ack(uint32_t* flags, snort::Packet*);
+ bool has_seglist_hole(TcpSegmentNode&, uint32_t& total, uint32_t& flags);
+ void skip_seglist_hole(snort::Packet*, uint32_t flags, int32_t flush_amt);
+};
+
+#endif
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembler_ips.cc author davis mcpherson <davmcphe@cisco.com>
+// Created on: Jul 31, 2015
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tcp_reassembler_ips.h"
+
+#include <cassert>
+
+#include "detection/detection_engine.h"
+#include "log/log.h"
+#include "main/analyzer.h"
+#include "packet_io/active.h"
+#include "profiler/profiler.h"
+#include "protocols/packet_manager.h"
+#include "time/packet_time.h"
+
+#include "tcp_module.h"
+#include "tcp_normalizers.h"
+#include "tcp_segment_node.h"
+#include "tcp_session.h"
+#include "tcp_stream_tracker.h"
+
+using namespace snort;
+
+// see scan_data_post_ack() for details
+// the key difference is that we operate on forward moving data
+// because we don't wait until it is acknowledged
+int32_t TcpReassemblerIps::scan_data_pre_ack(uint32_t* flags, Packet* p)
+{
+ assert(seglist.session->flow == p->flow);
+
+ int32_t ret_val = FINAL_FLUSH_HOLD;
+
+ if ( SEQ_GT(seglist.head->scan_seq(), seglist.seglist_base_seq) )
+ return ret_val;
+
+ if ( !seglist.cur_rseg )
+ seglist.cur_rseg = seglist.cur_sseg;
+
+ if ( !is_q_sequenced() )
+ return ret_val;
+
+ TcpSegmentNode* tsn = seglist.cur_sseg;
+ uint32_t total = tsn->scan_seq() - seglist.seglist_base_seq;
+
+ ret_val = FINAL_FLUSH_OK;
+ while ( tsn && *flags )
+ {
+ total += tsn->unscanned();
+
+ uint32_t end = tsn->scan_seq() + tsn->unscanned();
+ uint32_t pos = paf.paf_position();
+
+ if ( paf.paf_initialized() && SEQ_LEQ(end, pos) )
+ {
+ if ( !tsn->next_no_gap() )
+ {
+ ret_val = FINAL_FLUSH_HOLD;
+ break;
+ }
+
+ tsn = tsn->next;
+ continue;
+ }
+
+ if ( tsn->next_no_gap() )
+ *flags |= PKT_MORE_TO_FLUSH;
+ else
+ *flags &= ~PKT_MORE_TO_FLUSH;
+ int32_t flush_pt = paf.paf_check(p, tsn->paf_data(), tsn->unscanned(),
+ total, tsn->scan_seq(), flags);
+
+ if (flush_pt >= 0)
+ {
+ seglist.cur_sseg = tsn;
+ return flush_pt;
+ }
+
+ if (!tsn->next_no_gap() || (paf.state == StreamSplitter::STOP))
+ {
+ if ( !(tsn->next_no_gap() || fin_no_gap(*tsn)) )
+ ret_val = FINAL_FLUSH_HOLD;
+ break;
+ }
+
+ tsn = tsn->next;
+ }
+
+ seglist.cur_sseg = tsn;
+
+ return ret_val;
+}
+
+int TcpReassemblerIps::eval_flush_policy_on_ack(Packet*)
+{
+ purge_flushed_ackd();
+
+ return 0;
+}
+
+int TcpReassemblerIps::eval_flush_policy_on_data(Packet* p)
+{
+ uint32_t flushed = 0;
+ last_pdu = nullptr;
+
+ if ( seglist.head )
+ {
+ uint32_t flags;
+ int32_t flush_amt;
+ do
+ {
+ flags = packet_dir;
+ flush_amt = scan_data_pre_ack(&flags, p);
+ if ( flush_amt <= 0 )
+ break;
+
+ flushed += flush_to_seq(flush_amt, p, flags);
+ } while ( seglist.head and !p->flow->is_inspection_disabled() );
+
+ if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
+ {
+ tracker.fallback();
+ return eval_flush_policy_on_data(p);
+ }
+ else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_SEEN) )
+ finish_and_final_flush(p->flow, true, p);
+ }
+
+ if ( !seglist.head )
+ return flushed;
+
+ if ( tracker.is_retransmit_of_held_packet(p) )
+ flushed = perform_partial_flush(p, flushed);
+
+ if ( flush_on_asymmetric_flow(flushed, p) )
+ {
+ purge_to_seq(seglist.head->start_seq() + flushed);
+ tracker.r_win_base = seglist.seglist_base_seq;
+ tcpStats.flush_on_asymmetric_flow++;
+ }
+
+ return flushed;
+}
+
+int TcpReassemblerIps::flush_stream(Packet* p, uint32_t dir, bool final_flush)
+{
+ if ( seglist.session->flow->two_way_traffic()
+ or (tracker.get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
+ {
+ uint32_t bytes = get_q_sequenced(); // num bytes in pre-ack mode
+ if ( bytes )
+ return flush_to_seq(bytes, p, dir);
+ }
+
+ if ( final_flush )
+ return do_zero_byte_flush(p, dir);
+
+ return 0;
+}
+
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2024-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembler_ips.h author davis mcpherson <davmcphe@cisco.com>
+// Created on: Jul 31, 2015
+
+#ifndef TCP_REASSEMBLER_IPS_H
+#define TCP_REASSEMBLER_IPS_H
+
+#include <cstdint>
+
+#include "flow/flow.h"
+#include "protocols/packet.h"
+#include "stream/stream.h"
+
+#include "tcp_reassembler.h"
+#include "tcp_reassembly_segments.h"
+
+class TcpSegmentDescriptor;
+class TcpSegmentNode;
+
+class TcpReassemblerIps : public TcpReassembler
+{
+public:
+ TcpReassemblerIps(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ : TcpReassembler(trk, sl)
+ { }
+
+ ~TcpReassemblerIps() override
+ { }
+
+ int eval_flush_policy_on_ack(snort::Packet*) override;
+ int eval_flush_policy_on_data(snort::Packet*) override;
+ int flush_stream(snort::Packet*, uint32_t dir, bool final_flush = false) override;
+
+ FlushPolicy get_flush_policy() const override
+ { return STREAM_FLPOLICY_ON_DATA; }
+
+private:
+ int32_t scan_data_pre_ack(uint32_t*, snort::Packet*);
+};
+
+#endif
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// tcp_reassemblers.cc author davis mcpherson <davmcphe@cisco.com>
-// Created on: Oct 9, 2015
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include "tcp_reassemblers.h"
-
-#include "tcp_defs.h"
-#include "tcp_stream_tracker.h"
-
-class TcpReassemblerFirst : public TcpReassembler
-{
-public:
- TcpReassemblerFirst() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os5(trs); }
-};
-
-class TcpReassemblerLast : public TcpReassembler
-{
-public:
- TcpReassemblerLast() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_last(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os4(trs); }
-};
-
-class TcpReassemblerLinux : public TcpReassembler
-{
-public:
- TcpReassemblerLinux() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os2(trs); }
-};
-
-class TcpReassemblerOldLinux : public TcpReassembler
-{
-public:
- TcpReassemblerOldLinux() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os4(trs); }
-};
-
-class TcpReassemblerBSD : public TcpReassembler
-{
-public:
- TcpReassemblerBSD() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os1(trs); }
-};
-
-class TcpReassemblerMacOS : public TcpReassembler
-{
-public:
- TcpReassemblerMacOS() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os1(trs); }
-};
-
-class TcpReassemblerSolaris : public TcpReassembler
-{
-public:
- TcpReassemblerSolaris() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_trim_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os3(trs); }
-};
-
-class TcpReassemblerIrix : public TcpReassembler
-{
-public:
- TcpReassemblerIrix() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os2(trs); }
-};
-
-class TcpReassemblerHpux11 : public TcpReassembler
-{
-public:
- TcpReassemblerHpux11() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_trim_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os3(trs); }
-};
-
-class TcpReassemblerHpux10 : public TcpReassembler
-{
-public:
- TcpReassemblerHpux10() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os2(trs); }
-};
-
-class TcpReassemblerWindows : public TcpReassembler
-{
-public:
- TcpReassemblerWindows() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os1(trs); }
-};
-
-class TcpReassemblerWindows2K3 : public TcpReassembler
-{
-public:
- TcpReassemblerWindows2K3() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_existing(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os1(trs); }
-};
-
-class TcpReassemblerVista : public TcpReassembler
-{
-public:
- TcpReassemblerVista() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os5 (trs); }
-};
-
-class TcpReassemblerProxy : public TcpReassemblerFirst
-{
-public:
- TcpReassemblerProxy() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os5(trs); }
-};
-
-class TcpReassemblerMissed3whs : public TcpReassemblerFirst
-{
-public:
- TcpReassemblerMissed3whs() = default;
-
-private:
- void insert_left_overlap(TcpReassemblerState& trs) override
- { left_overlap_keep_first(trs); }
-
- void insert_right_overlap(TcpReassemblerState& trs) override
- { right_overlap_truncate_new(trs); }
-
- void insert_full_overlap(TcpReassemblerState& trs) override
- { full_right_overlap_os5(trs); }
-};
-
-void TcpReassemblerPolicy::init(TcpSession* ssn, TcpStreamTracker* trk, StreamPolicy pol, bool server)
-{
- trs.sos.init_sos(ssn, pol);
- setup_paf();
- trs.server_side = server;
- trs.tracker = trk;
-
- if ( trs.server_side )
- {
- trs.ignore_dir = SSN_DIR_FROM_CLIENT;
- trs.packet_dir = PKT_FROM_CLIENT;
- }
- else
- {
- trs.ignore_dir = SSN_DIR_FROM_SERVER;
- trs.packet_dir = PKT_FROM_SERVER;
- }
-
- trs.flush_count = 0;
- trs.xtradata_mask = 0;
- trs.alerts.clear();
-
- reassembler = TcpReassemblerFactory::get_instance(pol);
-}
-
-void TcpReassemblerPolicy::reset()
-{ init(nullptr, nullptr, StreamPolicy::OS_DEFAULT, false); }
-
-TcpReassembler* TcpReassemblerFactory::reassemblers[StreamPolicy::OS_END_OF_LIST];
-
-void TcpReassemblerFactory::initialize()
-{
- reassemblers[StreamPolicy::OS_FIRST] = new TcpReassemblerFirst;
- reassemblers[StreamPolicy::OS_LAST] = new TcpReassemblerLast;
- reassemblers[StreamPolicy::OS_LINUX] = new TcpReassemblerLinux;
- reassemblers[StreamPolicy::OS_OLD_LINUX] = new TcpReassemblerOldLinux;
- reassemblers[StreamPolicy::OS_BSD] = new TcpReassemblerBSD;
- reassemblers[StreamPolicy::OS_MACOS] = new TcpReassemblerMacOS;
- reassemblers[StreamPolicy::OS_SOLARIS] = new TcpReassemblerSolaris;
- reassemblers[StreamPolicy::OS_IRIX] = new TcpReassemblerIrix;
- reassemblers[StreamPolicy::OS_HPUX11] = new TcpReassemblerHpux11;
- reassemblers[StreamPolicy::OS_HPUX10] = new TcpReassemblerHpux10;
- reassemblers[StreamPolicy::OS_WINDOWS] = new TcpReassemblerWindows;
- reassemblers[StreamPolicy::OS_WINDOWS2K3] = new TcpReassemblerWindows2K3;
- reassemblers[StreamPolicy::OS_VISTA] = new TcpReassemblerVista;
- reassemblers[StreamPolicy::OS_PROXY] = new TcpReassemblerProxy;
- reassemblers[StreamPolicy::MISSED_3WHS] = new TcpReassemblerMissed3whs;
-}
-
-void TcpReassemblerFactory::term()
-{
- for ( auto sp = StreamPolicy::OS_FIRST; sp < StreamPolicy::OS_END_OF_LIST; sp++ )
- delete reassemblers[sp];
-}
-
-TcpReassembler* TcpReassemblerFactory::get_instance(StreamPolicy os_policy)
-{
- assert( os_policy < StreamPolicy::OS_END_OF_LIST );
- return reassemblers[os_policy];
-}
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// tcp_reassemblers.h author davis mcpherson <davmcphe@cisco.com>
-// Created on: Oct 9, 2015
-
-#ifndef TCP_REASSEMBLERS_H
-#define TCP_REASSEMBLERS_H
-
-#include "tcp_reassembler.h"
-
-class TcpReassemblerFactory
-{
-public:
- static void initialize();
- static void term();
- static TcpReassembler* get_instance(StreamPolicy);
-
-private:
- TcpReassemblerFactory() = delete;
-
- static TcpReassembler* reassemblers[StreamPolicy::OS_END_OF_LIST];
-};
-
-class TcpReassemblerPolicy
-{
-public:
- TcpReassemblerPolicy() = default;
- ~TcpReassemblerPolicy() = default;
-
- void init(TcpSession* ssn, TcpStreamTracker* trk, StreamPolicy pol, bool server);
- void reset();
-
- void queue_packet_for_reassembly(TcpSegmentDescriptor& tsd)
- { reassembler->queue_packet_for_reassembly(trs, tsd); }
-
- bool add_alert(uint32_t gid, uint32_t sid)
- { return reassembler->add_alert(trs, gid, sid); }
-
- bool check_alerted(uint32_t gid, uint32_t sid)
- { return reassembler->check_alerted(trs, gid, sid); }
-
- int update_alert(uint32_t gid, uint32_t sid, uint32_t event_id, uint32_t event_second)
- { return reassembler->update_alert(trs, gid, sid, event_id, event_second); }
-
- void purge_alerts()
- { reassembler->purge_alerts(trs); }
-
- void purge_segment_list()
- { reassembler->purge_segment_list(trs); }
-
- void purge_flushed_ackd()
- { return reassembler->purge_flushed_ackd(trs); }
-
- int flush_stream(snort::Packet* p, uint32_t dir, bool final_flush = false)
- { return reassembler->flush_stream(trs, p, dir, final_flush); }
-
- void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet* p)
- { reassembler->finish_and_final_flush(trs, flow, clear, p); }
-
- void flush_queued_segments(snort::Flow* flow, bool clear, const snort::Packet* p = nullptr)
- { reassembler->flush_queued_segments(trs, flow, clear, p); }
-
- bool is_segment_pending_flush() const
- { return reassembler->is_segment_pending_flush(trs); }
-
- void skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor& tsd)
- { reassembler->skip_midstream_pickup_seglist_hole(trs, tsd); }
-
- void reset_asymmetric_flow_reassembly()
- { reassembler->reset_asymmetric_flow_reassembly(trs); }
-
- void initialize_paf()
- { reassembler->initialize_paf(trs); }
-
- int flush_on_data_policy(snort::Packet* p)
- { return reassembler->flush_on_data_policy(trs, p); }
-
- int flush_on_ack_policy(snort::Packet* p)
- { return reassembler->flush_on_ack_policy(trs, p); }
-
- void set_seglist_base_seq(uint32_t seglist_base_seq)
- { trs.sos.seglist_base_seq = seglist_base_seq; }
-
- uint32_t get_seglist_base_seq() const
- { return trs.sos.seglist_base_seq; }
-
- void set_xtradata_mask(uint32_t xtradata_mask)
- { trs.xtradata_mask = xtradata_mask; }
-
- uint32_t get_xtradata_mask() const
- { return trs.xtradata_mask; }
-
- bool data_was_queued() const
- { return trs.sos.total_bytes_queued > 0; }
-
- uint32_t get_seg_count() const
- { return trs.sos.seg_count; }
-
- uint32_t get_seg_bytes_total() const
- { return trs.sos.seg_bytes_total; }
-
- uint32_t get_overlap_count() const
- { return trs.sos.overlap_count; }
-
- void set_overlap_count(uint32_t overlap_count)
- { trs.sos.overlap_count = overlap_count; }
-
- uint32_t get_flush_count() const
- { return trs.flush_count; }
-
- uint32_t get_seg_bytes_logical() const
- { return trs.sos.seg_bytes_logical; }
-
- StreamPolicy get_reassembly_policy() const
- { return trs.sos.reassembly_policy; }
-
- void set_norm_mode_test()
- { trs.sos.tcp_ips_data = NORM_MODE_TEST; }
-
- bool segment_within_seglist_window(TcpSegmentDescriptor& tsd)
- { return reassembler->segment_within_seglist_window(trs, tsd); }
-
- uint32_t perform_partial_flush(snort::Flow* flow, snort::Packet*& p)
- { return reassembler->perform_partial_flush(trs, flow, p); }
-
- void reset_paf()
- { paf_reset(&trs.paf_state); }
-
- void clear_paf()
- { paf_clear(&trs.paf_state); }
-
- void setup_paf()
- {
- paf_setup(&trs.paf_state);
- if ( trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_sseg = trs.sos.seglist.cur_rseg;
- else
- trs.sos.seglist.cur_sseg = trs.sos.seglist.head;
- }
-
-private:
- TcpReassembler* reassembler = nullptr;
- TcpReassemblerState trs = {};
- friend inline void TraceSegments(const TcpReassemblerPolicy&, const snort::Packet* p);
-};
-#endif
-
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembly_segments.cc author davis mcpherson <davmcphe@cisco.com>
+// Created on: Oct 9, 2015
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tcp_reassembly_segments.h"
+
+#include "log/messages.h"
+#include "packet_io/packet_tracer.h"
+#include "protocols/tcp.h"
+
+#include "tcp_module.h"
+#include "tcp_overlap_resolver.h"
+#include "tcp_segment_descriptor.h"
+#include "tcp_segment_node.h"
+#include "tcp_session.h"
+#include "tcp_stream_tracker.h"
+#include "tcp_overlap_resolver.h"
+
+using namespace snort;
+
+TcpReassemblySegments::~TcpReassemblySegments()
+{
+    // tos is owned by this object (allocated in init()); release it here
+    delete tos;
+}
+
+// Bind this segment list to its session and tracker and select the overlap
+// resolver for the given stream policy.  init() may be called again on the
+// same object, so any previously allocated TcpOverlapState is released first.
+void TcpReassemblySegments::init(TcpSession* ssn, TcpStreamTracker* trk, StreamPolicy pol)
+{
+    session = ssn;
+    tracker = trk;
+    overlap_resolver = TcpOverlapResolverFactory::get_instance(pol);
+
+    // delete of a null pointer is a no-op, so no guard is needed
+    delete tos;
+    tos = new TcpOverlapState(*this);
+}
+
+// Drop all queued segments and rewind the seglist base sequence number.
+void TcpReassemblySegments::reset()
+{
+    purge();
+    seglist_base_seq = 0;
+}
+
+// Advance the reassembly cursor past tsn: its successor becomes current only
+// when contiguous (no sequence gap); otherwise reassembly has no next segment.
+void TcpReassemblySegments::update_next(TcpSegmentNode* tsn)
+{
+    cur_rseg = tsn->next_no_gap() ? tsn->next : nullptr;
+}
+
+// True when at least one queued segment has not yet been flushed.
+bool TcpReassemblySegments::is_segment_pending_flush() const
+{
+    return ( get_pending_segment_count(1) > 0 );
+}
+
+// Count segments pending flush, stopping early once 'max' is reached
+// (max == 0 means no limit).  For max == 1 the cheap seg_count - flush_count
+// difference is sufficient; otherwise walk the list and count unscanned
+// segments whose scan position lies below the receive window base.
+uint32_t TcpReassemblySegments::get_pending_segment_count(const unsigned max) const
+{
+    uint32_t n = seg_count - flush_count;
+
+    if ( !n || max == 1 )
+        return n;
+
+    n = 0;
+    const TcpSegmentNode* tsn = head;
+    while ( tsn )
+    {
+        // only count data not yet scanned that is already inside the window
+        if ( tsn->unscanned() && SEQ_LT(tsn->scan_seq(), tracker->r_win_base) )
+            n++;
+
+        if ( max && n == max )
+            return n;
+
+        tsn = tsn->next;
+    }
+
+    return n;
+}
+
+// Check whether tsd falls within the current seglist window: segments lying
+// entirely left of the window start or right of the tail are out of bounds.
+// An empty list accepts anything.
+bool TcpReassemblySegments::segment_within_seglist_window(TcpSegmentDescriptor& tsd)
+{
+    if ( !head )
+        return true;
+
+    // Left side
+    uint32_t start;
+    if ( SEQ_LT(seglist_base_seq, head->start_seq()) )
+        start = seglist_base_seq;
+    else
+        start = head->seq;
+    // NOTE(review): the comparison above uses head->start_seq() (seq + offset)
+    // but the assignment uses the raw head->seq — confirm the mix is intended.
+
+    if ( SEQ_LEQ(tsd.get_end_seq(), start) )
+        return false;
+
+    // Right side
+    if ( SEQ_GEQ(tsd.get_seq(), tail->next_seq()) )
+        return false;
+
+    return true;
+}
+
+// Queue a segment for reassembly.  If the segment starts left of the receive
+// window base, only the in-window tail of the payload is queued; the tsd is
+// slid forward for the insert and restored before returning to the caller.
+void TcpReassemblySegments::queue_reassembly_segment(TcpSegmentDescriptor& tsd)
+{
+    if ( seg_count == 0 )
+    {
+        insert_segment_in_empty_seglist(tsd);
+    }
+    else if ( SEQ_GT(tracker->r_win_base, tsd.get_seq() ) )
+    {
+        // bytes below r_win_base are already acked; skip past them
+        const int32_t offset = tracker->r_win_base - tsd.get_seq();
+
+        if ( offset < tsd.get_len() )
+        {
+            tsd.slide_segment_in_rcv_window(offset);
+            insert_segment_in_seglist(tsd);
+            tsd.slide_segment_in_rcv_window(-offset);
+        }
+        // else: the whole payload is left of the window — nothing to queue
+    }
+    else
+        insert_segment_in_seglist(tsd);
+}
+
+// First segment on an empty list: trim any bytes that lie left of
+// seglist_base_seq; a segment fully left of the base is dropped outright.
+void TcpReassemblySegments::insert_segment_in_empty_seglist(TcpSegmentDescriptor& tsd)
+{
+    uint32_t overlap = 0;
+
+    if ( SEQ_GT(seglist_base_seq, tsd.get_seq()) )
+    {
+        overlap = seglist_base_seq - tsd.get_seq();
+        if ( overlap >= tsd.get_len() )
+            return;
+    }
+
+    add_reassembly_segment(tsd, tsd.get_len(), overlap, 0, tsd.get_seq(), nullptr);
+}
+
+// A segment "fast tracks" when it lands exactly at the end of the current
+// seglist tail, i.e. it is in sequence with no overlap to resolve.
+bool TcpReassemblySegments::is_segment_fasttrack(TcpSegmentNode* tail, const TcpSegmentDescriptor& tsd)
+{
+    return SEQ_EQ(tsd.get_seq(), tail->next_seq());
+}
+
+// Insert a segment into a non-empty list.  In-sequence segments are appended
+// directly; anything else goes through the policy-specific overlap resolver,
+// which computes the slide/truncation needed before the (possibly trimmed)
+// segment is added.
+void TcpReassemblySegments::insert_segment_in_seglist(TcpSegmentDescriptor& tsd)
+{
+    // NORM fast tracks are in sequence - no norms
+    if ( tail && is_segment_fasttrack(tail, tsd) )
+    {
+        /* segment fit cleanly at the end of the segment list */
+        add_reassembly_segment(tsd, tsd.get_len(), 0, 0, tsd.get_seq(), tail);
+        return;
+    }
+
+    // resolve overlaps against neighbors on both sides
+    tos->init(tsd);
+    overlap_resolver->eval_left(*tos);
+    overlap_resolver->eval_right(*tos);
+
+    if ( tos->keep_segment )
+    {
+        // FIXIT-L - is this skipping the add if the segment is first and already scan
+        if ( !tos->left and tos->right and tracker->reassembler->segment_already_scanned(tsd.get_seq()) )
+        {
+            return;
+        }
+
+        add_reassembly_segment(tsd, tos->len, tos->slide, tos->trunc_len, tos->seq, tos->left);
+    }
+}
+
+// Link tsn into the list after prev and keep the scan/reassembly cursors and
+// byte accounting consistent: if the new node scans earlier than the current
+// scan segment it becomes the scan point (and possibly the new base seq and
+// reassembly point).  Also updates peg counts and high-water stats.
+void TcpReassemblySegments::insert_segment_data(TcpSegmentNode* prev, TcpSegmentNode* tsn)
+{
+    insert(prev, tsn);
+
+    if ( !cur_sseg )
+        cur_sseg = tsn;
+    else if ( SEQ_LT(tsn->scan_seq(), cur_sseg->scan_seq()) )
+    {
+        cur_sseg = tsn;
+        if ( SEQ_LT(tsn->scan_seq(), seglist_base_seq) )
+            seglist_base_seq = tsn->scan_seq();
+
+        if ( cur_rseg && SEQ_LT(tsn->scan_seq(), cur_rseg->scan_seq()) )
+            cur_rseg = tsn;
+    }
+
+    // FIXIT-M - increment seg_count here?
+    seg_bytes_total += tsn->size;
+    total_segs_queued++;
+    tcpStats.segs_queued++;
+
+    if ( seg_count > tcpStats.max_segs )
+        tcpStats.max_segs = seg_count;
+
+    if ( seg_bytes_total > tcpStats.max_bytes )
+        tcpStats.max_bytes = seg_bytes_total;
+}
+
+// Materialize a TcpSegmentNode for tsd and queue it after 'left'.  'slide'
+// trims bytes from the front, 'trunc_len' from the back; if trimming removes
+// all data the segment is dropped and the payload normalized away instead.
+void TcpReassemblySegments::add_reassembly_segment(TcpSegmentDescriptor& tsd, uint16_t len,
+    uint32_t slide, uint32_t trunc_len, uint32_t seq, TcpSegmentNode* left)
+{
+    const int32_t new_size = len - slide - trunc_len;
+    // the assert documents the invariant; release builds rely on the runtime
+    // check below to discard fully trimmed segments
+    assert(new_size >= 0);
+
+    // if trimming will delete all data, don't insert this segment in the queue
+    if ( new_size <= 0 )
+    {
+        tcpStats.payload_fully_trimmed++;
+        tracker->normalizer.trim_win_payload(tsd);
+        return;
+    }
+
+    // FIXIT-L don't allocate overlapped part
+    TcpSegmentNode* tsn = TcpSegmentNode::init(tsd);
+
+    tsn->seq = seq;
+    tsn->offset = slide;
+    tsn->length = (uint16_t)new_size;
+    tsn->cursor = 0;
+    tsn->ts = tsd.get_timestamp();
+
+    // FIXIT-M the urgent ptr handling is broken... urg_offset could be set here but currently
+    // not actually referenced anywhere else. In 2.9.7 the FlushStream function did reference
+    // this field but that code has been lost... urg ptr handling needs to be reviewed and fixed
+    // tsn->urg_offset = tracker->normalizer.set_urg_offset(tsd.get_tcph(), tsd.get_seg_len());
+
+    insert_segment_data(left, tsn);
+
+    seg_bytes_logical += tsn->length;
+    total_bytes_queued += tsn->size;
+    tsd.set_packet_flags(PKT_STREAM_INSERT);
+
+    // advance rcv_nxt when the packet is in order, or when this segment
+    // touches/extends the current rcv_nxt position
+    if( tsd.is_packet_inorder()
+        or (SEQ_LEQ(tsn->start_seq(), tracker->get_rcv_nxt())
+        and SEQ_GEQ(tsn->next_seq(), tracker->get_rcv_nxt())) )
+        advance_rcv_nxt(tsn);
+}
+
+// Duplicate 'left' (used when overlap resolution must split a segment) and
+// insert the copy immediately after it; the copy is returned via retSeg.
+void TcpReassemblySegments::dup_reassembly_segment(TcpSegmentNode* left, TcpSegmentNode** retSeg)
+{
+    TcpSegmentNode* tsn = TcpSegmentNode::init(*left);
+    tcpStats.segs_split++;
+
+    // twiddle the values for overlaps
+    tsn->cursor = left->cursor;
+    tsn->seq = left->seq;
+    insert_segment_data(left, tsn);
+
+    *retSeg = tsn;
+}
+
+// Unlink tsn from the list, fix up the byte/segment accounting and the
+// scan/reassembly cursors, then free the node.
+// Returns the segment's logical (working) length.
+int TcpReassemblySegments::delete_reassembly_segment(TcpSegmentNode* tsn)
+{
+    assert(tsn);
+
+    remove(tsn);
+    seg_bytes_total -= tsn->size;
+    seg_bytes_logical -= tsn->length;
+    const int ret = tsn->length;
+
+    // a fully scanned segment had been counted toward flush_count
+    if ( !tsn->unscanned() )
+    {
+        tcpStats.segs_used++;
+        flush_count--;
+    }
+
+    if ( cur_sseg == tsn )
+        cur_sseg = tsn->next;
+
+    if ( cur_rseg == tsn )
+        update_next(tsn);
+
+    tsn->term();
+
+    return ret;
+}
+
+// Release fully scanned segments that lie below flush_seq, remembering the
+// largest timestamp purged so the peer tracker's ts_last can be advanced.
+// Stops at the first unscanned segment.
+void TcpReassemblySegments::purge_flushed_segments(uint32_t flush_seq)
+{
+    assert( head );
+    uint32_t last_ts = 0;
+
+    TcpSegmentNode* tsn = head;
+    while ( tsn && SEQ_LT(tsn->start_seq(), flush_seq))
+    {
+        if ( tsn->unscanned() )
+            break;
+
+        TcpSegmentNode* dump_me = tsn;
+        tsn = tsn->next;
+        if (dump_me->ts > last_ts)
+            last_ts = dump_me->ts;
+
+        delete_reassembly_segment(dump_me);
+    }
+
+    // pull rcv_nxt forward if the surviving run extends past it
+    if ( tsn and SEQ_LT(tracker->rcv_nxt, tsn->next_seq()) )
+        advance_rcv_nxt(tsn);
+
+    /* Update the "last" time stamp seen from the other side
+     * to be the most recent timestamp (largest) that was removed
+     * from the queue. This will ensure that as we go forward,
+     * last timestamp is the highest one that we had stored and
+     * purged and handle the case when packets arrive out of order,
+     * such as:
+     * P1: seq 10, length 10, timestamp 10
+     * P3: seq 30, length 10, timestamp 30
+     * P2: seq 20, length 10, timestamp 20
+     *
+     * Without doing it this way, the timestamp would be 20. With
+     * the next packet to arrive (P4, seq 40), the ts_last value
+     * wouldn't be updated for the talker in ProcessTcp() since that
+     * code specifically looks for the NEXT sequence number.
+     */
+    if ( last_ts )
+    {
+        if ( tracker->client_tracker )
+        {
+            int32_t delta = last_ts - session->server.get_ts_last();
+            if ( delta > 0 )
+                session->server.set_ts_last(last_ts);
+        }
+        else
+        {
+            int32_t delta = last_ts - session->client.get_ts_last();
+            if ( delta > 0 )
+                session->client.set_ts_last(last_ts);
+        }
+    }
+}
+
+// Drop every segment from the head of the list up to (but not including)
+// end_tsn, marking the stream out of sequence.  Callers must ensure the list
+// is non-empty; a null end_tsn drains the whole list.
+void TcpReassemblySegments::purge_segments_left_of_hole(const TcpSegmentNode* end_tsn)
+{
+    uint32_t packets_skipped = 0;
+
+    TcpSegmentNode* cur_tsn = head;
+    do
+    {
+        TcpSegmentNode* drop_tsn = cur_tsn;
+        cur_tsn = cur_tsn->next;
+        delete_reassembly_segment(drop_tsn);
+        ++packets_skipped;
+    } while( cur_tsn and cur_tsn != end_tsn );
+
+    tracker->set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+
+    if (PacketTracer::is_active())
+        PacketTracer::log("Stream: Skipped %u packets before seglist hole\n", packets_skipped);
+}
+
+// Walk the contiguous run starting at tsn (or at head when tsn is null) and
+// set the tracker's rcv_nxt to the sequence just past the last gap-free node.
+// A null tsn with an empty list is a no-op.
+void TcpReassemblySegments::advance_rcv_nxt(TcpSegmentNode *tsn)
+{
+    if ( !tsn )
+    {
+        if ( !head )
+            return;
+        tsn = head;
+    }
+
+    while (tsn->next_no_gap())
+        tsn = tsn->next;
+    tracker->set_rcv_nxt(tsn->next_seq());
+}
+
+// Walk the whole list and discard all data left of each sequence hole,
+// then advance rcv_nxt to the end of the surviving contiguous run.
+// NOTE(review): head is dereferenced without a null check — callers
+// presumably guarantee a non-empty list; confirm.
+void TcpReassemblySegments::skip_holes()
+{
+    TcpSegmentNode* tsn = head;
+    // if there is a hole at the beginning, skip it...
+    if ( SEQ_GT(tsn->seq, seglist_base_seq) )
+    {
+        seglist_base_seq = tsn->seq;
+        tracker->set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+
+        if (PacketTracer::is_active())
+            PacketTracer::log("Stream: Skipped hole at beginning of the seglist\n");
+    }
+
+    while ( tsn )
+    {
+        // gap between tsn and its successor: purge everything left of it
+        if ( tsn->next and SEQ_GT(tsn->next->start_seq(), tsn->next_seq()) )
+        {
+            tsn = tsn->next;
+            purge_segments_left_of_hole(tsn);
+            seglist_base_seq = head->start_seq();
+        }
+        else
+            tsn = tsn->next;
+    }
+
+    advance_rcv_nxt();
+    // NOTE(review): order is forced OUT_OF_SEQUENCE here even when no hole
+    // was found in the loop above — confirm this is intended.
+    tracker->set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+}
+
+// On midstream pickup, discard data left of any holes below the peer's ack:
+// segments the peer has already acknowledged past can never be completed.
+// If segments survive, restart PAF from the new head; otherwise rcv_nxt is
+// pinned to the ack point.
+void TcpReassemblySegments::skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor& tsd)
+{
+    uint32_t ack = tsd.get_ack();
+
+    TcpSegmentNode* tsn = head;
+    while ( tsn )
+    {
+        // stop once the list has caught up to the acked point
+        if ( SEQ_GEQ( tsn->next_seq(), ack) )
+            break;
+
+        if ( tsn->next and SEQ_GT(tsn->next->start_seq(), tsn->next_seq()) )
+        {
+            // interior hole below ack: drop everything left of it
+            tsn = tsn->next;
+            purge_segments_left_of_hole(tsn);
+            seglist_base_seq = head->start_seq();
+        }
+        else if ( !tsn->next and SEQ_LT(tsn->next_seq(), ack) )
+        {
+            // trailing hole: the whole list is below ack, drain it
+            tsn = tsn->next;
+            purge_segments_left_of_hole(tsn);
+            seglist_base_seq = ack;
+        }
+        else
+            tsn = tsn->next;
+    }
+
+    tsn = head;
+    if ( tsn )
+    {
+        tracker->reassembler->initialize_paf();
+        advance_rcv_nxt(tsn);
+    }
+    else
+        tracker->set_rcv_nxt(ack);
+}
+
+// Release every queued segment and reset all list state and counters.
+void TcpReassemblySegments::purge_segment_list()
+{
+    purge();
+}
--- /dev/null
+//--------------------------------------------------------------------------
+// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation. You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// tcp_reassembly_segments.h author davis mcpherson <davmcphe@cisco.com>
+// Created on: Oct 9, 2015
+
+// guard named after this file (tcp_reassembly_segments.h); the old
+// TCP_REASSEMBLERS_H guard belonged to the removed tcp_reassemblers.h
+#ifndef TCP_REASSEMBLY_SEGMENTS_H
+#define TCP_REASSEMBLY_SEGMENTS_H
+
+#include <cstdint>
+
+#include "tcp_defs.h"
+
+#include "tcp_segment_node.h"
+
+class TcpOverlapResolver;
+class TcpOverlapState;
+class TcpSegmentDescriptor;
+class TcpSession;
+class TcpStreamTracker;
+
+// Doubly-linked list of TcpSegmentNodes queued for reassembly on one
+// direction of a TCP session, plus the byte/segment accounting for it.
+class TcpReassemblySegments
+{
+public:
+    TcpReassemblySegments() = default;
+    ~TcpReassemblySegments();
+
+    void init(TcpSession* ssn, TcpStreamTracker* trk, StreamPolicy pol);
+    void reset();
+
+    void update_next(TcpSegmentNode*);
+    bool segment_within_seglist_window(TcpSegmentDescriptor&);
+    void queue_reassembly_segment(TcpSegmentDescriptor&);
+    void add_reassembly_segment(TcpSegmentDescriptor&, uint16_t, uint32_t,
+        uint32_t, uint32_t, TcpSegmentNode*);
+    void dup_reassembly_segment(TcpSegmentNode*, TcpSegmentNode**);
+    int delete_reassembly_segment(TcpSegmentNode*);
+    void advance_rcv_nxt(TcpSegmentNode *tsn = nullptr);
+    void purge_flushed_segments(uint32_t flush_seq);
+    void purge_segments_left_of_hole(const TcpSegmentNode*);
+    void skip_holes();
+    void skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor&);
+    void purge_segment_list();
+
+    bool is_segment_pending_flush() const;
+
+    void set_seglist_base_seq(uint32_t base_seq)
+    { seglist_base_seq = base_seq; }
+
+    uint32_t get_seglist_base_seq() const
+    { return seglist_base_seq; }
+
+    bool data_was_queued() const
+    { return total_bytes_queued > 0; }
+
+    uint32_t get_seg_count() const
+    { return seg_count; }
+
+    uint32_t get_seg_bytes_total() const
+    { return seg_bytes_total; }
+
+    uint32_t get_overlap_count() const
+    { return overlap_count; }
+
+    void set_overlap_count(uint32_t count)
+    { overlap_count = count; }
+
+    uint32_t get_flush_count() const
+    { return flush_count; }
+
+    uint32_t get_seg_bytes_logical() const
+    { return seg_bytes_logical; }
+
+private:
+    void insert_segment_data(TcpSegmentNode* prev, TcpSegmentNode*);
+
+
+    // Link ss into the list after prev (or at the head when prev is null),
+    // maintaining head/tail and the segment count.
+    void insert(TcpSegmentNode* prev, TcpSegmentNode* ss)
+    {
+        if ( prev )
+        {
+            ss->next = prev->next;
+            ss->prev = prev;
+            prev->next = ss;
+
+            if ( ss->next )
+                ss->next->prev = ss;
+            else
+                tail = ss;
+        }
+        else
+        {
+            ss->next = head;
+
+            if ( ss->next )
+                ss->next->prev = ss;
+            else
+                tail = ss;
+            head = ss;
+        }
+
+        seg_count++;
+    }
+
+    // Unlink ss from the list, maintaining head/tail and the segment count.
+    // Does not free the node; callers own that (see delete_reassembly_segment).
+    void remove(TcpSegmentNode* ss)
+    {
+        if ( ss->prev )
+            ss->prev->next = ss->next;
+        else
+            head = ss->next;
+
+        if ( ss->next )
+            ss->next->prev = ss->prev;
+        else
+            tail = ss->prev;
+
+        seg_count--;
+    }
+
+    // Free every node and zero all cursors and counters; returns the number
+    // of nodes released.
+    uint32_t purge()
+    {
+        int i = 0;
+
+        while ( head )
+        {
+            i++;
+            TcpSegmentNode* dump_me = head;
+            head = head->next;
+            dump_me->term();
+        }
+
+        head = tail = cur_rseg = cur_sseg = nullptr;
+        seg_count = 0;
+        flush_count = 0;
+        seg_bytes_total = 0;
+        seg_bytes_logical = 0;
+        total_bytes_queued = 0;
+        total_segs_queued = 0;
+        overlap_count = 0;
+        return i;
+    }
+
+public:
+    TcpSegmentNode* head = nullptr;
+    TcpSegmentNode* tail = nullptr;
+
+    // current reassembly (rseg) and scan (sseg) positions in the list
+    TcpSegmentNode* cur_rseg = nullptr;
+    TcpSegmentNode* cur_sseg = nullptr;
+
+    uint32_t seg_count = 0; /* number of current queued segments */
+    uint32_t flush_count = 0; /* queued segments already flushed */
+
+    uint32_t seglist_base_seq = 0; /* seq of first queued segment */
+    uint32_t seg_bytes_total = 0; /* total bytes currently queued */
+    uint32_t seg_bytes_logical = 0; /* logical bytes queued (total - overlaps) */
+    uint32_t total_bytes_queued = 0; /* total bytes queued (life of session) */
+    uint32_t total_segs_queued = 0; /* number of segments queued (life) */
+    uint32_t overlap_count = 0; /* overlaps encountered */
+
+    // non-owning back-references set by init(); tos is owned (see dtor)
+    TcpSession* session = nullptr;
+    TcpStreamTracker* tracker = nullptr;
+    TcpOverlapResolver* overlap_resolver = nullptr;
+    TcpOverlapState* tos = nullptr;
+
+private:
+    void insert_segment_in_empty_seglist(TcpSegmentDescriptor&);
+    void insert_segment_in_seglist(TcpSegmentDescriptor&);
+
+    bool is_segment_fasttrack(TcpSegmentNode*, const TcpSegmentDescriptor&);
+    uint32_t get_pending_segment_count(const unsigned max) const;
+
+};
+
+#endif
+
#include "detection/rules.h"
#include "packet_io/packet_tracer.h"
#include "protocols/tcp_options.h"
-#include "stream/tcp/tcp_defs.h"
-#include "stream/tcp/tcp_stream_tracker.h"
+
+#include "tcp_defs.h"
+#include "tcp_event_logger.h"
+#include "tcp_stream_tracker.h"
using namespace snort;
#include <cassert>
-#include <daq_common.h>
-
#include "flow/flow.h"
#include "detection/ips_context.h"
#include "main/snort_config.h"
#include "packet_io/active.h"
#include "protocols/packet.h"
#include "protocols/tcp.h"
-#include "stream/tcp/tcp_event_logger.h"
+class TcpEventLogger;
class TcpStreamTracker;
class TcpSegmentDescriptor
void set_talker(TcpStreamTracker& tracker)
{ talker = &tracker; }
+ bool is_packet_inorder() const
+ {
+ return packet_inorder;
+ }
+
+ void set_packet_inorder(bool inorder)
+ {
+ packet_inorder = inorder;
+ }
+
private:
snort::Flow* const flow;
snort::Packet* const pkt;
uint16_t dst_port;
uint32_t packet_timestamp;
bool packet_from_client;
+ bool packet_inorder = false;
bool meta_ack_packet = false;
};
#include "utils/util.h"
-#include "segment_overlap_editor.h"
#include "tcp_module.h"
+#include "tcp_segment_descriptor.h"
+
+using namespace snort;
#define USE_RESERVE
#ifdef USE_RESERVE
tcpStats.mem_in_use += len;
}
tsn->tv = tv;
- tsn->i_len = tsn->c_len = len;
+ tsn->length = len;
memcpy(tsn->data, payload, len);
tsn->prev = tsn->next = nullptr;
- tsn->i_seq = tsn->c_seq = 0;
- tsn->offset = tsn->o_offset = 0;
+
+ tsn->seq = 0;
+ tsn->offset = 0;
+ tsn->cursor = 0;
tsn->ts = 0;
return tsn;
return create(tsd.get_pkt()->pkth->ts, tsd.get_pkt()->data, tsd.get_len());
}
-TcpSegmentNode* TcpSegmentNode::init(TcpSegmentNode& tns)
+TcpSegmentNode* TcpSegmentNode::init(TcpSegmentNode& tsn)
{
- return create(tns.tv, tns.payload(), tns.c_len);
+ return create(tsn.tv, tsn.payload(), tsn.length);
}
void TcpSegmentNode::term()
uint32_t rseq, uint16_t orig_dsize, bool *full_retransmit)
{
// retransmit must have same payload at same place
- if ( !SEQ_EQ(i_seq, rseq) )
+ if ( !SEQ_EQ(seq, rseq) )
return false;
- if ( orig_dsize == c_len )
+ if ( orig_dsize == unscanned() )
{
- uint16_t cmp_len = ( c_len <= rsize ) ? c_len : rsize;
+ uint16_t cmp_len = ( length <= rsize ) ? length : rsize;
if ( !memcmp(data, rdata, cmp_len) )
return true;
}
// tcp_segment_node.h author davis mcpherson <davmcphe@cisco.com>
// Created on: Sep 21, 2015
-#ifndef TCP_SEGMENT_H
-#define TCP_SEGMENT_H
+#ifndef TCP_SEGMENT_NODE_H
+#define TCP_SEGMENT_NODE_H
+
+#include <cassert>
+
+#include "protocols/packet.h"
+#include "protocols/tcp.h"
-#include "tcp_segment_descriptor.h"
#include "tcp_defs.h"
class TcpSegmentDescriptor;
uint8_t* payload()
{ return data + offset; }
+ uint8_t* paf_data()
+ { return data + offset + cursor; }
+
+ uint32_t start_seq() const
+ { return seq + offset; }
+
+ uint32_t next_seq() const
+ { return start_seq() + length; }
+
+ uint32_t scan_seq() const
+ { return start_seq() + cursor; }
+
bool is_packet_missing(uint32_t to_seq)
{
if ( next )
- return !(SEQ_EQ((i_seq + i_len), next->i_seq));
+ return !(SEQ_EQ(next_seq(), next->start_seq()));
else
- return SEQ_LT((c_seq + c_len), to_seq);
+ return SEQ_LT(next_seq(), to_seq);
}
- void update_reassembly_cursor(uint16_t bytes)
+ void advance_cursor(uint16_t bytes)
+ { cursor += bytes; }
+
+ unsigned unscanned() const
{
- c_seq += bytes;
- c_len -= bytes;
- offset += bytes;
+ assert(cursor <= length);
+ return length - cursor;
}
-public:
- TcpSegmentNode* prev;
- TcpSegmentNode* next;
-
- struct timeval tv;
- uint32_t ts;
- uint32_t i_seq; // initial seq # of the data segment
- uint32_t c_seq; // current seq # of data for reassembly
- uint16_t i_len; // initial length of the data segment
- uint16_t c_len; // length of data remaining for reassembly
- uint16_t offset; // current offset into the data buffer for reassembly
- uint16_t o_offset; // offset into the data buffer due to overlaps
- uint16_t size; // actual allocated size (overlaps cause i_len to differ)
- uint8_t data[1];
-};
-
-class TcpSegmentList
-{
-public:
- uint32_t reset()
+ bool next_no_gap()
{
- int i = 0;
-
- while ( head )
- {
- i++;
- TcpSegmentNode* dump_me = head;
- head = head->next;
- dump_me->term();
- }
-
- head = tail = cur_rseg = cur_sseg = nullptr;
- count = 0;
- return i;
+ return next and SEQ_EQ(next_seq(), next->start_seq());
}
- void insert(TcpSegmentNode* prev, TcpSegmentNode* ss)
+ bool next_acked_no_gap(uint32_t seq_acked)
{
- if ( prev )
- {
- ss->next = prev->next;
- ss->prev = prev;
- prev->next = ss;
-
- if ( ss->next )
- ss->next->prev = ss;
- else
- tail = ss;
- }
- else
- {
- ss->next = head;
-
- if ( ss->next )
- ss->next->prev = ss;
- else
- tail = ss;
- head = ss;
- }
+ if ( !next_no_gap() )
+ return false;
- count++;
+ return SEQ_LT(next->start_seq() + next->cursor, seq_acked);
}
- void remove(TcpSegmentNode* ss)
- {
- if ( ss->prev )
- ss->prev->next = ss->next;
- else
- head = ss->next;
-
- if ( ss->next )
- ss->next->prev = ss->prev;
- else
- tail = ss->prev;
+public:
+ TcpSegmentNode* prev;
+ TcpSegmentNode* next;
- count--;
- }
+ struct timeval tv;
+ uint32_t ts;
- TcpSegmentNode* head = nullptr;
- TcpSegmentNode* tail = nullptr;
- TcpSegmentNode* cur_rseg = nullptr;
- TcpSegmentNode* cur_sseg = nullptr;
- uint32_t count = 0;
+ uint32_t seq; // initial seq # of the data segment (fixed)
+ uint16_t length; // working length of the segment data (relative to offset)
+ uint16_t offset; // working start of segment data
+ uint16_t cursor; // scan position (relative to offset)
+ uint16_t size; // allocated payload size
+ uint8_t data[1];
};
#endif
#include "detection/detection_engine.h"
#include "detection/rules.h"
+#include "framework/data_bus.h"
#include "log/log.h"
#include "packet_io/packet_tracer.h"
#include "profiler/profiler.h"
#include "protocols/eth.h"
#include "pub_sub/intrinsic_event_ids.h"
+#include "pub_sub/stream_event_ids.h"
+#include "stream/stream.h"
#include "stream_tcp.h"
#include "tcp_ha.h"
#include "tcp_module.h"
#include "tcp_normalizers.h"
-#include "tcp_reassemblers.h"
#include "tcp_segment_node.h"
#include "tcp_state_machine.h"
#include "tcp_trace.h"
TcpSegmentNode::clear();
}
-TcpSession::TcpSession(Flow* f) : TcpStreamSession(f)
+TcpSession::TcpSession(Flow* f)
+ : Session(f), client(true), server(false)
{
tsm = TcpStateMachine::get_instance();
- splitter_init = false;
+ client.init_tcp_state(this);
+ server.init_tcp_state(this);
- client.session = this;
- server.session = this;
tcpStats.instantiated++;
}
bool TcpSession::setup(Packet*)
{
- client.init_tcp_state();
- server.init_tcp_state();
- lws_init = tcp_init = false;
+ client.init_tcp_state(this);
+ server.init_tcp_state(this);
+ tcp_init = false;
generate_3whs_alert = true;
cleaning = false;
splitter_init = false;
return true;
}
-// FIXIT-M once TcpReassembler interface is abstract class move this to base class
void TcpSession::restart(Packet* p)
{
// sanity check since this is called externally
if ( talker->midstream_initial_ack_flush )
{
talker->midstream_initial_ack_flush = false;
- talker->reassembler.flush_on_data_policy(p);
+ talker->eval_flush_policy_on_data(p);
}
if (p->dsize > 0)
- listener->reassembler.flush_on_data_policy(p);
+ listener->eval_flush_policy_on_data(p);
if (p->ptrs.tcph->is_ack())
- talker->reassembler.flush_on_ack_policy(p);
+ talker->eval_flush_policy_on_ack(p);
tcpStats.restarts++;
}
{
assert(!p or p->flow == flow);
if ( !tcp_init )
- {
- if ( lws_init )
- tcpStats.no_pickups++;
return;
- }
- lws_init = false;
tcp_init = false;
tcpStats.released++;
if ( !flow->two_way_traffic() and free_flow_data )
tcpStats.asymmetric_flows++;
- if ( flush_segments )
- {
- client.reassembler.flush_queued_segments(flow, true, p);
- server.reassembler.flush_queued_segments(flow, true, p);
- }
-
- if ( p )
- {
- client.finalize_held_packet(p);
- server.finalize_held_packet(p);
- }
- else
- {
- client.finalize_held_packet(flow);
- server.finalize_held_packet(flow);
- }
-
- client.reassembler.purge_segment_list();
- server.reassembler.purge_segment_list();
-
+ client.clear_tracker(flow, p, flush_segments, restart);
+ server.clear_tracker(flow, p, flush_segments, restart);
update_perf_base_state(TcpStreamTracker::TCP_CLOSED);
-
- set_splitter(true, nullptr);
- set_splitter(false, nullptr);
+ tel.log_internal_event(SESSION_EVENT_CLEAR);
if ( restart )
- {
flow->restart(free_flow_data);
- client.reassembler.reset_paf();
- server.reassembler.reset_paf();
- }
else
- {
flow->clear(free_flow_data);
- client.reassembler.clear_paf();
- server.reassembler.clear_paf();
- }
-
- tel.log_internal_event(SESSION_EVENT_CLEAR);
}
void TcpSession::update_perf_base_state(char newState)
server.normalizer.init(StreamPolicy::MISSED_3WHS, this, &server, &client);
}
-void TcpSession::update_stream_order(const TcpSegmentDescriptor& tsd, bool aligned)
-{
- TcpStreamTracker* listener = tsd.get_listener();
- uint32_t seq = tsd.get_seq();
-
- switch ( listener->order )
- {
- case TcpStreamTracker::IN_SEQUENCE:
- if ( aligned )
- tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
- else if ( SEQ_GT(seq, listener->rcv_nxt) )
- {
- listener->order = TcpStreamTracker::NONE;
- listener->hole_left_edge = listener->rcv_nxt;
- listener->hole_right_edge = seq - 1;
- }
- break;
-
- case TcpStreamTracker::NONE:
- if ( aligned )
- {
- tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
- if ( SEQ_GT(tsd.get_end_seq(), listener->hole_right_edge) )
- listener->order = TcpStreamTracker::OUT_OF_SEQUENCE;
- else
- listener->hole_left_edge = tsd.get_end_seq();
- }
- else
- {
- if ( SEQ_LEQ(seq, listener->hole_right_edge) )
- {
- if ( SEQ_GT(seq, listener->hole_left_edge) )
- listener->hole_right_edge = seq - 1;
- else if ( SEQ_GT(tsd.get_end_seq(), listener->hole_left_edge) )
- {
- listener->hole_left_edge = tsd.get_end_seq();
- tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
- }
- }
- // accounting for overlaps when not aligned
- if ( SEQ_GT(listener->hole_left_edge, listener->hole_right_edge) )
- listener->order = TcpStreamTracker::OUT_OF_SEQUENCE;
- }
- break;
-
- case TcpStreamTracker::OUT_OF_SEQUENCE:
- tsd.set_packet_flags(PKT_STREAM_ORDER_BAD);
- }
-}
-
void TcpSession::set_os_policy()
{
StreamPolicy client_os_policy = flow->ssn_policy ?
server_os_policy = StreamPolicy::OS_FIRST;
}
- client.reassembler.init(this, &client, client_os_policy, false);
- server.reassembler.init(this, &server, server_os_policy, true);
+ client.seglist.init(this, &client, client_os_policy);
+ server.seglist.init(this, &server, server_os_policy);
}
// FIXIT-M this is no longer called (but should be)
}
}
-void TcpSession::update_session_on_rst(TcpSegmentDescriptor& tsd, bool flush)
+void TcpSession::update_session_on_rst(const TcpSegmentDescriptor& tsd, bool flush)
{
Packet* p = tsd.get_pkt();
set_pkt_action_flag(ACTION_BAD_PKT);
}
-int32_t TcpSession::kickstart_asymmetric_flow(const TcpSegmentDescriptor& tsd, TcpStreamTracker* listener)
-{
- listener->reassembler.reset_asymmetric_flow_reassembly();
- listener->reassembler.flush_on_data_policy(tsd.get_pkt());
-
- int32_t space_left =
- tcp_config->max_queued_bytes - listener->reassembler.get_seg_bytes_total();
-
- if ( listener->get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV )
- {
- listener->set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
- if (PacketTracer::is_active())
- PacketTracer::log("Stream: Kickstart of midstream asymmetric flow! Seglist queue space: %u\n",
- space_left );
- }
- else
- {
- if (PacketTracer::is_active())
- PacketTracer::log("Stream: Kickstart of asymmetric flow! Seglist queue space: %u\n",
- space_left );
- }
-
- return space_left;
-}
-
bool TcpSession::check_reassembly_queue_thresholds(TcpSegmentDescriptor& tsd, TcpStreamTracker* listener)
{
// if this packet fits within the current queue limit window then it's good
- if( listener->reassembler.segment_within_seglist_window(tsd) )
+ if( listener->seglist.segment_within_seglist_window(tsd) )
return false;
bool inline_mode = tsd.is_nap_policy_inline();
if ( tcp_config->max_queued_bytes )
{
int32_t space_left =
- tcp_config->max_queued_bytes - listener->reassembler.get_seg_bytes_total();
+ tcp_config->max_queued_bytes - listener->seglist.get_seg_bytes_total();
if ( space_left < (int32_t)tsd.get_len() )
{
// and flush to free up seglist space
if ( tsd.is_ips_policy_inline() && !tsd.get_pkt()->flow->two_way_traffic() )
{
- space_left = kickstart_asymmetric_flow(tsd, listener);
+ space_left = listener->kickstart_asymmetric_flow(tsd, tcp_config->max_queued_bytes);
if ( space_left >= (int32_t)tsd.get_len() )
return false;
}
if ( tcp_config->max_queued_segs )
{
- if ( listener->reassembler.get_seg_count() + 1 > tcp_config->max_queued_segs )
+ if ( listener->seglist.get_seg_count() + 1 > tcp_config->max_queued_segs )
{
tcpStats.exceeded_max_segs++;
// and flush to free up seglist space
if ( tsd.is_ips_policy_inline() && !tsd.get_pkt()->flow->two_way_traffic() )
{
- kickstart_asymmetric_flow(tsd, listener);
- if ( listener->reassembler.get_seg_count() + 1 <= tcp_config->max_queued_segs )
+ listener->kickstart_asymmetric_flow(tsd, tcp_config->max_queued_bytes);
+ if ( listener->seglist.get_seg_count() + 1 <= tcp_config->max_queued_segs )
return false;
}
else
server.set_tcp_options_len(tcp_options_len);
- bool stream_is_inorder = ( tsd.get_seq() == listener->rcv_nxt );
+ tsd.set_packet_inorder(tsd.get_seq() == listener->rcv_nxt );
- int rc = listener->normalizer.apply_normalizations(tsd, tsd.get_seq(), stream_is_inorder);
+ int rc = listener->normalizer.apply_normalizations(tsd, tsd.get_seq(), tsd.is_packet_inorder());
switch ( rc )
{
case TcpNormalizer::NORM_OK:
- if ( stream_is_inorder )
- listener->rcv_nxt = tsd.get_end_seq();
-
- update_stream_order(tsd, stream_is_inorder);
check_small_segment_threshold(tsd, listener);
// don't queue data if we are ignoring or queue thresholds are exceeded
if ( filter_packet_for_reassembly(tsd, listener) )
{
set_packet_header_foo(tsd);
- listener->reassembler.queue_packet_for_reassembly(tsd);
+ listener->seglist.queue_reassembly_segment(tsd);
// Alert if overlap limit exceeded
if ( (tcp_config->overlap_limit)
- && (listener->reassembler.get_overlap_count() > tcp_config->overlap_limit) )
+ && (listener->seglist.get_overlap_count() > tcp_config->overlap_limit) )
{
tel.set_tcp_event(EVENT_EXCESSIVE_OVERLAP);
- listener->reassembler.set_overlap_count(0);
+ listener->seglist.set_overlap_count(0);
}
}
+ else if ( tsd.is_packet_inorder() )
+ listener->set_rcv_nxt(tsd.get_end_seq());
+
+ listener->update_stream_order(tsd, tsd.is_packet_inorder());
+
break;
case TcpNormalizer::NORM_TRIMMED:
default:
assert(false);
break;
-
}
}
if ( flush )
- listener->reassembler.flush_on_data_policy(tsd.get_pkt());
+ listener->eval_flush_policy_on_data(tsd.get_pkt());
else
- listener->reassembler.initialize_paf();
+ listener->reassembler->initialize_paf();
}
-TcpStreamTracker::TcpState TcpSession::get_talker_state(TcpSegmentDescriptor& tsd)
+TcpStreamTracker::TcpState TcpSession::get_talker_state(const TcpSegmentDescriptor& tsd)
{
return tsd.get_talker()->get_tcp_state();
}
-TcpStreamTracker::TcpState TcpSession::get_listener_state(TcpSegmentDescriptor& tsd)
+TcpStreamTracker::TcpState TcpSession::get_listener_state(const TcpSegmentDescriptor& tsd)
{
return tsd.get_listener()->get_tcp_state();
}
return; // We'll check & clear the TF_FORCE_FLUSH next time through
// Need to convert the addresses to network order
- if ( server.reassembler.flush_stream(p, PKT_FROM_SERVER) )
- server.reassembler.purge_flushed_ackd();
+ if ( server.reassembler->flush_stream(p, PKT_FROM_SERVER) )
+ server.reassembler->purge_flushed_ackd();
server.clear_tf_flags(TF_FORCE_FLUSH);
}
if ( p->packet_flags & PKT_REBUILT_STREAM )
return; // TF_FORCE_FLUSH checked & cleared next time through
- if ( client.reassembler.flush_stream(p, PKT_FROM_CLIENT) )
- client.reassembler.purge_flushed_ackd();
+ if ( client.reassembler->flush_stream(p, PKT_FROM_CLIENT) )
+ client.reassembler->purge_flushed_ackd();
client.clear_tf_flags(TF_FORCE_FLUSH);
}
return;
tracker.set_tf_flags(TF_FORCE_FLUSH);
- if ( tracker.reassembler.flush_stream(p, dir, final_flush) )
- tracker.reassembler.purge_flushed_ackd();
+ if ( tracker.reassembler->flush_stream(p, dir, final_flush) )
+ tracker.reassembler->purge_flushed_ackd();
tracker.clear_tf_flags(TF_FORCE_FLUSH);
}
if ( !tcp_init )
return;
- //FIXIT-L Cleanup tcp_init and lws_init as they have some side effect in TcpSession::clear_session
- lws_init = false;
+ //FIXIT-L Cleanup tcp_init has some side effect in TcpSession::clear_session
tcp_init = false;
- client.reassembler.flush_queued_segments(flow, true);
- server.reassembler.flush_queued_segments(flow, true);
+ client.reassembler->flush_queued_segments(flow, true);
+ server.reassembler->flush_queued_segments(flow, true);
- lws_init = true;
tcp_init = true;
}
void TcpSession::set_extra_data(Packet* p, uint32_t xid)
{
TcpStreamTracker& st = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ? server : client;
- st.reassembler.set_xtradata_mask(st.reassembler.get_xtradata_mask() | BIT(xid));
+ st.tcp_alerts.set_xtradata_mask(st.tcp_alerts.get_xtradata_mask() | BIT(xid));
}
static inline void set_window_scale(TcpSegmentDescriptor& tsd)
if ( Stream::blocked_flow(p) )
return true;
- if ( flow->ssn_state.ignore_direction != SSN_DIR_NONE )
+ if ( flow->ssn_state.ignore_direction == SSN_DIR_BOTH )
{
- server.set_flush_policy(STREAM_FLPOLICY_IGNORE);
- client.set_flush_policy(STREAM_FLPOLICY_IGNORE);
+ server.set_splitter((StreamSplitter*)nullptr);
+ client.set_splitter((StreamSplitter*)nullptr);
return true;
}
client.set_splitter(tsd.get_flow());
server.set_splitter(tsd.get_flow());
- client.init_flush_policy();
- server.init_flush_policy();
-
if ( tsd.is_packet_from_client() ) // Important if the 3-way handshake's ACK contains data
flow->set_session_flags(SSNFLAG_SEEN_CLIENT);
else
flow->set_session_flags(SSNFLAG_SEEN_SERVER);
check_flow_missed_3whs();
-
set_no_ack(tcp_config->no_ack);
}
return process_tcp_packet(tsd, p);
}
+
+// Initialize flow bookkeeping for a brand-new TCP session: record packet
+// type/protocol on the flow, clear any stale reset flag left by a prior
+// session on this flow, arm the session timeout, and set tcp_init so
+// teardown knows there is session state to clean up.
+void TcpSession::init_new_tcp_session(TcpSegmentDescriptor& tsd)
+{
+ Packet* p = tsd.get_pkt();
+
+ flow->pkt_type = p->type();
+ flow->ip_proto = (uint8_t)p->get_ip_proto_next();
+
+ /* New session, previous was marked as reset. Clear the reset flag. */
+ flow->clear_session_flags(SSNFLAG_RESET);
+
+ flow->set_expire(p, flow->default_session_timeout);
+
+ update_perf_base_state(TcpStreamTracker::TCP_SYN_SENT);
+
+ tcp_init = true;
+}
+
+// A server packet was seen: flag it on the flow, orient the segment
+// descriptor (server = talker, client = listener), and latch the server-side
+// TTL once; meta-ack packets are skipped for TTL purposes.
+void TcpSession::update_session_on_server_packet(TcpSegmentDescriptor& tsd)
+{
+ flow->set_session_flags(SSNFLAG_SEEN_SERVER);
+ tsd.set_talker(server);
+ tsd.set_listener(client);
+
+ if ( !flow->inner_server_ttl && !tsd.is_meta_ack_packet() )
+ flow->set_ttl(tsd.get_pkt(), false);
+}
+
+// A client packet was seen: flag it on the flow, orient the segment
+// descriptor (client = talker, server = listener), and latch the client-side
+// TTL once; meta-ack packets are skipped for TTL purposes.
+void TcpSession::update_session_on_client_packet(TcpSegmentDescriptor& tsd)
+{
+ /* if we got here we have seen the SYN already... */
+ flow->set_session_flags(SSNFLAG_SEEN_CLIENT);
+ tsd.set_talker(client);
+ tsd.set_listener(server);
+
+ if ( !flow->inner_client_ttl && !tsd.is_meta_ack_packet() )
+ flow->set_ttl(tsd.get_pkt(), true);
+}
+
+// No-ack mode is only permitted when both directions flush on data.
+bool TcpSession::can_set_no_ack()
+{
+ return ( server.get_flush_policy() == STREAM_FLPOLICY_ON_DATA and
+ client.get_flush_policy() == STREAM_FLPOLICY_ON_DATA );
+}
+
+// Enable/disable no-ack mode when the current flush policies allow it;
+// returns false (and leaves the mode unchanged) otherwise.
+bool TcpSession::set_no_ack(bool b)
+{
+ if ( can_set_no_ack() )
+ {
+ no_ack = b;
+ return true;
+ }
+ else
+ return false;
+}
+
+// Stop reassembly in both directions for this flow.
+void TcpSession::disable_reassembly(Flow* f)
+{
+ client.disable_reassembly(f);
+ server.disable_reassembly(f);
+}
+
+// True when no gap/missed-packet flags are set for the requested
+// direction(s). Note the cross lookup: the client-direction check inspects
+// the server tracker (which reassembles client data) and vice versa.
+bool TcpSession::is_sequenced(uint8_t dir) const
+{
+ if ( dir & SSN_DIR_FROM_CLIENT )
+ {
+ if ( server.get_tf_flags() & ( TF_MISSING_PREV_PKT | TF_PKT_MISSED ) )
+ return false;
+ }
+
+ if ( dir & SSN_DIR_FROM_SERVER )
+ {
+ if ( client.get_tf_flags() & ( TF_MISSING_PREV_PKT | TF_PKT_MISSED ) )
+ return false;
+ }
+
+ return true;
+}
+
+/* This will falsely return SSN_MISSING_BEFORE on the first reassembled
+ * packet if reassembly for this direction was set mid-session */
+// Reports whether data is missing before and/or after the current
+// reassembled packet, for the first direction bit that matches.
+uint8_t TcpSession::missing_in_reassembled(uint8_t dir) const
+{
+ if ( dir & SSN_DIR_FROM_CLIENT )
+ {
+ if ( (server.get_tf_flags() & TF_MISSING_PKT)
+ && (server.get_tf_flags() & TF_MISSING_PREV_PKT) )
+ return SSN_MISSING_BOTH;
+ else if ( server.get_tf_flags() & TF_MISSING_PREV_PKT )
+ return SSN_MISSING_BEFORE;
+ else if ( server.get_tf_flags() & TF_MISSING_PKT )
+ return SSN_MISSING_AFTER;
+ }
+ else if ( dir & SSN_DIR_FROM_SERVER )
+ {
+ if ( (client.get_tf_flags() & TF_MISSING_PKT)
+ && (client.get_tf_flags() & TF_MISSING_PREV_PKT) )
+ return SSN_MISSING_BOTH;
+ else if ( client.get_tf_flags() & TF_MISSING_PREV_PKT )
+ return SSN_MISSING_BEFORE;
+ else if ( client.get_tf_flags() & TF_MISSING_PKT )
+ return SSN_MISSING_AFTER;
+ }
+
+ return SSN_MISSING_NONE;
+}
+
+// True if any packet was missed in the requested direction(s); same
+// cross-tracker lookup as is_sequenced().
+bool TcpSession::are_packets_missing(uint8_t dir) const
+{
+ if ( dir & SSN_DIR_FROM_CLIENT )
+ {
+ if ( server.get_tf_flags() & TF_PKT_MISSED )
+ return true;
+ }
+
+ if ( dir & SSN_DIR_FROM_SERVER )
+ {
+ if ( client.get_tf_flags() & TF_PKT_MISSED )
+ return true;
+ }
+
+ return false;
+}
+
+// True if client-side segments are still queued awaiting flush.
+bool TcpSession::are_client_segments_queued() const
+{
+ return client.seglist.is_segment_pending_flush();
+}
+
+// Record an alert on the per-direction alert tracker: packets whose source
+// is the flow's client IP are tracked on the server tracker, and vice versa.
+bool TcpSession::add_alert(Packet* p, uint32_t gid, uint32_t sid)
+{
+ TcpStreamTracker& trk = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
+ server : client;
+
+ return trk.tcp_alerts.add_alert(gid, sid);
+}
+
+bool TcpSession::check_alerted(Packet* p, uint32_t gid, uint32_t sid)
+{
+ // alert history is only consulted while processing a rebuilt (reassembled)
+ // packet; a raw wire packet always reports not-alerted
+ if ( !(p->packet_flags & PKT_REBUILT_STREAM) )
+ return false;
+
+ TcpStreamTracker& trk = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
+ server : client;
+
+ return trk.tcp_alerts.check_alerted(gid, sid);
+}
+
+// Attach event id/second to a previously recorded alert on the matching
+// direction's tracker (same client-IP-based selection as add_alert).
+int TcpSession::update_alert(Packet* p, uint32_t gid, uint32_t sid,
+ uint32_t event_id, uint32_t event_second)
+{
+ TcpStreamTracker& trk = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
+ server : client;
+
+ return trk.tcp_alerts.update_alert(gid, sid, event_id, event_second);
+}
+
+// Hold this packet: remember the arrival direction, then hand the packet to
+// the opposite-side tracker (client packets are held by the server tracker
+// and vice versa). Returns whether the tracker accepted the hold.
+bool TcpSession::set_packet_action_to_hold(Packet* p)
+{
+ if ( p->is_from_client() )
+ {
+ held_packet_dir = SSN_DIR_FROM_CLIENT;
+ return server.set_held_packet(p);
+ }
+ else
+ {
+ held_packet_dir = SSN_DIR_FROM_SERVER;
+ return client.set_held_packet(p);
+ }
+}
+
+// Cache the DAQ packet header's interface/group indices, flags and address
+// space id on the session. Values are stored client-relative: copied as-is
+// when the packet is from the client (or egress is unknown), otherwise
+// ingress and egress are swapped.
+void TcpSession::set_packet_header_foo(const TcpSegmentDescriptor& tsd)
+{
+ const Packet* p = tsd.get_pkt();
+
+ if ( tsd.is_packet_from_client() || (p->pkth->egress_index == DAQ_PKTHDR_UNKNOWN
+ && p->pkth->egress_group == DAQ_PKTHDR_UNKNOWN) )
+ {
+ ingress_index = p->pkth->ingress_index;
+ ingress_group = p->pkth->ingress_group;
+ // ssn egress may be unknown at this point, but the stored value will be correct
+ egress_index = p->pkth->egress_index;
+ egress_group = p->pkth->egress_group;
+ }
+ else
+ {
+ egress_index = p->pkth->ingress_index;
+ egress_group = p->pkth->ingress_group;
+ ingress_index = p->pkth->egress_index;
+ ingress_group = p->pkth->egress_group;
+ }
+
+ daq_flags = p->pkth->flags;
+ address_space_id = p->pkth->address_space_id;
+}
+
+// Populate an outgoing DAQ header from the cached client-relative values:
+// copied as-is for client-direction packets (or when egress is unknown),
+// swapped for server-direction packets. opaque is cleared and tenant_id is
+// carried over from the original header.
+void TcpSession::get_packet_header_foo(DAQ_PktHdr_t* pkth, const DAQ_PktHdr_t* orig, uint32_t dir)
+{
+ if ( (dir & PKT_FROM_CLIENT) || (egress_index == DAQ_PKTHDR_UNKNOWN &&
+ egress_group == DAQ_PKTHDR_UNKNOWN) )
+ {
+ pkth->ingress_index = ingress_index;
+ pkth->ingress_group = ingress_group;
+ pkth->egress_index = egress_index;
+ pkth->egress_group = egress_group;
+ }
+ else
+ {
+ pkth->ingress_index = egress_index;
+ pkth->ingress_group = egress_group;
+ pkth->egress_index = ingress_index;
+ pkth->egress_group = ingress_group;
+ }
+ pkth->opaque = 0;
+ pkth->flags = daq_flags;
+ pkth->address_space_id = address_space_id;
+ pkth->tenant_id = orig->tenant_id;
+}
+
+// Clear session state (flow data kept, no segment flush, no restart) if the
+// session was ever initialized.
+void TcpSession::reset()
+{
+ if ( tcp_init )
+ clear_session(true, false, false );
+}
+
+// Tear the session down completely, resetting both trackers. The 'cleaning'
+// flag guards against re-entry while teardown is in progress.
+void TcpSession::cleanup(Packet* p)
+{
+ if ( cleaning )
+ return;
+
+ cleaning = true;
+ clear_session(true, true, false, p);
+ client.reset();
+ server.reset();
+ cleaning = false;
+}
+
+// Clear session state and notify the HA manager of the flow's deletion.
+void TcpSession::clear()
+{
+ if ( tcp_init )
+ clear_session( true, false, false );
+
+ TcpHAManager::process_deletion(*flow);
+}
+
+// Install the given splitter on the tracker for the requested direction.
+void TcpSession::set_splitter(bool to_server, StreamSplitter* ss)
+{
+ TcpStreamTracker& trk = ( to_server ) ? server : client;
+
+ trk.set_splitter(ss);
+}
+
+// MSS applicable to the given direction. Note the cross lookup: to_server
+// reads the client tracker and vice versa (presumably the value the peer
+// advertised) — confirm against TcpStreamTracker::get_mss.
+uint16_t TcpSession::get_mss(bool to_server) const
+{
+ const TcpStreamTracker& trk = (to_server) ? client : server;
+
+ return trk.get_mss();
+}
+
+// TCP options length for the given direction; same cross-tracker lookup
+// convention as get_mss().
+uint8_t TcpSession::get_tcp_options_len(bool to_server) const
+{
+ const TcpStreamTracker& trk = (to_server) ? client : server;
+
+ return trk.get_tcp_options_len();
+}
+
+// Return the splitter handling the given direction (server tracker splits
+// to-server traffic; client tracker splits to-client traffic).
+StreamSplitter* TcpSession::get_splitter(bool to_server)
+{
+ if ( to_server )
+ return server.get_splitter();
+ else
+ return client.get_splitter();
+}
+
+// Switch this flow's normalization policy to proxy mode and re-initialize
+// both normalizers; subsequent normalizations are skipped for this flow.
+void TcpSession::start_proxy()
+{
+ if ( PacketTracer::is_active() )
+ PacketTracer::log("Stream TCP normalization policy set to Proxy mode. Normalizations will be skipped\n");
+
+ tcp_config->policy = StreamPolicy::OS_PROXY;
+ client.normalizer.init(tcp_config->policy, this, &client, &server);
+ server.normalizer.init(tcp_config->policy, this, &server, &client);
+ ++tcpStats.proxy_mode_flows;
+}
+
+// Move the flow into the established state: update perf tracking, session
+// state flags and idle timeout, and publish TCP_ESTABLISHED exactly once
+// (suppressed if a pseudo-established event has already been published).
+void TcpSession::set_established(const TcpSegmentDescriptor& tsd)
+{
+ update_perf_base_state(TcpStreamTracker::TCP_ESTABLISHED);
+ flow->session_state |= STREAM_STATE_ESTABLISHED;
+ flow->set_idle_timeout(this->tcp_config->idle_timeout);
+ if (SSNFLAG_ESTABLISHED != (SSNFLAG_ESTABLISHED & flow->get_session_flags()))
+ {
+ flow->set_session_flags(SSNFLAG_ESTABLISHED);
+ // Only send 1 event
+ if (SSNFLAG_TCP_PSEUDO_EST != (SSNFLAG_TCP_PSEUDO_EST & flow->get_session_flags()))
+ DataBus::publish(Stream::get_pub_id(), StreamEventIds::TCP_ESTABLISHED, tsd.get_pkt());
+ }
+}
+
+// Flag the flow pseudo-established and publish the TCP_ESTABLISHED event.
+void TcpSession::set_pseudo_established(Packet* p)
+{
+ p->flow->ssn_state.session_flags |= SSNFLAG_TCP_PSEUDO_EST;
+ DataBus::publish(Stream::get_pub_id(), StreamEventIds::TCP_ESTABLISHED, p);
+}
+
+// Detect an asynchronous one-sided session: before the flow is established
+// (real or pseudo), if only the initiator is sending but its seq+ack
+// watermark keeps changing between packets, mark the flow pseudo-established
+// and return true.
+bool TcpSession::check_for_one_sided_session(Packet* p)
+{
+ Flow& flow = *p->flow;
+ if ( 0 == ( (SSNFLAG_ESTABLISHED | SSNFLAG_TCP_PSEUDO_EST) & flow.ssn_state.session_flags )
+ && p->is_from_client_originally() )
+ {
+ uint64_t initiator_packets;
+ uint64_t responder_packets;
+ if (flow.flags.client_initiated)
+ {
+ initiator_packets = flow.flowstats.client_pkts;
+ responder_packets = flow.flowstats.server_pkts;
+ }
+ else
+ {
+ initiator_packets = flow.flowstats.server_pkts;
+ responder_packets = flow.flowstats.client_pkts;
+ }
+
+ if ( !responder_packets )
+ {
+ // handle case where traffic is only in one direction, but the sequence numbers
+ // are changing, indicating an asynchronous session
+ uint32_t watermark = p->ptrs.tcph->seq() + p->ptrs.tcph->ack();
+ if ( 1 == initiator_packets )
+ initiator_watermark = watermark;
+ else if ( initiator_watermark != watermark )
+ {
+ set_pseudo_established(p);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// If the flow is not yet established (real or pseudo), mark it
+// pseudo-established once either a one-sided asynchronous session is
+// detected or traffic has been seen in both directions.
+void TcpSession::check_for_pseudo_established(Packet* p)
+{
+ Flow& flow = *p->flow;
+ if ( 0 == ( (SSNFLAG_ESTABLISHED | SSNFLAG_TCP_PSEUDO_EST) & flow.ssn_state.session_flags ) )
+ {
+ if ( check_for_one_sided_session(p) )
+ return;
+ if ( 0 < flow.flowstats.client_pkts && 0 < flow.flowstats.server_pkts )
+ set_pseudo_established(p);
+ }
+}
+
+
#ifndef TCP_SESSION_H
#define TCP_SESSION_H
+#include "flow/flow.h"
+#include "flow/session.h"
+#include "protocols/packet.h"
+
+#include "tcp_event_logger.h"
#include "tcp_state_machine.h"
-#include "tcp_stream_session.h"
+#include "tcp_stream_config.h"
#include "tcp_stream_tracker.h"
namespace snort
class Flow;
struct Packet;
}
-class TcpEventLogger;
+class TcpSegmentDescriptor;
-class TcpSession : public TcpStreamSession
+class TcpSession : public Session
{
public:
TcpSession(snort::Flow*);
void precheck(snort::Packet* p) override;
int process(snort::Packet*) override;
+ void clear() override;
+ void cleanup(snort::Packet* = nullptr) override;
+
+ void set_splitter(bool, snort::StreamSplitter*) override;
+ snort::StreamSplitter* get_splitter(bool) override;
+
+ void disable_reassembly(snort::Flow*) override;
+ uint8_t missing_in_reassembled(uint8_t dir) const override;
+ bool are_client_segments_queued() const override;
+ bool is_sequenced(uint8_t dir) const override;
+ bool are_packets_missing(uint8_t dir) const override;
+ bool set_packet_action_to_hold(snort::Packet*) override;
+
+ bool add_alert(snort::Packet*, uint32_t gid, uint32_t sid) override;
+ bool check_alerted(snort::Packet*, uint32_t gid, uint32_t sid) override;
+ int update_alert(snort::Packet*, uint32_t gid, uint32_t sid,
+ uint32_t event_id, uint32_t event_second) override;
+ void set_extra_data(snort::Packet*, uint32_t /*flag*/) override;
+
void flush() override;
void flush_client(snort::Packet*) override;
void flush_server(snort::Packet*) override;
void flush_talker(snort::Packet*, bool final_flush = false) override;
void flush_listener(snort::Packet*, bool final_flush = false) override;
// cppcheck-suppress virtualCallInConstructor
- void clear_session(bool free_flow_data, bool flush_segments, bool restart, snort::Packet* p = nullptr) override;
- void set_extra_data(snort::Packet*, uint32_t /*flag*/) override;
- void update_perf_base_state(char new_state) override;
- TcpStreamTracker::TcpState get_talker_state(TcpSegmentDescriptor& tsd) override;
- TcpStreamTracker::TcpState get_listener_state(TcpSegmentDescriptor& tsd) override;
- void update_timestamp_tracking(TcpSegmentDescriptor&) override;
- void update_session_on_rst(TcpSegmentDescriptor&, bool) override;
- bool handle_syn_on_reset_session(TcpSegmentDescriptor&) override;
- void handle_data_on_syn(TcpSegmentDescriptor&) override;
- void update_ignored_session(TcpSegmentDescriptor&) override;
- void update_paws_timestamps(TcpSegmentDescriptor&) override;
- void check_for_repeated_syn(TcpSegmentDescriptor&) override;
- void check_for_session_hijack(TcpSegmentDescriptor&) override;
- bool check_for_window_slam(TcpSegmentDescriptor& tsd) override;
- void mark_packet_for_drop(TcpSegmentDescriptor&) override;
- void handle_data_segment(TcpSegmentDescriptor&, bool flush = true);
- bool validate_packet_established_session(TcpSegmentDescriptor&) override;
+
+ void reset();
+ void start_proxy();
+ void clear_session(bool free_flow_data, bool flush_segments, bool restart, snort::Packet* p = nullptr);
+ TcpStreamTracker::TcpState get_talker_state(const TcpSegmentDescriptor& tsd);
+ TcpStreamTracker::TcpState get_listener_state(const TcpSegmentDescriptor& tsd);
+ TcpStreamTracker::TcpState get_peer_state(const TcpStreamTracker* me)
+ { return me == &client ? server.get_tcp_state() : client.get_tcp_state(); }
+
+ void init_new_tcp_session(TcpSegmentDescriptor&);
+ void update_perf_base_state(char new_state);
+ void update_timestamp_tracking(TcpSegmentDescriptor&);
+ void update_paws_timestamps(TcpSegmentDescriptor&);
+ void update_session_on_rst(const TcpSegmentDescriptor&, bool);
+ bool handle_syn_on_reset_session(TcpSegmentDescriptor&);
+ void handle_data_on_syn(TcpSegmentDescriptor&);
+ void update_ignored_session(TcpSegmentDescriptor&);
bool is_midstream_allowed(const TcpSegmentDescriptor& tsd)
{ return tcp_config->midstream_allowed(tsd.get_pkt()); }
+ uint16_t get_mss(bool to_server) const;
+ uint8_t get_tcp_options_len(bool to_server) const;
+
+ void get_packet_header_foo(DAQ_PktHdr_t*, const DAQ_PktHdr_t* orig, uint32_t dir);
+ bool can_set_no_ack();
+ bool set_no_ack(bool);
+ bool no_ack_mode_enabled() { return no_ack; }
+
+ void generate_no_3whs_event()
+ {
+ if ( generate_3whs_alert && flow->two_way_traffic())
+ {
+ tel.set_tcp_event(EVENT_NO_3WHS);
+ generate_3whs_alert = false;
+ }
+ }
+
+ void set_pkt_action_flag(uint32_t flag)
+ { pkt_action_mask |= flag; }
+
+ void set_established(const TcpSegmentDescriptor&);
+ void set_pseudo_established(snort::Packet*);
+ void check_for_pseudo_established(snort::Packet*);
+ bool check_for_one_sided_session(snort::Packet*);
+
+ void check_for_repeated_syn(TcpSegmentDescriptor&);
+ void check_for_session_hijack(TcpSegmentDescriptor&);
+ bool check_for_window_slam(TcpSegmentDescriptor& tsd);
+ void mark_packet_for_drop(TcpSegmentDescriptor&);
+ void handle_data_segment(TcpSegmentDescriptor&, bool flush = true);
+ bool validate_packet_established_session(TcpSegmentDescriptor&);
+
+ TcpStreamTracker client;
+ TcpStreamTracker server;
+ TcpStreamConfig* tcp_config = nullptr;
+ TcpEventLogger tel;
+ bool tcp_init = false;
+ uint32_t pkt_action_mask = ACTION_NOTHING;
+ uint32_t initiator_watermark = 0;
+ int32_t ingress_index = DAQ_PKTHDR_UNKNOWN;
+ int32_t egress_index = DAQ_PKTHDR_UNKNOWN;
+ int16_t ingress_group = DAQ_PKTHDR_UNKNOWN;
+ int16_t egress_group = DAQ_PKTHDR_UNKNOWN;
+ uint32_t daq_flags = 0;
+ uint32_t address_space_id = 0;
+ bool generate_3whs_alert = true;
+ bool cleaning = false;
+ uint8_t held_packet_dir = SSN_DIR_NONE;
+ uint8_t ecn = 0;
+
private:
int process_tcp_packet(TcpSegmentDescriptor&, const snort::Packet*);
- void set_os_policy() override;
- void update_stream_order(const TcpSegmentDescriptor&, bool aligned);
+ void set_os_policy();
void swap_trackers();
void init_session_on_syn(TcpSegmentDescriptor&);
void init_session_on_synack(TcpSegmentDescriptor&);
bool check_reassembly_queue_thresholds(TcpSegmentDescriptor&, TcpStreamTracker*);
bool filter_packet_for_reassembly(TcpSegmentDescriptor&, TcpStreamTracker*);
void check_small_segment_threshold(const TcpSegmentDescriptor&, TcpStreamTracker*);
- int32_t kickstart_asymmetric_flow(const TcpSegmentDescriptor&, TcpStreamTracker*);
void check_flow_missed_3whs();
-private:
+ void set_packet_header_foo(const TcpSegmentDescriptor&);
+ void update_session_on_server_packet(TcpSegmentDescriptor&);
+ void update_session_on_client_packet(TcpSegmentDescriptor&);
+
TcpStateMachine* tsm;
- bool splitter_init;
+ bool splitter_init = false;
+ bool no_ack = false;
};
#endif
#include "tcp_session.h"
-#ifdef UNIT_TEST
-#include "catch/snort_catch.h"
-#endif
-
using namespace snort;
TcpStateClosed::TcpStateClosed(TcpStateMachine& tsm) :
#include "tcp_state_machine.h"
-#ifdef UNIT_TEST
-#include "catch/snort_catch.h"
-#endif
-
using namespace std;
TcpStateHandler::TcpStateHandler(TcpStreamTracker::TcpState state, TcpStateMachine& tsm)
{
if ( trk.normalizer.is_tcp_ips_enabled() )
{
- trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
- trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
+ trk.eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
{
if ( trk.normalizer.is_tcp_ips_enabled() )
{
- trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
- trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
+ trk.eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
{
if ( trk.normalizer.is_tcp_ips_enabled() )
{
- trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
- trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
+ trk.eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
{
if ( trk.normalizer.is_tcp_ips_enabled() )
{
- trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
- trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
+ trk.eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
bool TcpStateMidStreamSent::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
trk.update_tracker_ack_recv(tsd);
- trk.reassembler.set_seglist_base_seq(tsd.get_seq());
+ trk.seglist.set_seglist_base_seq(tsd.get_seq());
trk.session->handle_data_segment(tsd);
trk.session->set_established(tsd);
trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
bool TcpStateSynSent::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
trk.update_tracker_ack_sent(tsd);
+ if ( SEQ_GT(tsd.get_ack(), trk.get_rcv_nxt()) )
+ trk.set_rcv_nxt(tsd.get_ack());
trk.session->update_timestamp_tracking(tsd);
if ( trk.session->flow->two_way_traffic() )
{
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// tcp_stream_session.cc author davis mcpherson <davmcphe@cisco.com>
-// Created on: Feb 18, 2016
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include "tcp_stream_session.h"
-
-#include "framework/data_bus.h"
-#include "packet_io/packet_tracer.h"
-#include "pub_sub/stream_event_ids.h"
-#include "stream/stream.h"
-#include "stream/tcp/tcp_ha.h"
-
-using namespace snort;
-
-TcpStreamSession::TcpStreamSession(Flow* f)
- : Session(f), client(true), server(false)
-{ }
-
-TcpStreamSession::~TcpStreamSession() = default;
-
-void TcpStreamSession::init_new_tcp_session(TcpSegmentDescriptor& tsd)
-{
- Packet* p = tsd.get_pkt();
-
- flow->pkt_type = p->type();
- flow->ip_proto = (uint8_t)p->get_ip_proto_next();
-
- /* New session, previous was marked as reset. Clear the reset flag. */
- flow->clear_session_flags(SSNFLAG_RESET);
-
- flow->set_expire(p, flow->default_session_timeout);
-
- update_perf_base_state(TcpStreamTracker::TCP_SYN_SENT);
-
- tcp_init = true;
- lws_init = true;
-}
-
-void TcpStreamSession::update_session_on_server_packet(TcpSegmentDescriptor& tsd)
-{
- flow->set_session_flags(SSNFLAG_SEEN_SERVER);
- tsd.set_talker(server);
- tsd.set_listener(client);
-
- if ( !flow->inner_server_ttl && !tsd.is_meta_ack_packet() )
- flow->set_ttl(tsd.get_pkt(), false);
-}
-
-void TcpStreamSession::update_session_on_client_packet(TcpSegmentDescriptor& tsd)
-{
- /* if we got here we have seen the SYN already... */
- flow->set_session_flags(SSNFLAG_SEEN_CLIENT);
- tsd.set_talker(client);
- tsd.set_listener(server);
-
- if ( !flow->inner_client_ttl && !tsd.is_meta_ack_packet() )
- flow->set_ttl(tsd.get_pkt(), true);
-}
-
-bool TcpStreamSession::can_set_no_ack()
-{
- return ( server.get_flush_policy() == STREAM_FLPOLICY_ON_DATA and
- client.get_flush_policy() == STREAM_FLPOLICY_ON_DATA );
-}
-
-bool TcpStreamSession::set_no_ack(bool b)
-{
- if ( can_set_no_ack() )
- {
- no_ack = b;
- return true;
- }
- else
- return false;
-}
-
-void TcpStreamSession::disable_reassembly(Flow* f)
-{
- client.set_splitter((StreamSplitter*)nullptr);
- server.set_splitter((StreamSplitter*)nullptr);
-
- client.reassembler.purge_segment_list();
- server.reassembler.purge_segment_list();
-
- client.set_flush_policy(STREAM_FLPOLICY_IGNORE);
- server.set_flush_policy(STREAM_FLPOLICY_IGNORE);
-
- client.finalize_held_packet(f);
- server.finalize_held_packet(f);
-}
-
-uint8_t TcpStreamSession::get_reassembly_direction() const
-{
- uint8_t dir = SSN_DIR_NONE;
-
- if ( server.get_flush_policy() != STREAM_FLPOLICY_IGNORE )
- dir |= SSN_DIR_FROM_CLIENT;
-
- if ( client.get_flush_policy() != STREAM_FLPOLICY_IGNORE )
- dir |= SSN_DIR_FROM_SERVER;
-
- return dir;
-}
-
-bool TcpStreamSession::is_sequenced(uint8_t dir) const
-{
- if ( dir & SSN_DIR_FROM_CLIENT )
- {
- if ( server.get_tf_flags() & ( TF_MISSING_PREV_PKT | TF_PKT_MISSED ) )
- return false;
- }
-
- if ( dir & SSN_DIR_FROM_SERVER )
- {
- if ( client.get_tf_flags() & ( TF_MISSING_PREV_PKT | TF_PKT_MISSED ) )
- return false;
- }
-
- return true;
-}
-
-/* This will falsely return SSN_MISSING_BEFORE on the first reassembled
- * packet if reassembly for this direction was set mid-session */
-uint8_t TcpStreamSession::missing_in_reassembled(uint8_t dir) const
-{
- if ( dir & SSN_DIR_FROM_CLIENT )
- {
- if ( (server.get_tf_flags() & TF_MISSING_PKT)
- && (server.get_tf_flags() & TF_MISSING_PREV_PKT) )
- return SSN_MISSING_BOTH;
- else if ( server.get_tf_flags() & TF_MISSING_PREV_PKT )
- return SSN_MISSING_BEFORE;
- else if ( server.get_tf_flags() & TF_MISSING_PKT )
- return SSN_MISSING_AFTER;
- }
- else if ( dir & SSN_DIR_FROM_SERVER )
- {
- if ( (client.get_tf_flags() & TF_MISSING_PKT)
- && (client.get_tf_flags() & TF_MISSING_PREV_PKT) )
- return SSN_MISSING_BOTH;
- else if ( client.get_tf_flags() & TF_MISSING_PREV_PKT )
- return SSN_MISSING_BEFORE;
- else if ( client.get_tf_flags() & TF_MISSING_PKT )
- return SSN_MISSING_AFTER;
- }
-
- return SSN_MISSING_NONE;
-}
-
-bool TcpStreamSession::are_packets_missing(uint8_t dir) const
-{
- if ( dir & SSN_DIR_FROM_CLIENT )
- {
- if ( server.get_tf_flags() & TF_PKT_MISSED )
- return true;
- }
-
- if ( dir & SSN_DIR_FROM_SERVER )
- {
- if ( client.get_tf_flags() & TF_PKT_MISSED )
- return true;
- }
-
- return false;
-}
-
-bool TcpStreamSession::are_client_segments_queued() const
-{
- return client.reassembler.is_segment_pending_flush();
-}
-
-bool TcpStreamSession::add_alert(Packet* p, uint32_t gid, uint32_t sid)
-{
- TcpReassemblerPolicy& trp = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
- server.reassembler : client.reassembler;
-
- return trp.add_alert(gid, sid);
-}
-
-bool TcpStreamSession::check_alerted(Packet* p, uint32_t gid, uint32_t sid)
-{
- // only check for alert on wire packet if this when processing a rebuilt packet
- if ( !(p->packet_flags & PKT_REBUILT_STREAM) )
- return false;
-
- TcpReassemblerPolicy& trp = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
- server.reassembler : client.reassembler;
-
- return trp.check_alerted(gid, sid);
-}
-
-int TcpStreamSession::update_alert(Packet* p, uint32_t gid, uint32_t sid,
- uint32_t event_id, uint32_t event_second)
-{
- TcpReassemblerPolicy& trp = p->ptrs.ip_api.get_src()->equals(flow->client_ip) ?
- server.reassembler : client.reassembler;
-
- return trp.update_alert(gid, sid, event_id, event_second);
-}
-
-bool TcpStreamSession::set_packet_action_to_hold(Packet* p)
-{
- if ( p->is_from_client() )
- {
- held_packet_dir = SSN_DIR_FROM_CLIENT;
- return server.set_held_packet(p);
- }
- else
- {
- held_packet_dir = SSN_DIR_FROM_SERVER;
- return client.set_held_packet(p);
- }
-}
-
-void TcpStreamSession::set_packet_header_foo(const TcpSegmentDescriptor& tsd)
-{
- const Packet* p = tsd.get_pkt();
-
- if ( tsd.is_packet_from_client() || (p->pkth->egress_index == DAQ_PKTHDR_UNKNOWN
- && p->pkth->egress_group == DAQ_PKTHDR_UNKNOWN) )
- {
- ingress_index = p->pkth->ingress_index;
- ingress_group = p->pkth->ingress_group;
- // ssn egress may be unknown, but will be correct
- egress_index = p->pkth->egress_index;
- egress_group = p->pkth->egress_group;
- }
- else
- {
- egress_index = p->pkth->ingress_index;
- egress_group = p->pkth->ingress_group;
- ingress_index = p->pkth->egress_index;
- ingress_group = p->pkth->egress_group;
- }
-
- daq_flags = p->pkth->flags;
- address_space_id = p->pkth->address_space_id;
-}
-
-void TcpStreamSession::get_packet_header_foo(DAQ_PktHdr_t* pkth, const DAQ_PktHdr_t* orig, uint32_t dir)
-{
- if ( (dir & PKT_FROM_CLIENT) || (egress_index == DAQ_PKTHDR_UNKNOWN &&
- egress_group == DAQ_PKTHDR_UNKNOWN) )
- {
- pkth->ingress_index = ingress_index;
- pkth->ingress_group = ingress_group;
- pkth->egress_index = egress_index;
- pkth->egress_group = egress_group;
- }
- else
- {
- pkth->ingress_index = egress_index;
- pkth->ingress_group = egress_group;
- pkth->egress_index = ingress_index;
- pkth->egress_group = ingress_group;
- }
- pkth->opaque = 0;
- pkth->flags = daq_flags;
- pkth->address_space_id = address_space_id;
- pkth->tenant_id = orig->tenant_id;
-}
-
-void TcpStreamSession::reset()
-{
- if ( tcp_init )
- clear_session(true, false, false );
-}
-
-void TcpStreamSession::cleanup(Packet* p)
-{
- if ( cleaning )
- return;
-
- cleaning = true;
- clear_session(true, true, false, p);
- client.normalizer.reset();
- server.normalizer.reset();
- client.reassembler.reset();
- server.reassembler.reset();
- cleaning = false;
-}
-
-void TcpStreamSession::clear()
-{
- if ( tcp_init )
- clear_session( true, false, false );
-
- TcpHAManager::process_deletion(*flow);
-}
-
-void TcpStreamSession::set_splitter(bool to_server, StreamSplitter* ss)
-{
- TcpStreamTracker& trk = ( to_server ) ? server : client;
-
- trk.set_splitter(ss);
-}
-
-uint16_t TcpStreamSession::get_mss(bool to_server) const
-{
- const TcpStreamTracker& trk = (to_server) ? client : server;
-
- return trk.get_mss();
-}
-
-uint8_t TcpStreamSession::get_tcp_options_len(bool to_server) const
-{
- const TcpStreamTracker& trk = (to_server) ? client : server;
-
- return trk.get_tcp_options_len();
-}
-
-StreamSplitter* TcpStreamSession::get_splitter(bool to_server)
-{
- if ( to_server )
- return server.get_splitter();
- else
- return client.get_splitter();
-}
-
-void TcpStreamSession::start_proxy()
-{
- if ( PacketTracer::is_active() )
- PacketTracer::log("Stream TCP normalization policy set to Proxy mode. Normalizations will be skipped\n");
-
- tcp_config->policy = StreamPolicy::OS_PROXY;
- client.normalizer.init(tcp_config->policy, this, &client, &server);
- server.normalizer.init(tcp_config->policy, this, &server, &client);
- ++tcpStats.proxy_mode_flows;
-}
-
-void TcpStreamSession::set_established(const TcpSegmentDescriptor& tsd)
-{
- update_perf_base_state(TcpStreamTracker::TCP_ESTABLISHED);
- flow->session_state |= STREAM_STATE_ESTABLISHED;
- flow->set_idle_timeout(this->tcp_config->idle_timeout);
- if (SSNFLAG_ESTABLISHED != (SSNFLAG_ESTABLISHED & flow->get_session_flags()))
- {
- flow->set_session_flags(SSNFLAG_ESTABLISHED);
- // Only send 1 event
- if (SSNFLAG_TCP_PSEUDO_EST != (SSNFLAG_TCP_PSEUDO_EST & flow->get_session_flags()))
- DataBus::publish(Stream::get_pub_id(), StreamEventIds::TCP_ESTABLISHED, tsd.get_pkt());
- }
-}
-
-void TcpStreamSession::set_pseudo_established(Packet* p)
-{
- p->flow->ssn_state.session_flags |= SSNFLAG_TCP_PSEUDO_EST;
- DataBus::publish(Stream::get_pub_id(), StreamEventIds::TCP_ESTABLISHED, p);
-}
-
-bool TcpStreamSession::check_for_one_sided_session(Packet* p)
-{
- Flow& flow = *p->flow;
- if ( 0 == ( (SSNFLAG_ESTABLISHED | SSNFLAG_TCP_PSEUDO_EST) & flow.ssn_state.session_flags )
- && p->is_from_client_originally() )
- {
- uint64_t initiator_packets;
- uint64_t responder_packets;
- if (flow.flags.client_initiated)
- {
- initiator_packets = flow.flowstats.client_pkts;
- responder_packets = flow.flowstats.server_pkts;
- }
- else
- {
- initiator_packets = flow.flowstats.server_pkts;
- responder_packets = flow.flowstats.client_pkts;
- }
-
- if ( !responder_packets )
- {
- // handle case where traffic is only in one direction, but the sequence numbers
- // are changing indicating an asynchronous session
- uint32_t watermark = p->ptrs.tcph->seq() + p->ptrs.tcph->ack();
- if ( 1 == initiator_packets )
- initiator_watermark = watermark;
- else if ( initiator_watermark != watermark )
- {
- set_pseudo_established(p);
- return true;
- }
- }
- }
- return false;
-}
-
-void TcpStreamSession::check_for_pseudo_established(Packet* p)
-{
- Flow& flow = *p->flow;
- if ( 0 == ( (SSNFLAG_ESTABLISHED | SSNFLAG_TCP_PSEUDO_EST) & flow.ssn_state.session_flags ) )
- {
- if ( check_for_one_sided_session(p) )
- return;
- if ( 0 < flow.flowstats.client_pkts && 0 < flow.flowstats.server_pkts )
- set_pseudo_established(p);
- }
-}
-
+++ /dev/null
-//--------------------------------------------------------------------------
-// Copyright (C) 2015-2024 Cisco and/or its affiliates. All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License Version 2 as published
-// by the Free Software Foundation. You may not use, modify or distribute
-// this program under any other version of the GNU General Public License.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-// General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along
-// with this program; if not, write to the Free Software Foundation, Inc.,
-// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-//--------------------------------------------------------------------------
-
-// tcp_stream_session.h author davis mcpherson <davmcphe@cisco.com>
-// Created on: Feb 18, 2016
-
-#ifndef TCP_STREAM_SESSION_H
-#define TCP_STREAM_SESSION_H
-
-#include "detection/detection_engine.h"
-#include "flow/session.h"
-#include "protocols/ipv6.h"
-
-#include "tcp_stream_config.h"
-#include "tcp_stream_tracker.h"
-
-// FIXIT-L session tracking could be split from reassembly
-// into a separate module a la ip_session.cc and ip_defrag.cc
-// (of course defrag should also be cleaned up)
-class TcpStreamSession : public Session
-{
-public:
- ~TcpStreamSession() override;
-
- void clear() override;
- void cleanup(snort::Packet* = nullptr) override;
-
- void set_splitter(bool, snort::StreamSplitter*) override;
- snort::StreamSplitter* get_splitter(bool) override;
-
- bool is_sequenced(uint8_t dir) const override;
- bool are_packets_missing(uint8_t dir) const override;
-
- void disable_reassembly(snort::Flow*) override;
- uint8_t get_reassembly_direction() const override;
- uint8_t missing_in_reassembled(uint8_t dir) const override;
- bool are_client_segments_queued() const override;
-
- bool add_alert(snort::Packet*, uint32_t gid, uint32_t sid) override;
- bool check_alerted(snort::Packet*, uint32_t gid, uint32_t sid) override;
- int update_alert(snort::Packet*, uint32_t gid, uint32_t sid,
- uint32_t event_id, uint32_t event_second) override;
-
- bool set_packet_action_to_hold(snort::Packet*) override;
-
- uint16_t get_mss(bool to_server) const;
- uint8_t get_tcp_options_len(bool to_server) const;
-
- void reset();
- void start_proxy();
-
- void set_packet_header_foo(const TcpSegmentDescriptor&);
- void get_packet_header_foo(DAQ_PktHdr_t*, const DAQ_PktHdr_t* orig, uint32_t dir);
- bool can_set_no_ack();
- bool set_no_ack(bool);
- bool no_ack_mode_enabled() { return no_ack; }
- virtual void update_perf_base_state(char) = 0;
- virtual void clear_session(
- bool free_flow_data, bool flush_segments, bool restart, snort::Packet* p = nullptr) = 0;
- virtual TcpStreamTracker::TcpState get_talker_state(TcpSegmentDescriptor&) = 0;
- virtual TcpStreamTracker::TcpState get_listener_state(TcpSegmentDescriptor&) = 0;
- TcpStreamTracker::TcpState get_peer_state(const TcpStreamTracker* me)
- { return me == &client ? server.get_tcp_state() : client.get_tcp_state(); }
-
- virtual void init_new_tcp_session(TcpSegmentDescriptor&);
- virtual void update_timestamp_tracking(TcpSegmentDescriptor&) = 0;
- virtual void update_session_on_server_packet(TcpSegmentDescriptor&);
- virtual void update_session_on_client_packet(TcpSegmentDescriptor&);
- virtual void update_session_on_rst(TcpSegmentDescriptor&, bool) = 0;
- virtual bool handle_syn_on_reset_session(TcpSegmentDescriptor&) = 0;
- virtual void handle_data_on_syn(TcpSegmentDescriptor&) = 0;
- virtual void update_ignored_session(TcpSegmentDescriptor&) = 0;
- void generate_no_3whs_event()
- {
- if ( generate_3whs_alert && flow->two_way_traffic())
- {
- tel.set_tcp_event(EVENT_NO_3WHS);
- generate_3whs_alert = false;
- }
- }
-
- void set_pkt_action_flag(uint32_t flag)
- { pkt_action_mask |= flag; }
-
- void set_established(const TcpSegmentDescriptor&);
- void set_pseudo_established(snort::Packet*);
- bool check_for_one_sided_session(snort::Packet*);
- void check_for_pseudo_established(snort::Packet*);
-
- virtual void update_paws_timestamps(TcpSegmentDescriptor&) = 0;
- virtual void check_for_repeated_syn(TcpSegmentDescriptor&) = 0;
- virtual void check_for_session_hijack(TcpSegmentDescriptor&) = 0;
- virtual bool check_for_window_slam(TcpSegmentDescriptor&) = 0;
- virtual void mark_packet_for_drop(TcpSegmentDescriptor&) = 0;
- virtual bool validate_packet_established_session(TcpSegmentDescriptor&) = 0;
-
- TcpStreamTracker client;
- TcpStreamTracker server;
- bool lws_init = false;
- bool tcp_init = false;
- uint32_t pkt_action_mask = ACTION_NOTHING;
- uint32_t initiator_watermark = 0;
- int32_t ingress_index = DAQ_PKTHDR_UNKNOWN;
- int32_t egress_index = DAQ_PKTHDR_UNKNOWN;
- int16_t ingress_group = DAQ_PKTHDR_UNKNOWN;
- int16_t egress_group = DAQ_PKTHDR_UNKNOWN;
- uint32_t daq_flags = 0;
- uint32_t address_space_id = 0;
- bool generate_3whs_alert = true;
- TcpStreamConfig* tcp_config = nullptr;
- TcpEventLogger tel;
- bool cleaning = false;
- uint8_t held_packet_dir = SSN_DIR_NONE;
- uint8_t ecn = 0;
-
-private:
- bool no_ack = false;
-
-protected:
- TcpStreamSession(snort::Flow*);
- virtual void set_os_policy() = 0;
-};
-
-#endif
-
#include "main/analyzer.h"
#include "main/snort.h"
#include "packet_io/active.h"
+#include "packet_io/packet_tracer.h"
#include "profiler/profiler_defs.h"
#include "protocols/eth.h"
#include "pub_sub/stream_event_ids.h"
#include "stream/stream.h"
#include "held_packet_queue.h"
-#include "segment_overlap_editor.h"
+#include "tcp_overlap_resolver.h"
#include "tcp_normalizers.h"
-#include "tcp_reassemblers.h"
+#include "tcp_reassembler.h"
+#include "tcp_reassembler_ids.h"
+#include "tcp_reassembler_ips.h"
#include "tcp_session.h"
using namespace snort;
TcpStreamTracker::TcpStreamTracker(bool client) :
tcp_state(client ? TCP_STATE_NONE : TCP_LISTEN), client_tracker(client),
held_packet(null_iterator)
-{ }
+{
+ reassembler = new TcpReassemblerIgnore(*this, seglist);
+ reassembler->init(!client_tracker, nullptr);
+}
TcpStreamTracker::~TcpStreamTracker()
-{ if (splitter != nullptr) splitter->go_away(); }
+{
+ delete reassembler;
+
+ if( oaitw_reassembler )
+ {
+ delete oaitw_reassembler;
+ oaitw_reassembler = nullptr;
+ }
+
+ if ( splitter )
+ splitter->go_away();
+}
+
+void TcpStreamTracker::reset()
+{
+ tcp_alerts.clear();
+ normalizer.reset();
+ seglist.reset();
+ reassembler->reset_paf();
+}
+
+void TcpStreamTracker::clear_tracker(snort::Flow* flow, snort::Packet* p, bool flush_segments, bool restart)
+{
+ if ( flush_segments )
+ reassembler->flush_queued_segments(flow, true, p);
+
+ if ( p )
+ finalize_held_packet(p);
+ else
+ finalize_held_packet(flow);
+
+ seglist.purge_segment_list();
+
+ if ( restart )
+ reassembler->reset_paf();
+ else
+ reassembler->clear_paf();
+
+ set_splitter((StreamSplitter*)nullptr);
+}
+
+int TcpStreamTracker::eval_flush_policy_on_ack(snort::Packet* p)
+{
+ if( oaitw_reassembler )
+ {
+ delete oaitw_reassembler;
+ oaitw_reassembler = nullptr;
+ }
+
+ reassembler->eval_flush_policy_on_ack(p);
+
+ return 0;
+}
+
+int TcpStreamTracker::eval_flush_policy_on_data(snort::Packet* p)
+{
+ if( oaitw_reassembler )
+ {
+ delete oaitw_reassembler;
+ oaitw_reassembler = nullptr;
+ }
+
+ reassembler->eval_flush_policy_on_data(p);
+
+ return 0;
+}
TcpStreamTracker::TcpEvent TcpStreamTracker::set_tcp_event(const TcpSegmentDescriptor& tsd)
{
}
}
-
void TcpStreamTracker::set_fin_seq_status_seen(const TcpSegmentDescriptor& tsd)
{
if ( !fin_seq_set and SEQ_GEQ(tsd.get_end_seq(), r_win_base) )
fin_i_seq = tsd.get_seq();
fin_final_seq = tsd.get_end_seq();
fin_seq_set = true;
- fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_SEEN;
+ fin_seq_status = FIN_WITH_SEQ_SEEN;
}
}
-void TcpStreamTracker::init_tcp_state()
+void TcpStreamTracker::init_tcp_state(TcpSession* s)
{
+ session = s;
tcp_state = ( client_tracker ) ?
TcpStreamTracker::TCP_STATE_NONE : TcpStreamTracker::TCP_LISTEN;
mac_addr_valid = false;
fin_i_seq = 0;
fin_final_seq = 0;
- fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
+ fin_seq_status = FIN_NOT_SEEN;
fin_seq_set = false;
rst_pkt_sent = false;
order = TcpStreamTracker::IN_SEQUENCE;
held_packet = null_iterator;
+
flush_policy = STREAM_FLPOLICY_IGNORE;
- reassembler.reset();
- splitter_finish_flag = false;
+ if( oaitw_reassembler )
+ {
+ delete oaitw_reassembler;
+ oaitw_reassembler = nullptr;
+ }
+ if ( reassembler )
+ delete reassembler;
+ reassembler = new TcpReassemblerIgnore(*this, seglist);
+ reassembler->init(!client_tracker, nullptr);
+
+ normalizer.reset();
+ seglist.reset();
+ tcp_alerts.clear();
}
-//-------------------------------------------------------------------------
-// flush policy stuff
-//-------------------------------------------------------------------------
+void TcpStreamTracker::update_stream_order(const TcpSegmentDescriptor& tsd, bool aligned)
+{
+ uint32_t seq = tsd.get_seq();
-void TcpStreamTracker::init_flush_policy()
+ switch ( order )
+ {
+ case TcpStreamTracker::IN_SEQUENCE:
+ if ( aligned )
+ tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
+ else if ( SEQ_GT(seq, rcv_nxt) )
+ {
+ order = TcpStreamTracker::NONE;
+ hole_left_edge = rcv_nxt;
+ hole_right_edge = seq - 1;
+ }
+ break;
+
+ case TcpStreamTracker::NONE:
+ if ( aligned )
+ {
+ tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
+ if ( SEQ_GT(tsd.get_end_seq(), hole_right_edge) )
+ order = TcpStreamTracker::OUT_OF_SEQUENCE;
+ else
+ hole_left_edge = tsd.get_end_seq();
+ }
+ else
+ {
+ if ( SEQ_LEQ(seq, hole_right_edge) )
+ {
+ if ( SEQ_GT(seq, hole_left_edge) )
+ hole_right_edge = seq - 1;
+ else if ( SEQ_GT(tsd.get_end_seq(), hole_left_edge) )
+ {
+ hole_left_edge = tsd.get_end_seq();
+ tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
+ }
+ }
+ // accounting for overlaps when not aligned
+ if ( SEQ_GT(hole_left_edge, hole_right_edge) )
+ order = TcpStreamTracker::OUT_OF_SEQUENCE;
+ }
+ break;
+
+ case TcpStreamTracker::OUT_OF_SEQUENCE:
+ tsd.set_packet_flags(PKT_STREAM_ORDER_BAD);
+ }
+}
+
+void TcpStreamTracker::update_flush_policy(StreamSplitter* splitter)
{
- if ( !splitter )
- flush_policy = STREAM_FLPOLICY_IGNORE;
- else if ( normalizer.is_tcp_ips_enabled() )
- flush_policy = STREAM_FLPOLICY_ON_DATA;
+ if( oaitw_reassembler )
+ {
+ delete oaitw_reassembler;
+ oaitw_reassembler = nullptr;
+ }
+
+ if ( reassembler and flush_policy == reassembler->get_flush_policy() )
+ {
+ reassembler->init(!client_tracker, splitter);
+ return;
+ }
+
+ if ( flush_policy == STREAM_FLPOLICY_IGNORE )
+ {
+ // switching to Ignore flush policy...save pointer to current reassembler to delete later
+ if ( reassembler )
+ oaitw_reassembler = reassembler;
+
+ reassembler = new TcpReassemblerIgnore(*this, seglist);
+ reassembler->init(!client_tracker, splitter);
+ }
+ else if ( flush_policy == STREAM_FLPOLICY_ON_DATA )
+ {
+ if ( reassembler )
+ {
+ // update from IDS -> IPS is not supported
+ assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_ACK );
+ delete reassembler;
+ }
+
+ reassembler = new TcpReassemblerIps(*this, seglist);
+ reassembler->init(!client_tracker, splitter);
+ }
else
- flush_policy = STREAM_FLPOLICY_ON_ACK;
+ {
+ if ( reassembler )
+ {
+ // update from IPS -> IDS is not supported
+ assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_DATA );
+ delete reassembler;
+ }
+
+ reassembler = new TcpReassemblerIds(*this, seglist);
+ reassembler->init(!client_tracker, splitter);
+ }
}
void TcpStreamTracker::set_splitter(StreamSplitter* ss)
{
if ( splitter )
+ {
+ reassembler->release_splitter();
splitter->go_away();
+ }
splitter = ss;
-
- if ( !splitter )
- flush_policy = STREAM_FLPOLICY_IGNORE;
+ if ( ss )
+ {
+ if ( normalizer.is_tcp_ips_enabled() )
+ flush_policy = STREAM_FLPOLICY_ON_DATA;
+ else
+ flush_policy = STREAM_FLPOLICY_ON_ACK;
+ }
else
- reassembler.setup_paf();
+ flush_policy = STREAM_FLPOLICY_IGNORE;
+
+ update_flush_policy(ss);
}
void TcpStreamTracker::set_splitter(const Flow* flow)
ins = flow->clouseau;
if ( ins )
- set_splitter(ins->get_splitter(!client_tracker) );
+ set_splitter(ins->get_splitter(!client_tracker));
else
- set_splitter(new AtomSplitter(!client_tracker) );
+ set_splitter(new AtomSplitter(!client_tracker));
}
-bool TcpStreamTracker::splitter_finish(snort::Flow* flow)
+static inline bool both_splitters_aborted(Flow* flow)
{
- if (!splitter)
- return true;
+ uint32_t both_splitters_yoinked = (SSNFLAG_ABORT_CLIENT | SSNFLAG_ABORT_SERVER);
+ return (flow->get_session_flags() & both_splitters_yoinked) == both_splitters_yoinked;
+}
+
+void TcpStreamTracker::fallback()
+{
+#ifndef NDEBUG
+ assert(splitter);
+
+ // FIXIT-L: consolidate these 3
+ //bool to_server = splitter->to_server();
+ //assert(server_side == to_server && server_side == !tracker.client_tracker);
+#endif
+
+ set_splitter(new AtomSplitter(!client_tracker));
+ tcpStats.partial_fallbacks++;
+
+ Flow* flow = session->flow;
+ if ( !client_tracker )
+ flow->set_session_flags(SSNFLAG_ABORT_SERVER);
+ else
+ flow->set_session_flags(SSNFLAG_ABORT_CLIENT);
- if (!splitter_finish_flag)
+ if ( flow->gadget and both_splitters_aborted(flow) )
{
- splitter_finish_flag = true;
- return splitter->finish(flow);
+ flow->clear_gadget();
+
+ if (flow->clouseau)
+ flow->clear_clouseau();
+
+ tcpStats.inspector_fallbacks++;
}
- // there shouldn't be any un-flushed data beyond this point,
- // returning false here, discards it
- return false;
+}
+
+void TcpStreamTracker::disable_reassembly(Flow* f)
+{
+ set_splitter((StreamSplitter*)nullptr);
+ seglist.reset();
+ reassembler->reset_paf();
+ finalize_held_packet(f);
}
void TcpStreamTracker::init_on_syn_sent(TcpSegmentDescriptor& tsd)
tcpStats.sessions_on_syn++;
}
-void TcpStreamTracker::init_on_syn_recv(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::init_on_syn_recv(const TcpSegmentDescriptor& tsd)
{
irs = tsd.get_seq();
rcv_nxt = tsd.get_seq() + 1;
r_win_base = tsd.get_seq() + 1;
- reassembler.set_seglist_base_seq(tsd.get_seq() + 1);
+ seglist.set_seglist_base_seq(tsd.get_seq() + 1);
cache_mac_address(tsd, FROM_CLIENT);
tcp_state = TcpStreamTracker::TCP_SYN_RECV;
r_win_base = tsd.get_ack();
rcv_nxt = tsd.get_ack();
- reassembler.set_seglist_base_seq(tsd.get_ack() );
+ seglist.set_seglist_base_seq(tsd.get_ack() );
ts_last_packet = tsd.get_packet_timestamp();
tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
tcpStats.sessions_on_syn_ack++;
}
-void TcpStreamTracker::init_on_synack_recv(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::init_on_synack_recv(const TcpSegmentDescriptor& tsd)
{
iss = tsd.get_ack() - 1;
irs = tsd.get_seq();
rcv_nxt = tsd.get_seq() + 1;
r_win_base = tsd.get_seq() + 1;
- reassembler.set_seglist_base_seq(tsd.get_seq() + 1);
+ seglist.set_seglist_base_seq(tsd.get_seq() + 1);
cache_mac_address(tsd, FROM_SERVER);
if ( TcpStreamTracker::TCP_SYN_SENT == tcp_state )
r_win_base = tsd.get_ack();
rcv_nxt = tsd.get_ack();
- reassembler.set_seglist_base_seq(tsd.get_ack());
+ seglist.set_seglist_base_seq(tsd.get_ack());
ts_last_packet = tsd.get_packet_timestamp();
tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
tcp_state = TcpStreamTracker::TCP_MID_STREAM_SENT;
}
-void TcpStreamTracker::init_on_data_seg_recv(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::init_on_data_seg_recv(const TcpSegmentDescriptor& tsd)
{
iss = tsd.get_ack() - 1;
irs = tsd.get_seq() - 1;
rcv_nxt = tsd.get_seq();
r_win_base = tsd.get_seq();
- reassembler.set_seglist_base_seq(tsd.get_seq());
+ seglist.set_seglist_base_seq(tsd.get_seq());
cache_mac_address(tsd, tsd.get_direction() );
tcpStats.sessions_on_data++;
tf_flags |= ( tsd.init_mss(&mss) | tsd.init_wscale(&wscale) );
}
-void TcpStreamTracker::finish_client_init(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::finish_client_init(const TcpSegmentDescriptor& tsd)
{
Flow* flow = tsd.get_flow();
rcv_nxt = tsd.get_end_seq();
- if ( reassembler.data_was_queued() )
+ if ( seglist.data_was_queued() )
return; // we already have state, don't mess it up
if ( !( flow->session_state & STREAM_STATE_MIDSTREAM ) )
{
if ( tsd.get_tcph()->is_syn() )
- reassembler.set_seglist_base_seq(tsd.get_seq() + 1);
+ seglist.set_seglist_base_seq(tsd.get_seq() + 1);
else
- reassembler.set_seglist_base_seq(tsd.get_seq());
+ seglist.set_seglist_base_seq(tsd.get_seq());
r_win_base = tsd.get_end_seq();
}
else
{
- reassembler.set_seglist_base_seq(tsd.get_seq());
+ seglist.set_seglist_base_seq(tsd.get_seq());
r_win_base = tsd.get_seq();
}
}
}
// In no-ack policy, data is implicitly acked immediately.
-void TcpStreamTracker::update_tracker_no_ack_recv(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::update_tracker_no_ack_recv(const TcpSegmentDescriptor& tsd)
{
snd_una = snd_nxt = tsd.get_end_seq();
}
-void TcpStreamTracker::update_tracker_no_ack_sent(TcpSegmentDescriptor& tsd)
+void TcpStreamTracker::update_tracker_no_ack_sent(const TcpSegmentDescriptor& tsd)
{
r_win_base = tsd.get_end_seq();
- reassembler.flush_on_ack_policy(tsd.get_pkt());
+ eval_flush_policy_on_ack(tsd.get_pkt());
}
void TcpStreamTracker::update_tracker_ack_sent(TcpSegmentDescriptor& tsd)
snd_wnd = tsd.get_wnd();
}
- if ( ( fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_SEEN )
+ if ( flush_policy == STREAM_FLPOLICY_IGNORE
+ and SEQ_GT(tsd.get_ack(), rcv_nxt) )
+ rcv_nxt = tsd.get_ack();
+
+ if ( ( fin_seq_status == FIN_WITH_SEQ_SEEN )
&& SEQ_GEQ(tsd.get_ack(), fin_final_seq + 1) && !(tsd.is_meta_ack_packet()) )
{
- fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_ACKED;
+ fin_seq_status = FIN_WITH_SEQ_ACKED;
}
- reassembler.flush_on_ack_policy(tsd.get_pkt());
+ eval_flush_policy_on_ack(tsd.get_pkt());
}
bool TcpStreamTracker::update_on_3whs_ack(TcpSegmentDescriptor& tsd)
return true;
}
+int32_t TcpStreamTracker::kickstart_asymmetric_flow(const TcpSegmentDescriptor& tsd, uint32_t max_queued_bytes)
+{
+ seglist.skip_holes();
+
+ if ( reassembler->is_splitter_paf() )
+ fallback();
+ else
+ reassembler->reset_paf();
+
+ eval_flush_policy_on_data(tsd.get_pkt());
+
+ int32_t space_left = max_queued_bytes - seglist.get_seg_bytes_total();
+
+ if ( get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV )
+ {
+ set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
+ if (PacketTracer::is_active())
+            PacketTracer::log("Stream: Kickstart of midstream asymmetric flow! Seglist queue space: %d\n",
+ space_left );
+ }
+ else
+ {
+ if (PacketTracer::is_active())
+            PacketTracer::log("Stream: Kickstart of asymmetric flow! Seglist queue space: %d\n",
+ space_left );
+ }
+
+ return space_left;
+}
+
void TcpStreamTracker::perform_fin_recv_flush(TcpSegmentDescriptor& tsd)
{
if ( tsd.is_data_segment() )
if ( flush_policy == STREAM_FLPOLICY_ON_DATA and SEQ_EQ(tsd.get_end_seq(), rcv_nxt)
and !tsd.get_flow()->searching_for_service() )
- reassembler.finish_and_final_flush(tsd.get_flow(), true, tsd.get_pkt());
+ reassembler->finish_and_final_flush(tsd.get_flow(), true, tsd.get_pkt());
}
uint32_t TcpStreamTracker::perform_partial_flush()
if ( held_packet != null_iterator )
{
Packet* p;
- flushed = reassembler.perform_partial_flush(session->flow, p);
+ flushed = reassembler->perform_partial_flush(session->flow, p);
// If the held_packet hasn't been released by perform_partial_flush(),
// call finalize directly.
tcpStats.held_packets_passed++;
}
- TcpStreamSession* tcp_session = (TcpStreamSession*)cp->flow->session;
+ TcpSession* tcp_session = (TcpSession*)cp->flow->session;
tcp_session->held_packet_dir = SSN_DIR_NONE;
}
}
else
{
- TcpStreamSession* tcp_session = (TcpStreamSession*)flow->session;
+ TcpSession* tcp_session = (TcpSession*)flow->session;
tcp_session->held_packet_dir = SSN_DIR_NONE;
Analyzer::get_local_analyzer()->finalize_daq_message(msg, DAQ_VERDICT_PASS);
tcpStats.held_packets_passed++;
#include <cstdint>
#include <list>
-#include "stream/paf.h"
-
-#include "segment_overlap_editor.h"
+#include "tcp_alerts.h"
#include "tcp_defs.h"
#include "tcp_module.h"
#include "tcp_normalizers.h"
-#include "tcp_reassemblers.h"
+#include "tcp_reassembler.h"
+#include "tcp_reassembly_segments.h"
+#include "tcp_segment_node.h"
#include "tcp_segment_descriptor.h"
extern const char* tcp_state_names[];
}
class HeldPacket;
-class TcpReassembler;
class TcpSession;
+enum FinSeqNumStatus : uint8_t { FIN_NOT_SEEN, FIN_WITH_SEQ_SEEN, FIN_WITH_SEQ_ACKED };
+
class TcpStreamTracker
{
public:
enum PacketOrder : uint8_t { OUT_OF_SEQUENCE, IN_SEQUENCE, NONE };
- enum FinSeqNumStatus : uint8_t { FIN_NOT_SEEN, FIN_WITH_SEQ_SEEN, FIN_WITH_SEQ_ACKED };
-
TcpStreamTracker(bool client);
- virtual ~TcpStreamTracker();
+ ~TcpStreamTracker();
+
+ void reset();
+ void clear_tracker(snort::Flow*, snort::Packet*, bool flush_segments, bool restart);
+
+ int eval_flush_policy_on_ack(snort::Packet*);
+ int eval_flush_policy_on_data(snort::Packet*);
+ void update_stream_order(const TcpSegmentDescriptor&, bool aligned);
+
+ void fallback();
bool is_client_tracker() const
{ return client_tracker; }
bool is_rst_pkt_sent() const
{ return rst_pkt_sent; }
- void set_flush_policy(FlushPolicy policy)
- { flush_policy = policy; }
-
+ void set_flush_policy(FlushPolicy policy);
FlushPolicy get_flush_policy() const
{ return flush_policy; }
- virtual void init_tcp_state();
- virtual void init_flush_policy();
- virtual void set_splitter(snort::StreamSplitter* ss);
- virtual void set_splitter(const snort::Flow* flow);
+ void set_order(uint8_t order)
+ { this->order = order; }
+
+ void init_tcp_state(TcpSession*);
+ void set_splitter(snort::StreamSplitter* ss);
+ void set_splitter(const snort::Flow* flow);
snort::StreamSplitter* get_splitter()
{ return splitter; }
- bool is_splitter_paf() const
- { return splitter && splitter->is_paf(); }
-
- bool splitter_finish(snort::Flow* flow);
-
- bool is_reassembly_enabled() const
- { return ( splitter and (flush_policy != STREAM_FLPOLICY_IGNORE) ); }
-
- virtual void init_on_syn_sent(TcpSegmentDescriptor&);
- virtual void init_on_syn_recv(TcpSegmentDescriptor&);
- virtual void init_on_synack_sent(TcpSegmentDescriptor&);
- virtual void init_on_synack_recv(TcpSegmentDescriptor&);
- virtual void init_on_data_seg_sent(TcpSegmentDescriptor&);
- virtual void init_on_data_seg_recv(TcpSegmentDescriptor&);
- virtual void finish_server_init(TcpSegmentDescriptor&);
- virtual void finish_client_init(TcpSegmentDescriptor&);
-
- virtual void update_tracker_ack_recv(TcpSegmentDescriptor&);
- virtual void update_tracker_ack_sent(TcpSegmentDescriptor&);
- virtual void update_tracker_no_ack_recv(TcpSegmentDescriptor&);
- virtual void update_tracker_no_ack_sent(TcpSegmentDescriptor&);
- virtual bool update_on_3whs_ack(TcpSegmentDescriptor&);
- virtual bool update_on_rst_recv(TcpSegmentDescriptor&);
- virtual void update_on_rst_sent();
- virtual bool update_on_fin_recv(TcpSegmentDescriptor&);
- virtual bool update_on_fin_sent(TcpSegmentDescriptor&);
- virtual bool is_segment_seq_valid(TcpSegmentDescriptor&);
+ void disable_reassembly(snort::Flow* f);
+
+ void init_on_syn_sent(TcpSegmentDescriptor&);
+ void init_on_syn_recv(const TcpSegmentDescriptor&);
+ void init_on_synack_sent(TcpSegmentDescriptor&);
+ void init_on_synack_recv(const TcpSegmentDescriptor&);
+ void init_on_data_seg_sent(TcpSegmentDescriptor&);
+ void init_on_data_seg_recv(const TcpSegmentDescriptor&);
+ void finish_server_init(TcpSegmentDescriptor&);
+ void finish_client_init(const TcpSegmentDescriptor&);
+
+ void update_tracker_ack_recv(TcpSegmentDescriptor&);
+ void update_tracker_ack_sent(TcpSegmentDescriptor&);
+ void update_tracker_no_ack_recv(const TcpSegmentDescriptor&);
+ void update_tracker_no_ack_sent(const TcpSegmentDescriptor&);
+ bool update_on_3whs_ack(TcpSegmentDescriptor&);
+ bool update_on_rst_recv(TcpSegmentDescriptor&);
+ void update_on_rst_sent();
+ bool update_on_fin_recv(TcpSegmentDescriptor&);
+ bool update_on_fin_sent(TcpSegmentDescriptor&);
+ bool is_segment_seq_valid(TcpSegmentDescriptor&);
bool set_held_packet(snort::Packet*);
bool is_retransmit_of_held_packet(snort::Packet*);
void finalize_held_packet(snort::Packet*);
void finalize_held_packet(snort::Flow*);
void perform_fin_recv_flush(TcpSegmentDescriptor&);
+ int32_t kickstart_asymmetric_flow(const TcpSegmentDescriptor& tsd, uint32_t max_queued_bytes);
uint32_t perform_partial_flush();
- bool is_holding_packet() const { return held_packet != null_iterator; }
+ bool is_holding_packet() const
+ { return held_packet != null_iterator; }
// max_remove < 0 means time out all eligible packets.
// Return whether there are more packets that need to be released.
static void thread_term();
public:
- uint32_t snd_una = 0; // SND.UNA - send unacknowledged
- uint32_t snd_nxt = 0; // SND.NXT - send next
- uint32_t snd_wnd = 0; // SND.WND - send window
- uint32_t snd_wl1 = 0; // SND.WL1 - segment sequence number used for last window update
- uint32_t snd_wl2 = 0; // SND.WL2 - segment acknowledgment number used for last window update
- uint32_t iss = 0; // ISS - initial send sequence number
+ uint32_t snd_una = 0; // SND.UNA - send unacknowledged
+ uint32_t snd_nxt = 0; // SND.NXT - send next
+ uint32_t snd_wnd = 0; // SND.WND - send window
+ uint32_t snd_wl1 = 0; // SND.WL1 - segment sequence number used for last window update
+ uint32_t snd_wl2 = 0; // SND.WL2 - segment acknowledgment number used for last window update
+ uint32_t iss = 0; // ISS - initial send sequence number
- uint32_t rcv_nxt = 0; // RCV.NXT - receive next
- uint32_t rcv_wnd = 0; // RCV.WND - receive window
- uint32_t irs = 0; // IRS - initial receive sequence number
+ uint32_t rcv_nxt = 0; // RCV.NXT - receive next
+ uint32_t rcv_wnd = 0; // RCV.WND - receive window
+ uint32_t irs = 0; // IRS - initial receive sequence number
- uint16_t snd_up = 0; // SND.UP - send urgent pointer
- uint16_t rcv_up = 0; // RCV.UP - receive urgent pointer
+ uint16_t snd_up = 0; // SND.UP - send urgent pointer
+ uint16_t rcv_up = 0; // RCV.UP - receive urgent pointer
uint32_t held_pkt_seq = 0;
- uint32_t hole_left_edge = 0; // First left hole
- uint32_t hole_right_edge = 0;
TcpState tcp_state;
TcpEvent tcp_event = TCP_MAX_EVENTS;
// FIXIT-L make these non-public
public:
TcpNormalizerPolicy normalizer;
- TcpReassemblerPolicy reassembler;
+ TcpReassemblySegments seglist;
+ TcpReassembler* reassembler = nullptr;
+ TcpReassembler* oaitw_reassembler = nullptr;
TcpSession* session = nullptr;
+ TcpAlerts tcp_alerts;
uint32_t r_win_base = 0; // remote side window base sequence number (the last ack we got)
uint32_t small_seg_count = 0;
- uint8_t order = IN_SEQUENCE;
- FinSeqNumStatus fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
+ FinSeqNumStatus fin_seq_status = FIN_NOT_SEEN;
+
+private:
+ void update_flush_policy(snort::StreamSplitter*);
-protected:
+ snort::StreamSplitter* splitter = nullptr;
static const std::list<HeldPacket>::iterator null_iterator;
std::list<HeldPacket>::iterator held_packet;
- snort::StreamSplitter* splitter = nullptr;
uint32_t ts_last_packet = 0;
uint32_t ts_last = 0; // last timestamp (for PAWS)
uint32_t fin_final_seq = 0;
FlushPolicy flush_policy = STREAM_FLPOLICY_IGNORE;
bool mac_addr_valid = false;
bool fin_seq_set = false; // FIXIT-M should be obviated by tcp state
- bool splitter_finish_flag = false;
+
+ uint8_t order = IN_SEQUENCE;
+ uint32_t hole_left_edge = 0; // First left hole
+ uint32_t hole_right_edge = 0;
};
// <--- note -- the 'state' parameter must be a reference
flow->ssn_state.session_flags, flow->client_port, flow->server_port);
}
-inline void TraceSegments(const TcpReassemblerPolicy& trp, const snort::Packet* p)
+inline void TraceSegments(const TcpReassemblySegments& seglist, const snort::Packet* p)
{
- const TcpSegmentNode* tsn = trp.trs.sos.seglist.head;
- uint32_t sx = trp.trs.tracker->r_win_base;
+ const TcpSegmentNode* tsn = seglist.head;
+ uint32_t sx = seglist.tracker->r_win_base;
unsigned segs = 0;
unsigned bytes = 0;
std::stringstream ss;
while ( tsn )
{
- if ( SEQ_LT(sx, tsn->i_seq) )
- ss << " +" << tsn->i_seq - sx;
- else if ( SEQ_GT(sx, tsn->i_seq) )
- ss << " -" << sx - tsn->i_seq;
+ uint32_t seq = tsn->start_seq();
- ss << " " << tsn->i_len;
+ if ( SEQ_LT(sx, seq) )
+ ss << " +" << seq - sx;
- if ( tsn->c_len and tsn->c_len != tsn->i_len )
- {
- ss << "(" << tsn->offset << "|" << tsn->c_len;
- ss << "|" << tsn->i_len-tsn->offset-tsn->c_len << ")";
- }
+ else if ( SEQ_GT(sx, seq) )
+ ss << " -" << sx - seq;
+ ss << " " << tsn->length;
+
+ if ( tsn->cursor and tsn->unscanned() > 0 )
+ ss << "(" << tsn->cursor << "|" << tsn->unscanned() << ")";
segs++;
- bytes += tsn->i_len;
- sx = tsn->i_seq + tsn->i_len;
+ bytes += tsn->length;
+ sx = seq + tsn->length;
tsn = tsn->next;
}
if ( !ss.str().empty() )
trace_logf(DEFAULT_TRACE_LOG_LEVEL, stream_tcp_trace, TRACE_SEGMENTS, p, " %s\n", ss.str().c_str());
- assert(trp.trs.sos.seg_count == segs);
- assert(trp.trs.sos.seg_bytes_logical == bytes);
+ assert(seglist.seg_count == segs);
+ assert(seglist.seg_bytes_logical == bytes);
}
inline void TraceState(const TcpStreamTracker& a, const TcpStreamTracker& b, const char* s,
s, stream_tcp_state_to_str(a), ua, ns, a.get_snd_wnd( ),
RMT(a, rcv_nxt, b), RMT(a, r_win_base, b), a.get_iss(), a.get_irs());
- unsigned paf = a.is_splitter_paf() ? 2 : 0;
+ unsigned paf = a.reassembler->is_splitter_paf() ? 2 : 0;
unsigned fpt = a.get_flush_policy() ? 192 : 0;
trace_logf(DEFAULT_TRACE_LOG_LEVEL, stream_tcp_trace, TRACE_STATE, p,
" FP=%s:%-4u SC=%-4u FL=%-4u SL=%-5u BS=%-4u\n",
flushxt[a.get_flush_policy() + paf], fpt,
- a.reassembler.get_seg_count(), a.reassembler.get_flush_count(),
- a.reassembler.get_seg_bytes_logical(),
- a.reassembler.get_seglist_base_seq() - b.get_iss());
+ a.seglist.get_seg_count(), a.seglist.get_flush_count(),
+ a.seglist.get_seg_bytes_logical(),
+ a.seglist.get_seglist_base_seq() - b.get_iss());
- TraceSegments(a.reassembler, p);
+ TraceSegments(a.seglist, p);
}
void S5TraceTCP(const TcpSegmentDescriptor& tsd, const snort::Packet* p)
TraceEvent(tsd, txd, rxd, p);
- if ( ssn->lws_init )
+ if ( ssn->tcp_init )
TraceSession(tsd.get_flow(), p);
TraceState(cli, srv, cdir, p);
void TcpSession::set_extra_data(Packet*, unsigned int){ }
bool TcpSession::is_sequenced(unsigned char) const { return true; }
bool TcpSession::are_packets_missing(unsigned char) const { return false; }
-uint8_t TcpSession::get_reassembly_direction() const { return 0; }
uint8_t TcpSession::missing_in_reassembled(unsigned char) const { return 0; }
class TcpSessionMock : public TcpSession