git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Pull request #4400: stream_tcp: fix snort crash when processing held packet on flow...
author Davis McPherson -X (davmcphe - XORIANT CORPORATION at Cisco) <davmcphe@cisco.com>
Fri, 11 Oct 2024 14:42:38 +0000 (14:42 +0000)
committer Steven Baigal (sbaigal) <sbaigal@cisco.com>
Fri, 11 Oct 2024 14:42:38 +0000 (14:42 +0000)
Merge in SNORT/snort3 from ~DAVMCPHE/snort3:tcp_reassembly_rewrite_u3.0 to master

Squashed commit of the following:

commit 4aeb81a4548c117b0d5595b03f5d3a4860ee8c0d
Author: davis mcpherson <davmcphe@cisco.com>
Date:   Fri Jul 26 12:37:12 2024 -0400

    stream_tcp: streamline allocation and release of reassemblers, tweak ips flush_on_data process

    stream_tcp: implement the ignore flush policy reassembler as a singleton to improve performance;
    make all TcpReassembler base class public methods virtual and override each in the ignore flush
    policy subclass as a no-op
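
The singleton described above is a classic null-object: every public entry point of the reassembler is virtual, and the ignore-policy subclass overrides each one as a no-op so that a single shared instance can serve every flow that is not reassembling. The following is only a minimal sketch of that idea; the names (Reassembler, IgnoreReassembler, g_ignore_reassembler, select_reassembler) are illustrative stand-ins, not the actual Snort3 declarations changed below.

// Minimal sketch of the singleton "ignore" reassembler pattern from the commit
// message. All names here are stand-ins, not the real Snort3 types.
class Packet;  // opaque stand-in; never dereferenced in this sketch

class Reassembler
{
public:
    virtual ~Reassembler() = default;

    // Public entry points are virtual so a policy subclass can override them.
    virtual int eval_flush_policy_on_data(Packet*) = 0;
    virtual int eval_flush_policy_on_ack(Packet*) = 0;
    virtual void purge_flushed_ackd() { /* default behavior elided */ }
};

class IgnoreReassembler : public Reassembler
{
public:
    // Every override is a no-op: flows with the ignore flush policy do no work.
    int eval_flush_policy_on_data(Packet*) override { return 0; }
    int eval_flush_policy_on_ack(Packet*) override { return 0; }
    void purge_flushed_ackd() override { }
};

// One shared instance serves all trackers using the ignore policy, so switching
// a flow to "ignore" no longer allocates (or later frees) a reassembler object.
static IgnoreReassembler g_ignore_reassembler;

Reassembler* select_reassembler(bool ignore_policy, Reassembler* per_flow)
{
    return ignore_policy ? &g_ignore_reassembler : per_flow;
}

In the patch itself the shared instance is the file-scope tcp_ignore_reassembler created in tcp_stream_tracker.cc, and update_flush_policy() installs it instead of allocating a new TcpReassemblerIgnore per flow.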

14 files changed:
src/stream/tcp/dev_notes.txt
src/stream/tcp/tcp_reassembler.cc
src/stream/tcp/tcp_reassembler.h
src/stream/tcp/tcp_reassembler_ids.cc
src/stream/tcp/tcp_reassembler_ids.h
src/stream/tcp/tcp_reassembler_ips.cc
src/stream/tcp/tcp_reassembler_ips.h
src/stream/tcp/tcp_reassembly_segments.cc
src/stream/tcp/tcp_reassembly_segments.h
src/stream/tcp/tcp_session.cc
src/stream/tcp/tcp_session.h
src/stream/tcp/tcp_state_mid_stream_recv.cc
src/stream/tcp/tcp_stream_tracker.cc
src/stream/tcp/tcp_stream_tracker.h

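A consequence visible throughout the diffs below is that TcpReassembler now holds its tracker and segment list as pointers rather than references, which lets the shared ignore-policy instance be constructed with nullptr arguments, and TcpStreamTracker's destructor only deletes reassemblers whose flush policy is not STREAM_FLPOLICY_IGNORE. A hedged sketch of that ownership rule follows; the names (Tracker, Reassembler, IgnoreReassembler, FlushPolicy) are simplified stand-ins rather than the real declarations.

// Illustrative sketch of the ownership rule implied by the patch: a tracker
// deletes its per-flow reassembler but never the shared ignore-policy singleton.
enum FlushPolicy { FLPOLICY_IGNORE, FLPOLICY_ON_ACK, FLPOLICY_ON_DATA };

class Reassembler
{
public:
    virtual ~Reassembler() = default;
    virtual FlushPolicy get_flush_policy() const = 0;
};

class IgnoreReassembler : public Reassembler
{
public:
    FlushPolicy get_flush_policy() const override { return FLPOLICY_IGNORE; }
};

static IgnoreReassembler g_ignore_reassembler;   // shared, never deleted

class Tracker
{
public:
    ~Tracker()
    {
        // Only per-flow reassemblers are owned by the tracker; the singleton is
        // shared by every ignore-policy flow and must outlive them all.
        if ( reassembler && reassembler->get_flush_policy() != FLPOLICY_IGNORE )
            delete reassembler;
    }

    Reassembler* reassembler = &g_ignore_reassembler;
};
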
index 26ba69430f637bb53fa038872aeecf2df37b2158..07673015d80b5c26c18712dbaa1b16ea6e812cfd 100644 (file)
@@ -1,3 +1,4 @@
+
 This directory contains the implementation of TCP session tracking and
 processing functions.  When the network protocol for a flow is determined
 to be TCP the base Stream preprocessor will delegate handling of the
index e7255ec57fd281bce2a53473cb29069aa248e8e5..df49f076369566e3119ec343081e25008c050560 100644 (file)
@@ -47,10 +47,10 @@ void TcpReassembler::init(bool server, StreamSplitter* ss)
 {
     splitter = ss;
     paf.paf_setup(ss);
-    if ( seglist.cur_rseg )
-        seglist.cur_sseg = seglist.cur_rseg;
+    if ( seglist->cur_rseg )
+        seglist->cur_sseg = seglist->cur_rseg;
     else
-        seglist.cur_sseg = seglist.head;
+        seglist->cur_sseg = seglist->head;
 
     server_side = server;
 
@@ -68,14 +68,14 @@ void TcpReassembler::init(bool server, StreamSplitter* ss)
 
 bool TcpReassembler::fin_no_gap(const TcpSegmentNode& tsn)
 {
-    return tracker.fin_seq_status >= FIN_WITH_SEQ_SEEN
-        and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
+    return tracker->fin_seq_status >= FIN_WITH_SEQ_SEEN
+        and SEQ_GEQ(tsn.next_seq(), tracker->get_fin_i_seq());
 }
 
 bool TcpReassembler::fin_acked_no_gap(const TcpSegmentNode& tsn)
 {
-    return tracker.fin_seq_status >= FIN_WITH_SEQ_ACKED
-        and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
+    return tracker->fin_seq_status >= FIN_WITH_SEQ_ACKED
+        and SEQ_GEQ(tsn.next_seq(), tracker->get_fin_i_seq());
 }
 
 // If we are skipping seglist hole, update tsn so that we can purge
@@ -83,7 +83,7 @@ void TcpReassembler::update_skipped_bytes(uint32_t remaining_bytes)
 {
     TcpSegmentNode* tsn;
 
-    while ( remaining_bytes and (tsn = seglist.cur_rseg) )
+    while ( remaining_bytes and (tsn = seglist->cur_rseg) )
     {
         auto bytes_skipped = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
 
@@ -92,23 +92,23 @@ void TcpReassembler::update_skipped_bytes(uint32_t remaining_bytes)
 
         if ( !tsn->unscanned() )
         {
-            seglist.flush_count++;
-            seglist.update_next(tsn);
+            seglist->flush_count++;
+            seglist->update_next(tsn);
         }
     }
 }
 
 void TcpReassembler::purge_to_seq(uint32_t flush_seq)
 {
-    seglist.purge_flushed_segments(flush_seq);
+    seglist->purge_flushed_segments(flush_seq);
 
     if ( last_pdu )
     {
-        tracker.tcp_alerts.purge_alerts(*last_pdu, tracker.normalizer.is_tcp_ips_enabled());
+        tracker->tcp_alerts.purge_alerts(*last_pdu, tracker->normalizer.is_tcp_ips_enabled());
         last_pdu = nullptr;
     }
     else
-        tracker.tcp_alerts.purge_alerts(seglist.session->flow);
+        tracker->tcp_alerts.purge_alerts(seglist->session->flow);
 }
 
 // must only purge flushed and acked bytes we may flush partial segments
@@ -118,29 +118,29 @@ void TcpReassembler::purge_to_seq(uint32_t flush_seq)
 //   (if we reassemble such)
 void TcpReassembler::purge_flushed_ackd()
 {
-    if ( !seglist.head )
+    if ( !seglist->head )
         return;
 
-    uint32_t seq = seglist.head->start_seq();
-    TcpSegmentNode* tsn = seglist.head;
+    uint32_t seq = seglist->head->start_seq();
+    TcpSegmentNode* tsn = seglist->head;
     while ( tsn && !tsn->unscanned() )
     {
         uint32_t end = tsn->next_seq();
 
-        if ( SEQ_GT(end, tracker.r_win_base) )
+        if ( SEQ_GT(end, tracker->r_win_base) )
             break;
 
         seq = end;
         tsn = tsn->next;
     }
 
-    if ( !SEQ_EQ(seq, seglist.head->start_seq()) )
+    if ( !SEQ_EQ(seq, seglist->head->start_seq()) )
         purge_to_seq(seq);
 }
 
 void TcpReassembler::show_rebuilt_packet(Packet* pkt)
 {
-    if ( seglist.session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
+    if ( seglist->session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
     {
         // FIXIT-L setting conf here is required because this is called before context start
         pkt->context->conf = SnortConfig::get_conf();
@@ -153,13 +153,13 @@ int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
 {
     uint32_t flags = PKT_PDU_HEAD;
 
-    uint32_t to_seq = seglist.cur_rseg->scan_seq() + flush_len;
+    uint32_t to_seq = seglist->cur_rseg->scan_seq() + flush_len;
     uint32_t remaining_bytes = flush_len;
     uint32_t total_flushed = 0;
 
     while ( remaining_bytes )
     {
-        TcpSegmentNode* tsn = seglist.cur_rseg;
+        TcpSegmentNode* tsn = seglist->cur_rseg;
         unsigned bytes_to_copy = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
 
         remaining_bytes -= bytes_to_copy;
@@ -169,7 +169,7 @@ int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
             assert( bytes_to_copy >= tsn->unscanned() );
 
         unsigned bytes_copied = 0;
-        const StreamBuffer sb = splitter->reassemble(seglist.session->flow, flush_len, total_flushed,
+        const StreamBuffer sb = splitter->reassemble(seglist->session->flow, flush_len, total_flushed,
             tsn->paf_data(), bytes_to_copy, flags, bytes_copied);
 
         if ( sb.data )
@@ -184,8 +184,8 @@ int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
 
         if ( !tsn->unscanned() )
         {
-            seglist.flush_count++;
-            seglist.update_next(tsn);
+            seglist->flush_count++;
+            seglist->update_next(tsn);
         }
 
         /* Check for a gap/missing packet */
@@ -194,15 +194,15 @@ int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
         {
             // FIXIT-H // assert(false); find when this scenario happens
             // FIXIT-L this is suboptimal - better to exclude fin from to_seq
-            if ( !tracker.is_fin_seq_set() or
-                SEQ_LEQ(to_seq, tracker.get_fin_final_seq()) )
+            if ( !tracker->is_fin_seq_set() or
+                SEQ_LEQ(to_seq, tracker->get_fin_final_seq()) )
             {
-                tracker.set_tf_flags(TF_MISSING_PKT);
+                tracker->set_tf_flags(TF_MISSING_PKT);
             }
             break;
         }
 
-        if ( sb.data || !seglist.cur_rseg )
+        if ( sb.data || !seglist->cur_rseg )
             break;
     }
 
@@ -267,9 +267,9 @@ Packet* TcpReassembler::initialize_pdu(Packet* p, uint32_t pkt_flags, struct tim
 
     EncodeFlags enc_flags = 0;
     DAQ_PktHdr_t pkth;
-    seglist.session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
+    seglist->session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
     PacketManager::format_tcp(enc_flags, p, pdu, PSEUDO_PKT_TCP, &pkth, pkth.opaque);
-    prep_pdu(seglist.session->flow, p, pkt_flags, pdu);
+    prep_pdu(seglist->session->flow, p, pkt_flags, pdu);
     assert(pdu->pkth == pdu->context->pkth);
     pdu->context->pkth->ts = tv;
     pdu->dsize = 0;
@@ -291,18 +291,18 @@ Packet* TcpReassembler::initialize_pdu(Packet* p, uint32_t pkt_flags, struct tim
 // flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
 int TcpReassembler::flush_to_seq(uint32_t bytes, Packet* p, uint32_t pkt_flags)
 {
-    assert( p && seglist.cur_rseg);
+    assert( p && seglist->cur_rseg);
 
-    tracker.clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
+    tracker->clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
 
-    TcpSegmentNode* tsn = seglist.cur_rseg;
-    assert( seglist.seglist_base_seq == tsn->scan_seq());
+    TcpSegmentNode* tsn = seglist->cur_rseg;
+    assert( seglist->seglist_base_seq == tsn->scan_seq());
 
     Packet* pdu = initialize_pdu(p, pkt_flags, tsn->tv);
     int32_t flushed_bytes = flush_data_segments(bytes, pdu);
     assert( flushed_bytes );
 
-    seglist.seglist_base_seq += flushed_bytes;
+    seglist->seglist_base_seq += flushed_bytes;
 
     if ( pdu->data )
     {
@@ -322,7 +322,7 @@ int TcpReassembler::flush_to_seq(uint32_t bytes, Packet* p, uint32_t pkt_flags)
         else
             last_pdu = nullptr;
 
-        tracker.finalize_held_packet(p);
+        tracker->finalize_held_packet(p);
     }
     else
     {
@@ -331,14 +331,14 @@ int TcpReassembler::flush_to_seq(uint32_t bytes, Packet* p, uint32_t pkt_flags)
     }
 
     // FIXIT-L abort should be by PAF callback only since recovery may be possible
-    if ( tracker.get_tf_flags() & TF_MISSING_PKT )
+    if ( tracker->get_tf_flags() & TF_MISSING_PKT )
     {
-        tracker.set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
-        tracker.clear_tf_flags(TF_MISSING_PKT);
+        tracker->set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
+        tracker->clear_tf_flags(TF_MISSING_PKT);
         tcpStats.gaps++;
     }
     else
-        tracker.clear_tf_flags(TF_MISSING_PREV_PKT);
+        tracker->clear_tf_flags(TF_MISSING_PREV_PKT);
 
     return flushed_bytes;
 }
@@ -347,7 +347,7 @@ int TcpReassembler::do_zero_byte_flush(Packet* p, uint32_t pkt_flags)
 {
     unsigned bytes_copied = 0;
 
-    const StreamBuffer sb = splitter->reassemble(seglist.session->flow, 0, 0,
+    const StreamBuffer sb = splitter->reassemble(seglist->session->flow, 0, 0,
         nullptr, 0, (PKT_PDU_HEAD | PKT_PDU_TAIL), bytes_copied);
 
      if ( sb.data )
@@ -375,8 +375,8 @@ uint32_t TcpReassembler::get_q_footprint()
     int32_t footprint = 0;
     int32_t sequenced = 0;
 
-    if ( SEQ_GT(tracker.r_win_base, seglist.seglist_base_seq) )
-        footprint = tracker.r_win_base - seglist.seglist_base_seq;
+    if ( SEQ_GT(tracker->r_win_base, seglist->seglist_base_seq) )
+        footprint = tracker->r_win_base - seglist->seglist_base_seq;
 
     if ( footprint )
         sequenced = get_q_sequenced();
@@ -390,16 +390,16 @@ uint32_t TcpReassembler::get_q_footprint()
 
 uint32_t TcpReassembler::get_q_sequenced()
 {
-    TcpSegmentNode* tsn = seglist.cur_rseg;
+    TcpSegmentNode* tsn = seglist->cur_rseg;
 
     if ( !tsn )
     {
-        tsn = seglist.head;
+        tsn = seglist->head;
 
-        if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
+        if ( !tsn || SEQ_LT(tracker->r_win_base, tsn->scan_seq()) )
             return 0;
 
-        seglist.cur_rseg = tsn;
+        seglist->cur_rseg = tsn;
     }
 
     uint32_t len = 0;
@@ -408,7 +408,7 @@ uint32_t TcpReassembler::get_q_sequenced()
     {
 
         if ( !tsn->unscanned() )
-            seglist.cur_rseg = tsn->next;
+            seglist->cur_rseg = tsn->next;
         else
             len += tsn->unscanned();
 
@@ -417,22 +417,22 @@ uint32_t TcpReassembler::get_q_sequenced()
     if ( tsn->unscanned() )
         len += tsn->unscanned();
 
-    seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
+    seglist->seglist_base_seq = seglist->cur_rseg->scan_seq();
 
     return len;
 }
 
 bool TcpReassembler::is_q_sequenced()
 {
-    TcpSegmentNode* tsn = seglist.cur_rseg;
+    TcpSegmentNode* tsn = seglist->cur_rseg;
 
     if ( !tsn )
     {
-        tsn = seglist.head;
-        if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
+        tsn = seglist->head;
+        if ( !tsn || SEQ_LT(tracker->r_win_base, tsn->scan_seq()) )
             return false;
 
-        seglist.cur_rseg = tsn;
+        seglist->cur_rseg = tsn;
     }
 
     while ( tsn->next_no_gap() )
@@ -440,17 +440,17 @@ bool TcpReassembler::is_q_sequenced()
         if ( tsn->unscanned() )
             break;
 
-        tsn = seglist.cur_rseg = tsn->next;
+        tsn = seglist->cur_rseg = tsn->next;
     }
 
-    seglist.seglist_base_seq = tsn->scan_seq();
+    seglist->seglist_base_seq = tsn->scan_seq();
 
     return (tsn->unscanned() != 0);
 }
 
 void TcpReassembler::final_flush(Packet* p, uint32_t dir)
 {
-    tracker.set_tf_flags(TF_FORCE_FLUSH);
+    tracker->set_tf_flags(TF_FORCE_FLUSH);
 
     if ( flush_stream(p, dir, true) )
     {
@@ -461,7 +461,7 @@ void TcpReassembler::final_flush(Packet* p, uint32_t dir)
 
         purge_flushed_ackd();
     }
-    tracker.clear_tf_flags(TF_FORCE_FLUSH);
+    tracker->clear_tf_flags(TF_FORCE_FLUSH);
 }
 
 static Packet* get_packet(Flow* flow, uint32_t flags, bool c2s)
@@ -552,10 +552,10 @@ void TcpReassembler::flush_queued_segments(Flow* flow, bool clear, Packet* p)
 
 void TcpReassembler::check_first_segment_hole()
 {
-    if ( SEQ_LT(seglist.seglist_base_seq, seglist.head->start_seq()) )
+    if ( SEQ_LT(seglist->seglist_base_seq, seglist->head->start_seq()) )
     {
-        seglist.seglist_base_seq = seglist.head->start_seq();
-        seglist.advance_rcv_nxt();
+        seglist->seglist_base_seq = seglist->head->start_seq();
+        seglist->advance_rcv_nxt();
         paf.state = StreamSplitter::START;
     }
 }
@@ -567,20 +567,22 @@ uint32_t TcpReassembler::perform_partial_flush(Flow* flow, Packet*& p)
 }
 
 // No error checking here, so the caller must ensure that p, p->flow are not null.
-uint32_t TcpReassembler::perform_partial_flush(Packet* p, uint32_t flushed)
+uint32_t TcpReassembler::perform_partial_flush(Packet* p)
 {
+    uint32_t flushed = 0;
     if ( splitter->init_partial_flush(p->flow) )
     {
-        flushed += flush_stream(p, packet_dir, false);
+        flushed = flush_stream(p, packet_dir, false);
         paf.paf_jump(flushed);
         tcpStats.partial_flushes++;
         tcpStats.partial_flush_bytes += flushed;
-        if ( seglist.seg_count )
+        if ( seglist->seg_count )
         {
-            purge_to_seq(seglist.head->start_seq() + flushed);
-            tracker.r_win_base = seglist.seglist_base_seq;
+            purge_to_seq(seglist->head->start_seq() + flushed);
+            tracker->r_win_base = seglist->seglist_base_seq;
         }
     }
+
     return flushed;
 }
 
@@ -589,7 +591,7 @@ uint32_t TcpReassembler::perform_partial_flush(Packet* p, uint32_t flushed)
 // FIXIT-M this convoluted expression needs some refactoring to simplify
 bool TcpReassembler::final_flush_on_fin(int32_t flush_amt, Packet *p, FinSeqNumStatus fin_status)
 {
-    return tracker.fin_seq_status >= fin_status
+    return tracker->fin_seq_status >= fin_status
         && -1 <= flush_amt && flush_amt <= 0
         && paf.state == StreamSplitter::SEARCH
         && !p->flow->searching_for_service();
@@ -597,13 +599,20 @@ bool TcpReassembler::final_flush_on_fin(int32_t flush_amt, Packet *p, FinSeqNumS
 
 bool TcpReassembler::asymmetric_flow_flushed(uint32_t flushed, snort::Packet *p)
 {
-    bool asymmetric = flushed && seglist.seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
+    bool asymmetric = flushed && seglist->seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
     if ( asymmetric )
     {
-        TcpStreamTracker::TcpState peer = tracker.session->get_peer_state(&tracker);
+        TcpStreamTracker::TcpState peer = tracker->session->get_peer_state(tracker);
         asymmetric = ( peer == TcpStreamTracker::TCP_SYN_SENT || peer == TcpStreamTracker::TCP_SYN_RECV
             || peer == TcpStreamTracker::TCP_MID_STREAM_SENT );
     }
 
     return asymmetric;
 }
+
+uint32_t TcpReassemblerIgnore::perform_partial_flush(snort::Flow* flow, snort::Packet*& p)
+{
+    p = get_packet(flow, packet_dir, server_side);
+    return 0;
+}
+
index 66fed13e890ac9e971ed99c13f6aa677dd291a61..a966241828acd83d7dfc96c81cb49cd85431006b 100644 (file)
@@ -52,7 +52,7 @@ public:
         FINAL_FLUSH_OK = -1
     };
 
-    TcpReassembler(TcpStreamTracker& trk, TcpReassemblySegments& seglist)
+    TcpReassembler(TcpStreamTracker* trk, TcpReassemblySegments* seglist)
         : tracker(trk), seglist(seglist)
     { }
 
@@ -60,22 +60,19 @@ public:
     { }
 
     virtual void init(bool server, snort::StreamSplitter* ss);
-
     virtual int eval_flush_policy_on_ack(snort::Packet*) = 0;
     virtual int eval_flush_policy_on_data(snort::Packet*) = 0;
     virtual int eval_asymmetric_flush(snort::Packet*) = 0;
     virtual int flush_stream(snort::Packet*, uint32_t dir, bool final_flush = false) = 0;
-    void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* = nullptr);
-    void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet*);
-    uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&);
-    void purge_flushed_ackd();
+    virtual void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* = nullptr);
+    virtual void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet*);
+    virtual uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&);
+    virtual void purge_flushed_ackd();
+    virtual FlushPolicy get_flush_policy() const = 0;
 
     void release_splitter()
     { splitter = nullptr; }
 
-    snort::StreamSplitter* get_splitter()
-     { return splitter; }
-
     bool is_splitter_paf() const
     { return splitter && splitter->is_paf(); }
 
@@ -89,12 +86,14 @@ public:
 
     void initialize_paf()
     {
+        assert( get_flush_policy() != STREAM_FLPOLICY_IGNORE );
+
         // only initialize if we have a data segment queued
-        if ( !seglist.head )
+        if ( !seglist->head )
             return;
 
-       if ( !paf.paf_initialized() or !SEQ_EQ(paf.seq_num, seglist.head->start_seq()) )
-            paf.paf_initialize(seglist.head->start_seq());
+       if ( !paf.paf_initialized() or !SEQ_EQ(paf.seq_num, seglist->head->start_seq()) )
+            paf.paf_initialize(seglist->head->start_seq());
     }
 
     void reset_paf()
@@ -103,8 +102,6 @@ public:
     void clear_paf()
     { paf.paf_clear(); }
 
-    virtual FlushPolicy get_flush_policy() const = 0;
-
 protected:
     void show_rebuilt_packet(snort::Packet*);
     int flush_data_segments(uint32_t flush_len, snort::Packet* pdu);
@@ -123,13 +120,13 @@ protected:
     bool fin_acked_no_gap(const TcpSegmentNode&);
     void update_skipped_bytes(uint32_t);
     void check_first_segment_hole();
-    uint32_t perform_partial_flush(snort::Packet*, uint32_t flushed = 0);
+    uint32_t perform_partial_flush(snort::Packet*);
     bool final_flush_on_fin(int32_t flush_amt, snort::Packet*, FinSeqNumStatus);
     bool asymmetric_flow_flushed(uint32_t flushed, snort::Packet *p);
 
     ProtocolAwareFlusher paf;
-    TcpStreamTracker& tracker;
-    TcpReassemblySegments& seglist;
+    TcpStreamTracker* tracker = nullptr;
+    TcpReassemblySegments* seglist = nullptr;
     snort::StreamSplitter* splitter = nullptr;
 
     snort::Packet* last_pdu = nullptr;
@@ -142,7 +139,7 @@ protected:
 class TcpReassemblerIgnore : public TcpReassembler
 {
 public:
-    TcpReassemblerIgnore(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+       TcpReassemblerIgnore(TcpStreamTracker* trk, TcpReassemblySegments* sl)
         : TcpReassembler(trk, sl)
     { }
 
@@ -161,8 +158,20 @@ public:
     int flush_stream(snort::Packet*, uint32_t, bool) override
     { return 0; }
 
+    void flush_queued_segments(snort::Flow*, bool, snort::Packet*) override
+    { }
+
+    void finish_and_final_flush(snort::Flow*, bool, snort::Packet*) override
+    { }
+
+    uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&) override;
+
+    void purge_flushed_ackd() override
+    { }
+
     FlushPolicy get_flush_policy() const override
     { return STREAM_FLPOLICY_IGNORE; }
 };
 
 #endif
+
index 124b855a0083fc452cb82d5fba4ff52ce3fe43e2..b8d7825933fc09fabf57f74730e85a34c2ce5010 100644 (file)
@@ -46,7 +46,7 @@ using namespace snort;
 bool TcpReassemblerIds::has_seglist_hole(TcpSegmentNode& tsn, uint32_t& total, uint32_t& flags)
 {
     if ( !tsn.prev or SEQ_GEQ(tsn.prev->scan_seq() + tsn.prev->unscanned(), tsn.scan_seq())
-       or SEQ_GEQ(tsn.scan_seq(), tracker.r_win_base) )
+       or SEQ_GEQ(tsn.scan_seq(), tracker->r_win_base) )
     {
        check_first_segment_hole();
        return false;
@@ -69,7 +69,7 @@ void TcpReassemblerIds::skip_seglist_hole(Packet* p, uint32_t flags, int32_t flu
     {
         if ( flush_amt > 0 )
             update_skipped_bytes(flush_amt);
-        tracker.fallback();
+        tracker->fallback();
     }
     else
     {
@@ -78,17 +78,17 @@ void TcpReassemblerIds::skip_seglist_hole(Packet* p, uint32_t flags, int32_t flu
         paf.state = StreamSplitter::START;
     }
 
-    if ( seglist.head )
+    if ( seglist->head )
     {
         if ( flush_amt > 0 )
-            purge_to_seq(seglist.seglist_base_seq + flush_amt);
-        seglist.seglist_base_seq = seglist.head->scan_seq();
+            purge_to_seq(seglist->seglist_base_seq + flush_amt);
+        seglist->seglist_base_seq = seglist->head->scan_seq();
     }
     else
-        seglist.seglist_base_seq = tracker.r_win_base;  // FIXIT-H - do we need to set rcv_nxt here?
+        seglist->seglist_base_seq = tracker->r_win_base;  // FIXIT-H - do we need to set rcv_nxt here?
 
-    seglist.cur_rseg = seglist.head;
-    tracker.set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+    seglist->cur_rseg = seglist->head;
+    tracker->set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
 }
 
 // iterate over seglist and scan all new acked bytes
@@ -107,44 +107,44 @@ void TcpReassemblerIds::skip_seglist_hole(Packet* p, uint32_t flags, int32_t flu
 //   know where we left off and can resume scanning the remainder
 int32_t TcpReassemblerIds::scan_data_post_ack(uint32_t* flags, Packet* p)
 {
-    assert(seglist.session->flow == p->flow);
+    assert(seglist->session->flow == p->flow);
 
     int32_t ret_val = FINAL_FLUSH_HOLD;
 
-    if ( !seglist.cur_sseg || SEQ_GEQ(seglist.seglist_base_seq, tracker.r_win_base) )
+    if ( !seglist->cur_sseg || SEQ_GEQ(seglist->seglist_base_seq, tracker->r_win_base) )
         return ret_val ;
 
-    if ( !seglist.cur_rseg )
-        seglist.cur_rseg = seglist.cur_sseg;
+    if ( !seglist->cur_rseg )
+        seglist->cur_rseg = seglist->cur_sseg;
 
     uint32_t total = 0;
-    TcpSegmentNode* tsn = seglist.cur_sseg;
+    TcpSegmentNode* tsn = seglist->cur_sseg;
     if ( paf.paf_initialized() )
     {
         uint32_t end_seq = tsn->scan_seq() + tsn->unscanned();
         if ( SEQ_EQ(end_seq, paf.paf_position()) )
         {
-            total = end_seq - seglist.seglist_base_seq;
+            total = end_seq - seglist->seglist_base_seq;
             tsn = tsn->next;
         }
         else
-            total = tsn->scan_seq() - seglist.cur_rseg->scan_seq();
+            total = tsn->scan_seq() - seglist->cur_rseg->scan_seq();
     }
 
     ret_val = FINAL_FLUSH_OK;
-    while (tsn && *flags && SEQ_LT(tsn->scan_seq(), tracker.r_win_base))
+    while (tsn && *flags && SEQ_LT(tsn->scan_seq(), tracker->r_win_base))
     {
         // only flush acked data that fits in pdu reassembly buffer...
         uint32_t end = tsn->scan_seq() + tsn->unscanned();
         uint32_t flush_len;
         int32_t flush_pt;
 
-        if ( SEQ_GT(end, tracker.r_win_base))
-            flush_len = tracker.r_win_base - tsn->scan_seq();
+        if ( SEQ_GT(end, tracker->r_win_base))
+            flush_len = tracker->r_win_base - tsn->scan_seq();
         else
             flush_len = tsn->unscanned();
 
-        if ( tsn->next_acked_no_gap(tracker.r_win_base) )
+        if ( tsn->next_acked_no_gap(tracker->r_win_base) )
             *flags |= PKT_MORE_TO_FLUSH;
         else
             *flags &= ~PKT_MORE_TO_FLUSH;
@@ -158,11 +158,11 @@ int32_t TcpReassemblerIds::scan_data_post_ack(uint32_t* flags, Packet* p)
         }
 
         // Get splitter from tracker as paf check may change it.
-        seglist.cur_sseg = tsn;
+        seglist->cur_sseg = tsn;
 
         if ( flush_pt >= 0 )
         {
-            seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
+            seglist->seglist_base_seq = seglist->cur_rseg->scan_seq();
             return flush_pt;
         }
 
@@ -182,12 +182,11 @@ int32_t TcpReassemblerIds::scan_data_post_ack(uint32_t* flags, Packet* p)
 
 int TcpReassemblerIds::eval_flush_policy_on_ack(Packet* p)
 {
+    last_pdu = nullptr;
     uint32_t flushed = 0;
     int32_t flush_amt;
     uint32_t flags;
 
-    last_pdu = nullptr;
-
     do
     {
         flags = packet_dir;
@@ -201,13 +200,13 @@ int TcpReassemblerIds::eval_flush_policy_on_ack(Packet* p)
         assert( flushed );
 
         // ideally we would purge just once after this loop but that throws off base
-        if ( seglist.head )
-            purge_to_seq(seglist.seglist_base_seq);
-    } while ( seglist.head and !p->flow->is_inspection_disabled() );
+        if ( seglist->head )
+            purge_to_seq(seglist->seglist_base_seq);
+    } while ( seglist->head and !p->flow->is_inspection_disabled() );
 
     if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
     {
-        tracker.fallback();
+        tracker->fallback();
         return eval_flush_policy_on_ack(p);
     }
     else if ( paf.state == StreamSplitter::SKIP )
@@ -225,16 +224,16 @@ int TcpReassemblerIds::eval_flush_policy_on_data(Packet* p)
 {
     uint32_t flushed = 0;
 
-    if ( !seglist.head )
+    if ( !seglist->head )
         return flushed;
 
-    if ( tracker.is_retransmit_of_held_packet(p) )
-        flushed = perform_partial_flush(p, flushed);
+    if ( tracker->is_retransmit_of_held_packet(p) )
+        flushed += perform_partial_flush(p);
 
     if ( !p->flow->two_way_traffic() and
-        seglist.get_seg_bytes_total() > seglist.session->tcp_config->asymmetric_ids_flush_threshold )
+        seglist->get_seg_bytes_total() > seglist->session->tcp_config->asymmetric_ids_flush_threshold )
     {
-        seglist.skip_holes();
+        seglist->skip_holes();
         flushed += eval_asymmetric_flush(p);
     }
 
@@ -244,7 +243,7 @@ int TcpReassemblerIds::eval_flush_policy_on_data(Packet* p)
 int TcpReassemblerIds::eval_asymmetric_flush(snort::Packet* p)
 {
     // asymmetric flush in IDS mode.. advance r_win_base to end of in-order data
-    tracker.r_win_base = tracker.rcv_nxt;
+    tracker->r_win_base = tracker->rcv_nxt;
 
     uint32_t flushed = eval_flush_policy_on_ack(p);
     if ( flushed )
@@ -257,8 +256,8 @@ int TcpReassemblerIds::flush_stream(Packet* p, uint32_t dir, bool final_flush)
 {
     uint32_t bytes = 0;
 
-    if ( seglist.session->flow->two_way_traffic() )
-         bytes = get_q_footprint();
+    if ( seglist->session->flow->two_way_traffic() )
+        bytes = get_q_footprint();
     else
         bytes = get_q_sequenced();
 
index 4ee533d551da72292e7d6a0d3d745bc6edf27658..131b92de8d0f9deed77861e86b2e6eb51461c58b 100644 (file)
@@ -38,7 +38,7 @@ class TcpReassemblerIds : public TcpReassembler
 public:
 
 
-    TcpReassemblerIds(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+    TcpReassemblerIds(TcpStreamTracker* trk, TcpReassemblySegments* sl)
         : TcpReassembler(trk, sl)
     { }
 
index f0a513d7676d4fdf7d66e777c69658b4e9026fa4..6ed2bd5cf334f1cc007171f3ca82fe475375f0ac 100644 (file)
@@ -48,21 +48,21 @@ using namespace snort;
 // because we don't wait until it is acknowledged
 int32_t TcpReassemblerIps::scan_data_pre_ack(uint32_t* flags, Packet* p)
 {
-    assert(seglist.session->flow == p->flow);
+    assert(seglist->session->flow == p->flow);
 
     int32_t ret_val = FINAL_FLUSH_HOLD;
 
-    if ( SEQ_GT(seglist.head->scan_seq(), seglist.seglist_base_seq) )
+    if ( SEQ_GT(seglist->head->scan_seq(), seglist->seglist_base_seq) )
         return ret_val;
 
-    if ( !seglist.cur_rseg )
-        seglist.cur_rseg = seglist.cur_sseg;
+    if ( !seglist->cur_rseg )
+        seglist->cur_rseg = seglist->cur_sseg;
 
     if ( !is_q_sequenced() )
         return ret_val;
 
-    TcpSegmentNode* tsn = seglist.cur_sseg;
-    uint32_t total = tsn->scan_seq() - seglist.seglist_base_seq;
+    TcpSegmentNode* tsn = seglist->cur_sseg;
+    uint32_t total = tsn->scan_seq() - seglist->seglist_base_seq;
 
     ret_val = FINAL_FLUSH_OK;
     while ( tsn && *flags )
@@ -93,7 +93,7 @@ int32_t TcpReassemblerIps::scan_data_pre_ack(uint32_t* flags, Packet* p)
 
         if (flush_pt >= 0)
         {
-            seglist.cur_sseg = tsn;
+            seglist->cur_sseg = tsn;
             return flush_pt;
         }
 
@@ -107,7 +107,7 @@ int32_t TcpReassemblerIps::scan_data_pre_ack(uint32_t* flags, Packet* p)
         tsn = tsn->next;
     }
 
-    seglist.cur_sseg = tsn;
+    seglist->cur_sseg = tsn;
     
     return ret_val;
 }
@@ -121,42 +121,42 @@ int TcpReassemblerIps::eval_flush_policy_on_ack(Packet*)
 
 int TcpReassemblerIps::eval_flush_policy_on_data(Packet* p)
 {
-    uint32_t flushed = 0;
+    if ( !seglist->head )
+        return 0;
+
     last_pdu = nullptr;
+    uint32_t flags;
+    uint32_t flushed = 0;
+    int32_t flush_amt;
 
-    if ( seglist.head )
+    do
     {
-        uint32_t flags;
-        int32_t flush_amt;
-        do
-        {
-            flags = packet_dir;
-            flush_amt = scan_data_pre_ack(&flags, p);
-            if ( flush_amt <= 0 )
-                break;
+        flags = packet_dir;
+        flush_amt = scan_data_pre_ack(&flags, p);
+        if ( flush_amt <= 0 )
+            break;
 
-            flushed += flush_to_seq(flush_amt, p, flags);
-        } while ( seglist.head and !p->flow->is_inspection_disabled() );
+        flushed += flush_to_seq(flush_amt, p, flags);
+    } while ( seglist->head and !p->flow->is_inspection_disabled() );
 
-        if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
-        {
-            tracker.fallback();
-            return eval_flush_policy_on_data(p);
-        }
-        else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_SEEN) )
-            finish_and_final_flush(p->flow, true, p);
+    if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
+    {
+        tracker->fallback();
+        return eval_flush_policy_on_data(p);
     }
+    else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_SEEN) )
+        finish_and_final_flush(p->flow, true, p);
 
-    if ( !seglist.head )
+    if ( !seglist->head )
         return flushed;
 
-    if ( tracker.is_retransmit_of_held_packet(p) )
-        flushed = perform_partial_flush(p, flushed);
+    if ( tracker->is_retransmit_of_held_packet(p) )
+        flushed += perform_partial_flush(p);
 
     if ( asymmetric_flow_flushed(flushed, p) )
     {
-        purge_to_seq(seglist.head->start_seq() + flushed);
-        tracker.r_win_base = seglist.seglist_base_seq;
+        purge_to_seq(seglist->head->start_seq() + flushed);
+        tracker->r_win_base = seglist->seglist_base_seq;
         tcpStats.flush_on_asymmetric_flow++;
     }
 
@@ -170,8 +170,8 @@ int TcpReassemblerIps::eval_asymmetric_flush(snort::Packet* p)
 
 int TcpReassemblerIps::flush_stream(Packet* p, uint32_t dir, bool final_flush)
 {
-    if ( seglist.session->flow->two_way_traffic()
-        or (tracker.get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
+    if ( seglist->session->flow->two_way_traffic()
+        or (tracker->get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
     {
         uint32_t bytes = get_q_sequenced();  // num bytes in pre-ack mode
         if ( bytes )
index 3a81efd1ee10138ddd013d123170218cd6f4a5e9..328cfbbd3bf21af1558ba3a30f072bdc7f05ea78 100644 (file)
@@ -37,7 +37,7 @@ class TcpSegmentNode;
 class TcpReassemblerIps : public TcpReassembler
 {
 public:
-    TcpReassemblerIps(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+    TcpReassemblerIps(TcpStreamTracker* trk, TcpReassemblySegments* sl)
         : TcpReassembler(trk, sl)
     { }
 
index 0a3e0469f22aa21b665e1338953a2eebfeb5a3bf..20d97e29d90866d2728e27d389fa6270ce415f45 100644 (file)
@@ -60,6 +60,11 @@ void TcpReassemblySegments::reset()
     seglist_base_seq = 0;
 }
 
+void TcpReassemblySegments::purge_segment_list()
+{
+    purge();
+}
+
 void TcpReassemblySegments::update_next(TcpSegmentNode* tsn)
 {
     cur_rseg = tsn->next_no_gap() ?  tsn->next : nullptr;
@@ -443,8 +448,3 @@ void TcpReassemblySegments::skip_midstream_pickup_seglist_hole(TcpSegmentDescrip
     else
         tracker->set_rcv_nxt(ack);
 }
-
-void TcpReassemblySegments::purge_segment_list()
-{
-    purge();
-}
index bff362df08b32eb540b0b9bd371bc5e50c3578ae..df1a79d7d7786823c112992bc2ede7b9bcdfb6f2 100644 (file)
@@ -52,7 +52,6 @@ public:
     int delete_reassembly_segment(TcpSegmentNode*);
     void advance_rcv_nxt(TcpSegmentNode *tsn = nullptr);
     void purge_flushed_segments(uint32_t flush_seq);
-    void purge_segments_left_of_hole(const TcpSegmentNode*);
     void skip_holes();
     void skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor&);
     void purge_segment_list();
@@ -88,7 +87,7 @@ public:
 
 private:
     void insert_segment_data(TcpSegmentNode* prev, TcpSegmentNode*);
-
+    void purge_segments_left_of_hole(const TcpSegmentNode*);
 
     void insert(TcpSegmentNode* prev, TcpSegmentNode* ss)
     {
index 11bc88670bf40f36f78839cc1964472c984dd3cc..f730fe3e3f9da379170fc467c03e090c36ec1273 100644 (file)
@@ -150,14 +150,14 @@ void TcpSession::restart(Packet* p)
     if ( talker->midstream_initial_ack_flush )
     {
         talker->midstream_initial_ack_flush = false;
-        talker->eval_flush_policy_on_data(p);
+        talker->reassembler->eval_flush_policy_on_data(p);
     }
 
     if (p->dsize > 0)
-        listener->eval_flush_policy_on_data(p);
+        listener->reassembler->eval_flush_policy_on_data(p);
 
     if (p->ptrs.tcph->is_ack())
-        talker->eval_flush_policy_on_ack(p);
+        talker->reassembler->eval_flush_policy_on_ack(p);
 
     tcpStats.restarts++;
 }
@@ -747,7 +747,7 @@ void TcpSession::handle_data_segment(TcpSegmentDescriptor& tsd, bool flush)
     }
 
     if ( flush )
-        listener->eval_flush_policy_on_data(tsd.get_pkt());
+        listener->reassembler->eval_flush_policy_on_data(tsd.get_pkt());
     else
         listener->reassembler->initialize_paf();
 }
index f71da8f3972c5dcc4bf89f53e50e1616d1daf881..f66b72e55472272b1dca9fe2933b93c866ea06cc 100644 (file)
@@ -101,7 +101,7 @@ public:
     void get_packet_header_foo(DAQ_PktHdr_t*, const DAQ_PktHdr_t* orig, uint32_t dir);
     bool can_set_no_ack();
     bool set_no_ack(bool);
-    bool no_ack_mode_enabled() { return no_ack; }
+    inline bool no_ack_mode_enabled() { return no_ack; }
 
     void set_pkt_action_flag(uint32_t flag)
     { pkt_action_mask |= flag; }
index 476fcaffffb17ff3b55ce1f6e31e8ea2b3719079..1ddffc3fb9209929eed95d39ddc1a6b658d7edf9 100644 (file)
@@ -54,7 +54,7 @@ bool TcpStateMidStreamRecv::syn_ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTra
     if ( trk.normalizer.is_tcp_ips_enabled() )
     {
         trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
-        trk.eval_flush_policy_on_data(tsd.get_pkt());
+        trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
         trk.midstream_initial_ack_flush = true;
     }
 
@@ -68,7 +68,7 @@ bool TcpStateMidStreamRecv::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker
     if ( trk.normalizer.is_tcp_ips_enabled() )
     {
         trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
-        trk.eval_flush_policy_on_data(tsd.get_pkt());
+        trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
         trk.midstream_initial_ack_flush = true;
     }
 
@@ -88,7 +88,7 @@ bool TcpStateMidStreamRecv::data_seg_sent(TcpSegmentDescriptor& tsd, TcpStreamTr
     if ( trk.normalizer.is_tcp_ips_enabled() )
     {
         trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
-        trk.eval_flush_policy_on_data(tsd.get_pkt());
+        trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
         trk.midstream_initial_ack_flush = true;
     }
 
@@ -111,7 +111,7 @@ bool TcpStateMidStreamRecv::fin_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker
     if ( trk.normalizer.is_tcp_ips_enabled() )
     {
         trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
-        trk.eval_flush_policy_on_data(tsd.get_pkt());
+        trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
         trk.midstream_initial_ack_flush = true;
     }
 
index 29d2584dc57f15604994aa93ba565b847a5ce47f..54c5d000de465041641b638b97ea38c7443ebbb5 100644 (file)
@@ -16,7 +16,7 @@
 // 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 //--------------------------------------------------------------------------
 
-// tcp_stream_tracker.cpp author davis mcpherson <davmcphe@cisco.com>
+// tcp_stream_tracker.cc author davis mcpherson <davmcphe@cisco.com>
 // Created on: Jun 24, 2015
 
 #ifdef HAVE_CONFIG_H
@@ -47,6 +47,7 @@
 using namespace snort;
 
 THREAD_LOCAL HeldPacketQueue* hpq = nullptr;
+TcpReassemblerIgnore* tcp_ignore_reassembler = new TcpReassemblerIgnore(nullptr, nullptr);
 
 const std::list<HeldPacket>::iterator TcpStreamTracker::null_iterator { };
 
@@ -71,14 +72,12 @@ const char* tcp_event_names[] = {
 TcpStreamTracker::TcpStreamTracker(bool client) :
     tcp_state(client ? TCP_STATE_NONE : TCP_LISTEN), client_tracker(client),
     held_packet(null_iterator)
-{
-    reassembler = new  TcpReassemblerIgnore(*this, seglist);
-    reassembler->init(!client_tracker, nullptr);
-}
+{ }
 
 TcpStreamTracker::~TcpStreamTracker()
 {
-    delete reassembler;
+    if ( reassembler->get_flush_policy() != STREAM_FLPOLICY_IGNORE )
+       delete reassembler;
 
     if( oaitw_reassembler )
     {
@@ -118,43 +117,6 @@ void TcpStreamTracker::clear_tracker(snort::Flow* flow, snort::Packet* p, bool f
     set_splitter((StreamSplitter*)nullptr);
 }
 
-int TcpStreamTracker::eval_flush_policy_on_ack(snort::Packet* p)
-{
-    if( oaitw_reassembler )
-    {
-        delete oaitw_reassembler;
-        oaitw_reassembler = nullptr;
-    }
-
-    return reassembler->eval_flush_policy_on_ack(p);
-}
-
-int TcpStreamTracker::eval_flush_policy_on_data(snort::Packet* p)
-{
-    if( oaitw_reassembler )
-    {
-        delete oaitw_reassembler;
-        oaitw_reassembler = nullptr;
-    }
-
-    reassembler->eval_flush_policy_on_data(p);
-
-    return 0;
-}
-
-int TcpStreamTracker::eval_asymmetric_flush(snort::Packet* p)
-{
-    if( oaitw_reassembler )
-    {
-        delete oaitw_reassembler;
-        oaitw_reassembler = nullptr;
-    }
-
-    reassembler->eval_asymmetric_flush(p);
-
-    return 0;
-}
-
 TcpStreamTracker::TcpEvent TcpStreamTracker::set_tcp_event(const TcpSegmentDescriptor& tsd)
 {
     bool talker;
@@ -328,15 +290,7 @@ void TcpStreamTracker::init_tcp_state(TcpSession* s)
     held_packet = null_iterator;
 
     flush_policy = STREAM_FLPOLICY_IGNORE;
-    if( oaitw_reassembler )
-    {
-        delete oaitw_reassembler;
-        oaitw_reassembler = nullptr;
-    }
-    if ( reassembler )
-        delete reassembler;
-    reassembler = new  TcpReassemblerIgnore(*this, seglist);
-    reassembler->init(!client_tracker, nullptr);
+    update_flush_policy(nullptr);
 
     normalizer.reset();
     seglist.reset();
@@ -410,9 +364,12 @@ void TcpStreamTracker::update_flush_policy(StreamSplitter* splitter)
     {
         // switching to Ignore flush policy...save pointer to current reassembler to delete later
         if ( reassembler )
+        {
+            seglist.purge_segment_list();
             oaitw_reassembler = reassembler;
+        }
 
-        reassembler = new  TcpReassemblerIgnore(*this, seglist);
+        reassembler = tcp_ignore_reassembler;
         reassembler->init(!client_tracker, splitter);
     }
     else if ( flush_policy == STREAM_FLPOLICY_ON_DATA )
@@ -421,10 +378,9 @@ void TcpStreamTracker::update_flush_policy(StreamSplitter* splitter)
         {
             // update from IDS -> IPS is not supported
             assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_ACK );
-            delete reassembler;
         }
 
-        reassembler = new  TcpReassemblerIps(*this, seglist);
+        reassembler = new  TcpReassemblerIps(this, &seglist);
         reassembler->init(!client_tracker, splitter);
     }
     else
@@ -433,10 +389,9 @@ void TcpStreamTracker::update_flush_policy(StreamSplitter* splitter)
         {
             // update from IPS -> IDS is not supported
             assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_DATA );
-            delete reassembler;
         }
 
-        reassembler = new  TcpReassemblerIds(*this, seglist);
+        reassembler = new  TcpReassemblerIds(this, &seglist);
         reassembler->init(!client_tracker, splitter);
     }
 }
@@ -519,7 +474,7 @@ void TcpStreamTracker::fallback()
 void TcpStreamTracker::disable_reassembly(Flow* f)
 {
     set_splitter((StreamSplitter*)nullptr);
-    seglist.reset();
+    seglist.purge_segment_list();
     reassembler->reset_paf();
     finalize_held_packet(f);
 }
@@ -726,7 +681,7 @@ void TcpStreamTracker::update_tracker_no_ack_recv(const TcpSegmentDescriptor& ts
 void TcpStreamTracker::update_tracker_no_ack_sent(const TcpSegmentDescriptor& tsd)
 {
     r_win_base = tsd.get_end_seq();
-    eval_flush_policy_on_ack(tsd.get_pkt());
+    reassembler->eval_flush_policy_on_ack(tsd.get_pkt());
 }
 
 void TcpStreamTracker::update_tracker_ack_sent(TcpSegmentDescriptor& tsd)
@@ -752,7 +707,7 @@ void TcpStreamTracker::update_tracker_ack_sent(TcpSegmentDescriptor& tsd)
         fin_seq_status = FIN_WITH_SEQ_ACKED;
     }
 
-    eval_flush_policy_on_ack(tsd.get_pkt());
+    reassembler->eval_flush_policy_on_ack(tsd.get_pkt());
 }
 
 bool TcpStreamTracker::update_on_3whs_ack(TcpSegmentDescriptor& tsd)
@@ -887,7 +842,7 @@ int32_t TcpStreamTracker::kickstart_asymmetric_flow(const TcpSegmentDescriptor&
     else
         reassembler->reset_paf();
 
-    eval_asymmetric_flush(tsd.get_pkt());
+    reassembler->eval_flush_policy_on_data(tsd.get_pkt());
 
     int32_t space_left = max_queued_bytes - seglist.get_seg_bytes_total();
 
index 8ecaba36bfbfaa08aa9ef85fbb91706941ea24a0..73c39a1fb874280f4de51446c6368f48bc63908b 100644 (file)
@@ -95,9 +95,6 @@ public:
     void reset();
     void clear_tracker(snort::Flow*, snort::Packet*, bool flush_segments, bool restart);
 
-    int eval_flush_policy_on_ack(snort::Packet*);
-    int eval_flush_policy_on_data(snort::Packet*);
-    int eval_asymmetric_flush(snort::Packet*);
     void update_stream_order(const TcpSegmentDescriptor&, bool aligned);
 
     void fallback();