git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Merge pull request #2669 in SNORT/snort3 from ~DAVMCPHE/snort3:max_pdu_only to master
authorMichael Altizer (mialtize) <mialtize@cisco.com>
Fri, 25 Dec 2020 03:07:40 +0000 (03:07 +0000)
committerMichael Altizer (mialtize) <mialtize@cisco.com>
Fri, 25 Dec 2020 03:07:40 +0000 (03:07 +0000)
Squashed commit of the following:

commit c3b1baf2fd09a5aaf58ee09a26efd7048c8d3ea9
Author: davis mcpherson <davmcphe@cisco.com>
Date:   Tue Dec 22 11:30:01 2020 -0500

    stream_tcp: delete redundant calls to check if the tcp packet contains a data payload

commit ed0484fe30ec0e9fbd7808aaab06dbdbc8d61a75
Author: davis mcpherson <davmcphe@cisco.com>
Date:   Tue Dec 22 11:28:56 2020 -0500

    stream_tcp: on midstream pickup, when first packet is a data segment, set flag on talker tracker to reinit seglist base seg on first received data packet

commit c2d0eadde2b5eee60372c891b931bc39f626fc4f
Author: davis mcpherson <davmcphe@cisco.com>
Date:   Tue Dec 1 16:51:47 2020 -0500

    stream_tcp: fix issues causing overrun of the pdu reassembly buffer.  make splitters authoritative of size of the reassembled pdu

    rpc_decode: implement adjust_to_fit for RPC splitter

21 files changed:
src/service_inspectors/rpc_decode/rpc_decode.cc
src/service_inspectors/wizard/wizard.cc
src/stream/stream_splitter.cc
src/stream/stream_splitter.h
src/stream/tcp/segment_overlap_editor.cc
src/stream/tcp/segment_overlap_editor.h
src/stream/tcp/tcp_reassembler.cc
src/stream/tcp/tcp_reassembler.h
src/stream/tcp/tcp_reassemblers.cc
src/stream/tcp/tcp_reassemblers.h
src/stream/tcp/tcp_segment_node.h
src/stream/tcp/tcp_session.cc
src/stream/tcp/tcp_state_close_wait.cc
src/stream/tcp/tcp_state_fin_wait1.cc
src/stream/tcp/tcp_state_fin_wait2.cc
src/stream/tcp/tcp_state_listen.cc
src/stream/tcp/tcp_state_none.cc
src/stream/tcp/tcp_state_syn_recv.cc
src/stream/tcp/tcp_state_syn_sent.cc
src/stream/tcp/tcp_stream_tracker.cc
src/stream/tcp/tcp_stream_tracker.h

index 8ff39c750246ace066ade6d790ccae60c5fd1349..9089fcbde061b2a0333aa7e5545b573c589eb447 100644 (file)
@@ -776,11 +776,29 @@ class RpcSplitter : public StreamSplitter
 public:
     RpcSplitter(bool c2s) : StreamSplitter(c2s) { }
 
-    Status scan(Packet*, const uint8_t*, uint32_t,
-        uint32_t, uint32_t*) override
-    { return SEARCH; }
+    Status scan(Packet*, const uint8_t*, uint32_t len,
+        uint32_t, uint32_t* fp) override
+    {
+
+        bytes_scanned += len;
+        if ( bytes_scanned < max(nullptr) )
+            return SEARCH;
+
+        *fp = len;
+        return FLUSH;
+    }
 
-    unsigned max(Flow*) override { return MIN_CALL_BODY_SZ; }
+    unsigned max(Flow*) override
+    { return MIN_CALL_BODY_SZ; }
+
+    // FIXIT-M this limits rpc flushes to 32 bytes per pdu, is that what we want?
+    unsigned adjust_to_fit(unsigned len) override
+    {
+        if ( len > max(nullptr) )
+            return max(nullptr);
+
+        return len;
+    }
 };
 
 //-------------------------------------------------------------------------
index c6fad20e128aa967feb2b48216cda124c65b8bb1..d5a7a85ff5ebbfc0b975de4d14b0e0e6dbd1c8a7 100644 (file)
@@ -124,7 +124,6 @@ private:
 private:
     Wizard* wizard;
     Wand wand;
-    unsigned bytes_scanned = 0;
 };
 
 class Wizard : public Inspector
index 4ba8feb5d9c69b083f66d7a1adf9e64430ac3138..78409ad869a0a7cd323871754f603d3f99dcf0c6 100644 (file)
@@ -23,6 +23,8 @@
 
 #include "stream_splitter.h"
 
+#include <algorithm>
+
 #include "detection/detection_engine.h"
 #include "main/snort_config.h"
 #include "protocols/packet.h"
@@ -69,13 +71,22 @@ AtomSplitter::AtomSplitter(bool b, uint16_t sz) : StreamSplitter(b)
     min = base + get_flush_bucket_size();
 }
 
+unsigned AtomSplitter::adjust_to_fit(unsigned len)
+{
+    return std::min(SnortConfig::get_conf()->max_pdu - bytes_scanned, len);
+}
+
 StreamSplitter::Status AtomSplitter::scan(
     Packet*, const uint8_t*, uint32_t len, uint32_t, uint32_t* fp)
 {
-    bytes += len;
+    bytes_scanned += len;
     segs++;
 
-    if ( segs >= 2 && bytes >= min )
+    if ( bytes_scanned < scan_footprint
+        && bytes_scanned < SnortConfig::get_conf()->max_pdu )
+        return SEARCH;
+
+    if ( segs >= 2 && bytes_scanned >= min )
     {
         *fp = len;
         return FLUSH;
@@ -84,9 +95,7 @@ StreamSplitter::Status AtomSplitter::scan(
 }
 
 void AtomSplitter::reset()
-{
-    bytes = segs = 0;
-}
+{  segs = scan_footprint = bytes_scanned = 0; }
 
 void AtomSplitter::update()
 {
index a2c19a83cfb1c31487ed92da039aaa5ad88068e3..febe21f41ef6971c7107f11840448700e352a4aa 100644 (file)
@@ -80,21 +80,28 @@ public:
         );
 
     virtual bool is_paf() { return false; }
-    virtual unsigned max(Flow*);
+    virtual unsigned max(Flow* = nullptr);
+    virtual unsigned adjust_to_fit(unsigned len) { return len; }
+    virtual void update()
+    {
+        scan_footprint = 0;
+        bytes_scanned = 0;
+    }
 
-    virtual void update() { }
+    void set_scan_footprint(unsigned fp)
+    { scan_footprint = fp; }
 
-    unsigned get_max_pdu() { return max_pdu; }
     bool to_server() { return c2s; }
     bool to_client() { return !c2s; }
 
 protected:
     StreamSplitter(bool b) : c2s(b) { }
     uint16_t get_flush_bucket_size();
+    unsigned scan_footprint = 0;
+    unsigned bytes_scanned = 0;
 
 private:
     const bool c2s;
-    const unsigned max_pdu = 65536;
 };
 
 //-------------------------------------------------------------------------
@@ -106,6 +113,7 @@ public:
     AtomSplitter(bool, uint16_t size = 0);
 
     Status scan(Packet*, const uint8_t*, uint32_t, uint32_t, uint32_t*) override;
+    unsigned adjust_to_fit(unsigned len) override;
     void update() override;
 
 private:
@@ -115,7 +123,6 @@ private:
     uint16_t base;
     uint16_t min;
     uint16_t segs;
-    uint16_t bytes;
 };
 
 //-------------------------------------------------------------------------
index 318025e6e4153ab1ecc36bc9129e2cce01e5be27..c97f5de7fbe2c5366913f44a27d6188793bd67f9 100644 (file)
@@ -48,6 +48,7 @@ void SegmentOverlapState::init_sos(TcpSession* ssn, StreamPolicy pol)
     total_bytes_queued = 0;
     total_segs_queued = 0;
     overlap_count = 0;
+    scan_total = 0;
 
     tsd = nullptr;
     left = nullptr;
index a040758324c532ea6ac5759b4e5b47ecb19bbf0d..6f789e4f23a85b58f40936d047e2358c30e8902f 100644 (file)
@@ -47,6 +47,7 @@ struct SegmentOverlapState
     uint32_t total_bytes_queued;    /* total bytes queued (life of session) */
     uint32_t total_segs_queued;     /* number of segments queued (life) */
     uint32_t overlap_count;         /* overlaps encountered */
+    uint32_t scan_total;
 
     uint32_t seq;
     uint32_t seq_end;
index e8329ed745017e97b1621b3b0ba715efaa5cd4f9..b318cbf9d6ae8d4a2765665100f2200221e9ebb3 100644 (file)
@@ -98,7 +98,7 @@ bool TcpReassembler::flush_data_ready(TcpReassemblerState& trs)
     if ( !trs.tracker->is_reassembly_enabled() )
         return false;
 
-    if ( (trs.tracker->get_flush_policy() == STREAM_FLPOLICY_ON_DATA) or trs.tracker->is_splitter_paf() )
+    if ( (trs.tracker->get_flush_policy() == STREAM_FLPOLICY_ON_DATA) || trs.tracker->is_splitter_paf() )
         return ( is_segment_pending_flush(trs) );
 
     return ( get_pending_segment_count(trs, 2) > 1 );  // FIXIT-L return false?
@@ -132,12 +132,12 @@ int TcpReassembler::delete_reassembly_segment(TcpReassemblerState& trs, TcpSegme
         trs.flush_count--;
     }
 
+    if ( trs.sos.seglist.cur_sseg == tsn )
+        trs.sos.seglist.cur_sseg = tsn->next;
+
     if ( trs.sos.seglist.cur_rseg == tsn )
         update_next(trs, *tsn);
 
-    if ( trs.sos.seglist.cur_pseg == tsn )
-        trs.sos.seglist.cur_pseg = nullptr;
-
     tsn->term();
     trs.sos.seg_count--;
 
@@ -148,10 +148,17 @@ void TcpReassembler::queue_reassembly_segment(
     TcpReassemblerState& trs, TcpSegmentNode* prev, TcpSegmentNode* tsn)
 {
     trs.sos.seglist.insert(prev, tsn);
-    if ( SEQ_EQ(tsn->i_seq, trs.sos.seglist_base_seq) )
+
+    if ( !trs.sos.seglist.cur_sseg )
+        trs.sos.seglist.cur_sseg = tsn;
+    else if ( SEQ_LT(tsn->c_seq, trs.sos.seglist.cur_sseg->c_seq) )
     {
-        tsn->c_seq = tsn->i_seq;
-        trs.sos.seglist.cur_rseg = tsn;
+        trs.sos.seglist.cur_sseg = tsn;
+        if ( SEQ_LT(tsn->c_seq, trs.sos.seglist_base_seq) )
+            trs.sos.seglist_base_seq = tsn->c_seq;
+
+        if ( trs.sos.seglist.cur_rseg && SEQ_LT(tsn->c_seq, trs.sos.seglist.cur_rseg->c_seq) )
+            trs.sos.seglist.cur_rseg = tsn;
     }
 
     trs.sos.seg_count++;
@@ -264,7 +271,7 @@ void TcpReassembler::purge_alerts(TcpReassemblerState& trs)
 
 void TcpReassembler::purge_to_seq(TcpReassemblerState& trs, uint32_t flush_seq)
 {
-    assert(trs.sos.seglist.head != nullptr);
+    assert( trs.sos.seglist.head );
     uint32_t last_ts = 0;
 
     TcpSegmentNode* tsn = trs.sos.seglist.head;
@@ -372,43 +379,27 @@ void TcpReassembler::show_rebuilt_packet(const TcpReassemblerState& trs, Packet*
     }
 }
 
-uint32_t TcpReassembler::get_flush_data_len(
-    TcpReassemblerState& trs, TcpSegmentNode* tsn, uint32_t to_seq, unsigned max)
-{
-    unsigned int flush_len = ( tsn->c_len <= max ) ? tsn->c_len : max;
-
-    // copy only to flush point
-    if ( paf_active(&trs.paf_state) && SEQ_GT(tsn->c_seq + flush_len, to_seq) )
-        flush_len = to_seq - tsn->c_seq;
-
-    return flush_len;
-}
-
-int TcpReassembler::flush_data_segments(
-    TcpReassemblerState& trs, Packet* p, uint32_t total, Packet* pdu)
+int TcpReassembler::flush_data_segments(TcpReassemblerState& trs, uint32_t flush_len, Packet* pdu)
 {
-    assert(trs.sos.seglist.cur_rseg);
-
-    uint32_t total_flushed = 0;
     uint32_t flags = PKT_PDU_HEAD;
-    uint32_t to_seq = trs.sos.seglist.cur_rseg->c_seq + total;
+    uint32_t to_seq = trs.sos.seglist.cur_rseg->c_seq + flush_len;
+    uint32_t remaining_bytes = flush_len;
+    uint32_t total_flushed = 0;
 
-    while ( SEQ_LT(trs.sos.seglist.cur_rseg->c_seq, to_seq) )
+    while ( remaining_bytes )
     {
         TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
-        unsigned bytes_copied = 0;
-        unsigned bytes_to_copy = get_flush_data_len(
-            trs, tsn, to_seq, trs.tracker->get_splitter()->max(p->flow));
-        assert(bytes_to_copy);
+        unsigned bytes_to_copy = ( tsn->c_len <= remaining_bytes ) ? tsn->c_len : remaining_bytes;
 
-        if ( !tsn->next or (bytes_to_copy < tsn->c_len) or
-            SEQ_EQ(tsn->c_seq + bytes_to_copy, to_seq) or
-            (total_flushed + tsn->c_len > trs.tracker->get_splitter()->get_max_pdu()) )
-        {
+        remaining_bytes -= bytes_to_copy;
+        if ( !remaining_bytes )
             flags |= PKT_PDU_TAIL;
-        }
+        else
+            assert( bytes_to_copy >= tsn->c_len );
+
+        unsigned bytes_copied = 0;
         const StreamBuffer sb = trs.tracker->get_splitter()->reassemble(
-            trs.sos.session->flow, total, total_flushed, tsn->payload(),
+            trs.sos.session->flow, flush_len, total_flushed, tsn->payload(),
             bytes_to_copy, flags, bytes_copied);
 
         if ( sb.data )
@@ -427,8 +418,6 @@ int TcpReassembler::flush_data_segments(
         {
             trs.flush_count++;
             update_next(trs, *tsn);
-            if ( SEQ_EQ(tsn->c_seq, to_seq) )
-                break;
         }
 
         /* Check for a gap/missing packet */
@@ -437,6 +426,7 @@ int TcpReassembler::flush_data_segments(
         // FIXIT-L FIN may be in to_seq causing bogus gap counts.
         if ( tsn->is_packet_missing(to_seq) )
         {
+            // FIXIT-H // assert(false); find when this scenario happens
             // FIXIT-L this is suboptimal - better to exclude fin from to_seq
             if ( !trs.tracker->is_fin_seq_set() or
                 SEQ_LEQ(to_seq, trs.tracker->get_fin_final_seq()) )
@@ -522,121 +512,62 @@ Packet* TcpReassembler::initialize_pdu(
     return pdu;
 }
 
-int TcpReassembler::_flush_to_seq(
+// flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
+int TcpReassembler::flush_to_seq(
     TcpReassemblerState& trs, uint32_t bytes, Packet* p, uint32_t pkt_flags)
 {
-    if ( !p )
-    {
-        // FIXIT-M we need to have user_policy_id in this case
-        // FIXIT-M this leads to format_tcp() copying from pdu to pdu
-        // (neither of these issues is created by passing null through to here)
-        p = DetectionEngine::set_next_packet();
-    }
-
-    uint32_t bytes_processed = 0;
-    uint32_t stop_seq = trs.sos.seglist.cur_rseg->c_seq + bytes;
-
-    while ( trs.sos.seglist.cur_rseg and SEQ_LT(trs.sos.seglist.cur_rseg->c_seq, stop_seq) )
-    {
-        trs.sos.seglist_base_seq = trs.sos.seglist.cur_rseg->c_seq;
-        uint32_t footprint = stop_seq - trs.sos.seglist_base_seq;
-
-        if ( footprint == 0 )
-            return bytes_processed;
+    assert( p && trs.sos.seglist.cur_rseg);
 
-        if ( footprint > Packet::max_dsize )    // max stream buffer size
-            footprint = Packet::max_dsize;
-
-        if ( trs.tracker->is_splitter_paf()
-            and ( trs.tracker->get_tf_flags() & TF_MISSING_PREV_PKT ) )
-            fallback(*trs.tracker, trs.server_side);
-
-        Packet* pdu = initialize_pdu(trs, p, pkt_flags, trs.sos.seglist.cur_rseg->tv);
-        int32_t flushed_bytes = flush_data_segments(trs, p, footprint, pdu);
-        if ( flushed_bytes == 0 )
-            break; /* No more data... bail */
-
-        bytes_processed += flushed_bytes;
-        trs.sos.seglist_base_seq += flushed_bytes;
+    trs.tracker->clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
 
-        if ( pdu->data )
-        {
-            if ( p->packet_flags & PKT_PDU_TAIL )
-                pdu->packet_flags |= ( PKT_REBUILT_STREAM | PKT_STREAM_EST | PKT_PDU_TAIL );
-            else
-                pdu->packet_flags |= ( PKT_REBUILT_STREAM | PKT_STREAM_EST );
+    TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
+    assert( trs.sos.seglist_base_seq == tsn->c_seq);
 
-            show_rebuilt_packet(trs, pdu);
-            tcpStats.rebuilt_packets++;
-            tcpStats.rebuilt_bytes += flushed_bytes;
+    Packet* pdu = initialize_pdu(trs, p, pkt_flags, tsn->tv);
+    int32_t flushed_bytes = flush_data_segments(trs, bytes, pdu);
+    assert( flushed_bytes );
 
-            if ( !Analyzer::get_local_analyzer()->inspect_rebuilt(pdu) )
-                last_pdu = pdu;
-            else
-                last_pdu = nullptr;
+    trs.sos.seglist_base_seq += flushed_bytes;
 
-            trs.tracker->finalize_held_packet(p);
-        }
+    if ( pdu->data )
+    {
+        if ( p->packet_flags & PKT_PDU_TAIL )
+            pdu->packet_flags |= ( PKT_REBUILT_STREAM | PKT_STREAM_EST | PKT_PDU_TAIL );
         else
-        {
-            tcpStats.rebuilt_buffers++; // FIXIT-L this is not accurate
-            last_pdu = nullptr;
-        }
+            pdu->packet_flags |= ( PKT_REBUILT_STREAM | PKT_STREAM_EST );
 
-        // FIXIT-L must check because above may clear trs.sos.session
-        if ( trs.tracker->get_splitter() )
-            trs.tracker->get_splitter()->update();
+        show_rebuilt_packet(trs, pdu);
+        tcpStats.rebuilt_packets++;
+        tcpStats.rebuilt_bytes += flushed_bytes;
 
-        // FIXIT-L abort should be by PAF callback only since recovery may be possible
-        if ( trs.tracker->get_tf_flags() & TF_MISSING_PKT )
-        {
-            trs.tracker->set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
-            trs.tracker->clear_tf_flags(TF_MISSING_PKT);
-            tcpStats.gaps++;
-        }
+        if ( !Analyzer::get_local_analyzer()->inspect_rebuilt(pdu) )
+            last_pdu = pdu;
         else
-            trs.tracker->clear_tf_flags(TF_MISSING_PREV_PKT);
+            last_pdu = nullptr;
 
-        // check here instead of in while to allow single segment flushes
-        if ( !flush_data_ready(trs) )
-            break;
+        trs.tracker->finalize_held_packet(p);
     }
-
-    return bytes_processed;
-}
-
-// flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
-int TcpReassembler::flush_to_seq(
-    TcpReassemblerState& trs, uint32_t bytes, Packet* p, uint32_t pkt_flags)
-{
-    if ( !bytes || !trs.sos.seglist.cur_rseg )
-        return 0;
-
-    if ( !flush_data_ready(trs) and !(trs.tracker->get_tf_flags() & TF_FORCE_FLUSH) and
-        !trs.tracker->is_splitter_paf() )
-        return 0;
-
-    trs.tracker->clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
-
-    /* This will set this flag on the first reassembly
-     * if reassembly for this direction was set midstream */
-    if ( SEQ_LT(trs.sos.seglist_base_seq, trs.sos.seglist.cur_rseg->c_seq) )
+    else
     {
-        uint32_t missed = trs.sos.seglist.cur_rseg->c_seq - trs.sos.seglist_base_seq;
+        tcpStats.rebuilt_buffers++; // FIXIT-L this is not accurate
+        last_pdu = nullptr;
+    }
 
-        if ( missed <= bytes )
-            bytes -= missed;
+    // FIXIT-L must check because above may clear trs.sos.session
+    if ( trs.tracker->get_splitter() )
+        trs.tracker->get_splitter()->update();
 
+    // FIXIT-L abort should be by PAF callback only since recovery may be possible
+    if ( trs.tracker->get_tf_flags() & TF_MISSING_PKT )
+    {
         trs.tracker->set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
-
+        trs.tracker->clear_tf_flags(TF_MISSING_PKT);
         tcpStats.gaps++;
-        trs.sos.seglist_base_seq = trs.sos.seglist.cur_rseg->c_seq;
-
-        if ( !bytes )
-            return 0;
     }
+    else
+        trs.tracker->clear_tf_flags(TF_MISSING_PREV_PKT);
 
-    return _flush_to_seq(trs, bytes, p, pkt_flags);
+    return flushed_bytes;
 }
 
 // flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
@@ -671,12 +602,11 @@ int TcpReassembler::do_zero_byte_flush(TcpReassemblerState& trs, Packet* p, uint
 
 uint32_t TcpReassembler::get_q_footprint(TcpReassemblerState& trs)
 {
-    int32_t footprint = 0, sequenced = 0;
-
-    if ( !trs.tracker )
-        return 0;
+    int32_t footprint = 0;
+    int32_t sequenced = 0;
 
-    footprint = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
+    if ( SEQ_GT(trs.tracker->r_win_base, trs.sos.seglist_base_seq) )
+        footprint = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
 
     if ( footprint )
     {
@@ -701,14 +631,14 @@ uint32_t TcpReassembler::get_q_sequenced(TcpReassemblerState& trs)
     {
         tsn = trs.sos.seglist.head;
 
-        if ( !tsn or SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
-            return false;
+        if ( !tsn || SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+            return 0;
 
         trs.sos.seglist.cur_rseg = tsn;
     }
-    uint32_t len = 0;
-    const uint32_t limit = trs.tracker->get_splitter()->get_max_pdu();
 
+    uint32_t len = 0;
+    const uint32_t limit = trs.tracker->get_splitter()->max();
     while ( len < limit and next_no_gap(*tsn) )
     {
         if ( !tsn->c_len )
@@ -734,11 +664,12 @@ bool TcpReassembler::is_q_sequenced(TcpReassemblerState& trs)
     {
         tsn = trs.sos.seglist.head;
 
-        if ( !tsn or SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+        if ( !tsn || SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
             return false;
 
         trs.sos.seglist.cur_rseg = tsn;
     }
+
     while ( next_no_gap(*tsn) )
     {
         if ( tsn->c_len )
@@ -746,14 +677,12 @@ bool TcpReassembler::is_q_sequenced(TcpReassemblerState& trs)
 
         tsn = trs.sos.seglist.cur_rseg = tsn->next;
     }
+
     trs.sos.seglist_base_seq = tsn->c_seq;
 
     return (tsn->c_len != 0);
 }
 
-// FIXIT-L flush_stream() calls should be replaced with calls to
-// CheckFlushPolicyOn*() with the exception that for the *OnAck() case,
-// any available ackd data must be flushed in both directions.
 int TcpReassembler::flush_stream(
     TcpReassemblerState& trs, Packet* p, uint32_t dir, bool final_flush)
 {
@@ -761,17 +690,18 @@ int TcpReassembler::flush_stream(
     if ( !trs.tracker->is_reassembly_enabled() )
         return 0;
 
-    uint32_t bytes;
+    if ( trs.sos.session->flow->two_way_traffic() )
+    {
+        uint32_t bytes = 0;
 
-    if ( !trs.sos.session->flow->two_way_traffic() )
-        bytes = 0;
-    else if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
-        bytes = get_q_sequenced(trs);
-    else
-        bytes = get_q_footprint(trs);
+        if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
+            bytes = get_q_sequenced(trs);
+        else
+            bytes = get_q_footprint(trs);
 
-    if ( bytes )
-        return flush_to_seq(trs, bytes, p, dir);
+        if ( bytes )
+            return flush_to_seq(trs, bytes, p, dir);
+    }
 
     if ( final_flush )
         return do_zero_byte_flush(trs, p, dir);
@@ -844,14 +774,13 @@ static Packet* set_packet(Flow* flow, uint32_t flags, bool c2s)
 void TcpReassembler::flush_queued_segments(
     TcpReassemblerState& trs, Flow* flow, bool clear, Packet* p)
 {
+    // if flushing outside the context of wire packet p will be null, initialize
+    // Packet object allocated for the current IpsContext
     if ( !p )
-    {
-        // this packet is required if we call finish and/or final_flush
         p = set_packet(flow, trs.packet_dir, trs.server_side);
-    }
 
     bool pending = clear and paf_initialized(&trs.paf_state)
-        and (!trs.tracker->get_splitter() or trs.tracker->get_splitter()->finish(flow) );
+        and (!trs.tracker->get_splitter() || trs.tracker->get_splitter()->finish(flow) );
 
     if ( pending and !(flow->ssn_state.ignore_direction & trs.ignore_dir) )
         final_flush(trs, p, trs.packet_dir);
@@ -889,28 +818,24 @@ uint32_t TcpReassembler::get_forward_packet_dir(TcpReassemblerState&, const Pack
     return 0;
 }
 
-// see flush_pdu_ackd() for details
+// see scan_data_post_ack() for details
 // the key difference is that we operate on forward moving data
 // because we don't wait until it is acknowledged
-int32_t TcpReassembler::flush_pdu_ips(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
+int32_t TcpReassembler::scan_data_pre_ack(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
 {
     assert(trs.sos.session->flow == p->flow);
 
-    if ( !is_q_sequenced(trs) )
+    if ( SEQ_GT(trs.sos.seglist.head->c_seq, trs.sos.seglist_base_seq) )
         return -1;
 
-    TcpSegmentNode* tsn = trs.sos.seglist.cur_pseg;
-    uint32_t total = 0;
-
-    if ( !tsn )
-        tsn = trs.sos.seglist.cur_rseg;
+    if ( !trs.sos.seglist.cur_rseg )
+        trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
 
-    else if ( paf_initialized(&trs.paf_state) )
-    {
-        assert(trs.sos.seglist.cur_rseg);
-        total = tsn->c_seq - trs.sos.seglist.cur_rseg->c_seq;
-    }
+    if ( !is_q_sequenced(trs) )
+        return -1;
 
+    TcpSegmentNode* tsn = trs.sos.seglist.cur_sseg;
+    uint32_t total = tsn->c_seq - trs.sos.seglist_base_seq;
     while ( tsn && *flags )
     {
         total += tsn->c_len;
@@ -921,7 +846,7 @@ int32_t TcpReassembler::flush_pdu_ips(TcpReassemblerState& trs, uint32_t* flags,
         if ( paf_initialized(&trs.paf_state) && SEQ_LEQ(end, pos) )
         {
             if ( !next_no_gap(*tsn) )
-                return -1;
+                break;
 
             tsn = tsn->next;
             continue;
@@ -933,17 +858,17 @@ int32_t TcpReassembler::flush_pdu_ips(TcpReassemblerState& trs, uint32_t* flags,
 
         if (flush_pt >= 0)
         {
-            trs.sos.seglist.cur_pseg = nullptr;
+            trs.sos.seglist.cur_sseg = tsn;
             return flush_pt;
         }
 
-        trs.sos.seglist.cur_pseg = tsn;
-
         if ( !next_no_gap(*tsn) )
-            return -1;
+            break;
 
         tsn = tsn->next;
     }
+
+    trs.sos.seglist.cur_sseg = tsn;
     return -1;
 }
 
@@ -956,7 +881,7 @@ static inline bool both_splitters_aborted(Flow* flow)
 static inline void fallback(TcpStreamTracker& trk, bool server_side, uint16_t max)
 {
     trk.set_splitter(new AtomSplitter(!server_side, max));
-    trk.reassembler.reset_paf();
+    trk.reassembler.setup_paf();
     tcpStats.partial_fallbacks++;
 }
 
@@ -992,68 +917,71 @@ void TcpReassembler::fallback(TcpStreamTracker& tracker, bool server_side)
 //   it may contain multiple encapsulated PDUs
 // - if we partially scan a segment we must save state so we
 //   know where we left off and can resume scanning the remainder
-
-int32_t TcpReassembler::flush_pdu_ackd(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
+int32_t TcpReassembler::scan_data_post_ack(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
 {
     assert(trs.sos.session->flow == p->flow);
 
-    uint32_t total = 0;
-    TcpSegmentNode* tsn = SEQ_LT(trs.sos.seglist_base_seq, trs.tracker->r_win_base) ?
-        trs.sos.seglist.head : nullptr;
+    if ( !trs.sos.seglist.cur_sseg || SEQ_GEQ(trs.sos.seglist_base_seq, trs.tracker->r_win_base) )
+        return -1;
 
-    // must stop if not acked
-    // must use adjusted size of tsn if not fully acked
-    // must stop if gap (checked in paf_check)
-    while (tsn && *flags && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base))
+    if ( !trs.sos.seglist.cur_rseg )
+        trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
+
+    StreamSplitter* splitter = trs.tracker->get_splitter();
+    if ( !splitter->is_paf() )
     {
-        uint32_t size = tsn->c_len;
-        uint32_t end = tsn->c_seq + tsn->c_len;
-        uint32_t pos = paf_position(&trs.paf_state);
+        // init splitter with current length of in sequence bytes..
+        int32_t footprint = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
+        if ( footprint > 0 )
+            splitter->set_scan_footprint(footprint);
+    }
 
-        if ( paf_initialized(&trs.paf_state) && SEQ_LEQ(end, pos) )
+    uint32_t total = 0;
+    TcpSegmentNode* tsn = trs.sos.seglist.cur_sseg;
+    if ( paf_initialized(&trs.paf_state) )
+    {
+        uint32_t end_seq = tsn->c_seq + tsn->c_len;
+        if ( SEQ_EQ(end_seq, paf_position(&trs.paf_state)) )
         {
-            total += size;
+            if ( splitter->is_paf() and !next_no_gap(*tsn) )
+                return -1;
+
+            total = end_seq - trs.sos.seglist_base_seq;
             tsn = tsn->next;
-            continue;
         }
+        else
+            total = tsn->c_seq - trs.sos.seglist.cur_rseg->c_seq;
+    }
 
+    while (tsn && *flags && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base))
+    {
+        // only flush acked data that fits in pdu reassembly buffer...
+        uint32_t flush_len;
+        uint32_t end = tsn->c_seq + tsn->c_len;
         if ( SEQ_GT(end, trs.tracker->r_win_base))
-            size = trs.tracker->r_win_base - tsn->c_seq;
+            flush_len = splitter->adjust_to_fit(trs.tracker->r_win_base - tsn->c_seq);
+        else
+            flush_len = splitter->adjust_to_fit(tsn->c_len);
 
-        total += size;
+        total += flush_len;
 
-        int32_t flush_pt = paf_check(
-            trs.tracker->get_splitter(), &trs.paf_state, p, tsn->payload(),
-            size, total, tsn->c_seq, flags);
+        int32_t flush_pt = paf_check(splitter, &trs.paf_state, p, tsn->payload(),
+            flush_len, total, tsn->c_seq, flags);
+
+        trs.sos.seglist.cur_sseg = tsn;
 
         if ( flush_pt >= 0 )
         {
-            trs.sos.seglist.cur_rseg = trs.sos.seglist.head;
-            trs.sos.seglist_base_seq = trs.sos.seglist.head->c_seq;
-
-            // FIXIT-L this special case should be eliminated
-            // the splitters should be the sole source of where to flush
-
-            // for non-paf splitters, flush_pt > 0 means we reached
-            // the minimum required, but we flush what is available
-            // instead of creating more, but smaller, packets
-            // FIXIT-L just flush to end of segment to avoid splitting
-            // instead of all avail?
-            if ( !trs.tracker->is_splitter_paf() )
-            {
-                // get_q_footprint() w/o side effects
-                int32_t avail = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
-
-                if ( avail > flush_pt )
-                {
-                    paf_jump(&trs.paf_state, avail - flush_pt);
-                    return avail;
-                }
-            }
+            trs.sos.seglist_base_seq = trs.sos.seglist.cur_rseg->c_seq;
             return flush_pt;
         }
+
+        if ( flush_len < tsn->c_len || ( splitter->is_paf() and !next_no_gap(*tsn) ) )
+            break;
+
         tsn = tsn->next;
     }
+
     return -1;
 }
 
@@ -1071,33 +999,31 @@ int TcpReassembler::flush_on_data_policy(TcpReassemblerState& trs, Packet* p)
         break;
 
     case STREAM_FLPOLICY_ON_DATA:
-    {
-        uint32_t flags = get_forward_packet_dir(trs, p);
-        int32_t flush_amt = flush_pdu_ips(trs, &flags, p);
-
-        while ( flush_amt >= 0 )
+        if ( trs.sos.seglist.head )
         {
-            if ( !flush_amt )
-                flush_amt = trs.sos.seglist.cur_rseg->c_seq - trs.sos.seglist_base_seq;
-
-            uint32_t this_flush = flush_to_seq(trs, flush_amt, p, flags);
-            if (!this_flush)
-                break;       // bail if nothing flushed
+            uint32_t flags;
+            do
+            {
+                flags = get_forward_packet_dir(trs, p);
+                int32_t flush_amt = scan_data_pre_ack(trs, &flags, p);
+                if ( flush_amt <= 0 )
+                    break;
 
-            flushed += this_flush;
-            flags = get_forward_packet_dir(trs, p);
-            flush_amt = flush_pdu_ips(trs, &flags, p);
-        }
+                flushed += flush_to_seq(trs, flush_amt, p, flags);
+            } while( trs.sos.seglist.head );
 
-        if ( !flags && trs.tracker->is_splitter_paf() )
-        {
-            fallback(*trs.tracker, trs.server_side);
-            return flush_on_data_policy(trs, p);
+            if ( !flags && trs.tracker->is_splitter_paf() )
+            {
+                fallback(*trs.tracker, trs.server_side);
+                return flush_on_data_policy(trs, p);
+            }
         }
-    }
         break;
     }
 
+    if ( !trs.sos.seglist.head )
+        return flushed;
+
     if ( trs.tracker->is_retransmit_of_held_packet(p) )
         flushed = perform_partial_flush(trs, p, flushed);
 
@@ -1109,7 +1035,7 @@ int TcpReassembler::flush_on_data_policy(TcpReassemblerState& trs, Packet* p)
     {
         TcpStreamTracker::TcpState peer = trs.tracker->session->get_peer_state(trs.tracker);
 
-        if ( peer == TcpStreamTracker::TCP_SYN_SENT or peer == TcpStreamTracker::TCP_SYN_RECV )
+        if ( peer == TcpStreamTracker::TCP_SYN_SENT || peer == TcpStreamTracker::TCP_SYN_RECV )
         {
             purge_to_seq(trs, trs.sos.seglist.head->i_seq + flushed);
             trs.tracker->r_win_base = trs.sos.seglist_base_seq;
@@ -1130,31 +1056,25 @@ int TcpReassembler::flush_on_ack_policy(TcpReassemblerState& trs, Packet* p)
 
     case STREAM_FLPOLICY_ON_ACK:
     {
-        uint32_t flags = get_reverse_packet_dir(trs, p);
-        int32_t flush_amt = flush_pdu_ackd(trs, &flags, p);
-
-        while (flush_amt >= 0)
+        do
         {
-            if ( !flush_amt )
-                flush_amt = trs.sos.seglist.cur_rseg->c_seq - trs.sos.seglist_base_seq;
+            uint32_t flags = get_reverse_packet_dir(trs, p);
+            int32_t flush_amt = scan_data_post_ack(trs, &flags, p);
+            if ( flush_amt <= 0 )
+                break;
 
             if ( trs.paf_state.paf == StreamSplitter::ABORT )
                 trs.tracker->get_splitter()->finish(p->flow);
 
             // for consistency with other cases, should return total
             // but that breaks flushing pipelined pdus
-            flushed = flush_to_seq(trs, flush_amt, p, flags);
+            flushed += flush_to_seq(trs, flush_amt, p, flags);
+            assert( flushed );
 
             // ideally we would purge just once after this loop but that throws off base
-            if ( flushed and trs.sos.seglist.head )
-            {
+            if ( trs.sos.seglist.head )
                 purge_to_seq(trs, trs.sos.seglist_base_seq);
-                flags = get_reverse_packet_dir(trs, p);
-                flush_amt = flush_pdu_ackd(trs, &flags, p);
-            }
-            else
-                break;  // bail if nothing flushed
-        }
+        } while ( trs.sos.seglist.head );
 
         if ( (trs.paf_state.paf == StreamSplitter::ABORT) && trs.tracker->is_splitter_paf() )
         {
@@ -1192,26 +1112,15 @@ void TcpReassembler::insert_segment_in_empty_seglist(
     if ( tcph->is_syn() )
         seq++;
 
-    if ( SEQ_GT(trs.tracker->r_win_base, seq) )
+    if ( SEQ_GT(trs.sos.seglist_base_seq, seq) )
     {
-        overlap = trs.tracker->r_win_base - tsd.get_seq();
-
+        overlap = trs.sos.seglist_base_seq - tsd.get_seq();
         if ( overlap >= tsd.get_len() )
             return;
     }
 
-    if ( SEQ_GT(trs.sos.seglist_base_seq, seq + overlap) )
-    {
-        overlap = trs.sos.seglist_base_seq- seq - overlap;
-
-        if ( overlap >= tsd.get_len() )
-            return;
-    }
-
-    // BLOCK add new block to trs.sos.seglist containing data
     add_reassembly_segment(
         trs, tsd, tsd.get_len(), overlap, 0, seq + overlap, nullptr);
-
 }
 
 void TcpReassembler::init_overlap_editor(
index 2d90eb3cb336b04020d39b0e1860b3f6f2d06e32..04c88f868c34d494b353d96dacdb5e2c274ed571 100644 (file)
@@ -63,34 +63,29 @@ protected:
     int trim_delete_reassembly_segment(TcpReassemblerState&, TcpSegmentNode*, uint32_t flush_seq);
     void queue_reassembly_segment(TcpReassemblerState&, TcpSegmentNode* prev, TcpSegmentNode*);
     void init_overlap_editor(TcpReassemblerState&, TcpSegmentDescriptor&);
-    bool is_segment_fasttrack(TcpReassemblerState&, TcpSegmentNode* tail, const TcpSegmentDescriptor&);
+    bool is_segment_fasttrack
+        (TcpReassemblerState&, TcpSegmentNode* tail, const TcpSegmentDescriptor&);
     void show_rebuilt_packet(const TcpReassemblerState&, snort::Packet*);
-    uint32_t get_flush_data_len(
-        TcpReassemblerState&, TcpSegmentNode*, uint32_t to_seq, unsigned max);
-    int flush_data_segments(
-        TcpReassemblerState&, snort::Packet*, uint32_t total, snort::Packet* pdu);
+    int flush_data_segments(TcpReassemblerState&, uint32_t flush_len, snort::Packet* pdu);
     void prep_pdu(
-        TcpReassemblerState&, snort::Flow*, snort::Packet*, uint32_t pkt_flags,
-        snort::Packet* pdu);
+        TcpReassemblerState&, snort::Flow*, snort::Packet*, uint32_t pkt_flags, snort::Packet*);
     snort::Packet* initialize_pdu(
-        TcpReassemblerState&, snort::Packet* p, uint32_t pkt_flags, struct timeval tv);
-    int _flush_to_seq(TcpReassemblerState&, uint32_t bytes, snort::Packet*, uint32_t pkt_flags);
+        TcpReassemblerState&, snort::Packet*, uint32_t pkt_flags, struct timeval);
     int flush_to_seq(TcpReassemblerState&, uint32_t bytes, snort::Packet*, uint32_t pkt_flags);
-    int do_zero_byte_flush(TcpReassemblerState&, snort::Packet* p, uint32_t pkt_flags);
+    int do_zero_byte_flush(TcpReassemblerState&, snort::Packet*, uint32_t pkt_flags);
     uint32_t get_q_footprint(TcpReassemblerState&);
     uint32_t get_q_sequenced(TcpReassemblerState&);
     bool is_q_sequenced(TcpReassemblerState&);
     void final_flush(TcpReassemblerState&, snort::Packet*, uint32_t dir);
     uint32_t get_reverse_packet_dir(TcpReassemblerState&, const snort::Packet*);
     uint32_t get_forward_packet_dir(TcpReassemblerState&, const snort::Packet*);
-    int32_t flush_pdu_ips(TcpReassemblerState&, uint32_t*, snort::Packet*);
+    int32_t scan_data_pre_ack(TcpReassemblerState&, uint32_t*, snort::Packet*);
     void fallback(TcpStreamTracker&, bool server_side);
-    int32_t flush_pdu_ackd(TcpReassemblerState&, uint32_t* flags, snort::Packet*);
+    int32_t scan_data_post_ack(TcpReassemblerState&, uint32_t* flags, snort::Packet*);
     void purge_to_seq(TcpReassemblerState&, uint32_t flush_seq);
 
     bool next_no_gap(const TcpSegmentNode&);
     void update_next(TcpReassemblerState&, const TcpSegmentNode&);
-
     uint32_t perform_partial_flush(TcpReassemblerState&, snort::Packet*, uint32_t flushed = 0);
 };
 
index 4b24d8fee43d15aa57c31ab3571b8a84875b21f6..7dbc293f84006fe8ebdf6bc45d77eb6f6e934bdf 100644 (file)
@@ -255,6 +255,7 @@ private:
 void TcpReassemblerPolicy::init(TcpSession* ssn, TcpStreamTracker* trk, StreamPolicy pol, bool server)
 {
     trs.sos.init_sos(ssn, pol);
+    setup_paf();
     trs.server_side = server;
     trs.tracker = trk;
 
index 9e961c167b1d9176354cbe5af4ad44853deffc27..0e529e094fbc510d57af8e140a00c7ba08034324 100644 (file)
@@ -118,9 +118,6 @@ public:
     void set_norm_mode_test()
     { trs.sos.tcp_ips_data = NORM_MODE_TEST; }
 
-    void reset_paf_segment()
-    { trs.sos.seglist.cur_pseg = nullptr; }
-
     uint32_t perform_partial_flush(snort::Flow* flow)
     { return reassembler->perform_partial_flush(trs, flow); }
 
@@ -131,7 +128,13 @@ public:
     { paf_clear(&trs.paf_state); }
 
     void setup_paf()
-    { paf_setup(&trs.paf_state); }
+    {
+        paf_setup(&trs.paf_state);
+        if ( trs.sos.seglist.cur_rseg )
+            trs.sos.seglist.cur_sseg = trs.sos.seglist.cur_rseg;
+        else
+            trs.sos.seglist.cur_sseg = trs.sos.seglist.head;
+    }
 
 private:
     TcpReassembler* reassembler = nullptr;
index ff9fb1a7a95f316ba1a0ee6143ff951954cf1797..5761d51fdaa795f2d8644962ec29208ecb562e35 100644 (file)
@@ -91,7 +91,7 @@ public:
             dump_me->term();
         }
 
-        head = tail = cur_rseg = cur_pseg = nullptr;
+        head = tail = cur_rseg = cur_sseg = nullptr;
         count = 0;
         return i;
     }
@@ -141,7 +141,7 @@ public:
     TcpSegmentNode* head = nullptr;
     TcpSegmentNode* tail = nullptr;
     TcpSegmentNode* cur_rseg = nullptr;
-    TcpSegmentNode* cur_pseg = nullptr;
+    TcpSegmentNode* cur_sseg = nullptr;
     uint32_t count = 0;
 };
 
index 0eaa8e07f4909ffd0bfa2d6aa810a1bc7e0844aa..692e9bd74b0f9f6ce29cc7dff11af97a5c5ce6ea 100644 (file)
@@ -736,7 +736,7 @@ bool TcpSession::check_for_window_slam(TcpSegmentDescriptor& tsd)
     }
     else if ( tsd.is_packet_from_client() && (tsd.get_wnd() <= SLAM_MAX)
         && (tsd.get_ack() == listener->get_iss() + 1)
-        && !(tsd.get_tcph()->is_fin() | tsd.get_tcph()->is_rst())
+        && !(tsd.get_tcph()->is_fin() || tsd.get_tcph()->is_rst())
         && !(flow->get_session_flags() & SSNFLAG_MIDSTREAM))
     {
         /* got a window slam alert! */
@@ -761,6 +761,13 @@ void TcpSession::handle_data_segment(TcpSegmentDescriptor& tsd)
     TcpStreamTracker* listener = tsd.get_listener();
     TcpStreamTracker* talker = tsd.get_talker();
 
+    // if this session started midstream we may need to init the listener's base seq #
+    if ( listener->reinit_seg_base )
+    {
+        listener->reassembler.set_seglist_base_seq(tsd.get_seq());
+        listener->reinit_seg_base = false;
+    }
+
     if ( TcpStreamTracker::TCP_CLOSED != talker->get_tcp_state() )
     {
         uint8_t tcp_options_len = tsd.get_tcph()->options_len();
index f36e1cade2bfa41d46f37aa5fb5b2a51cc0fa0f1..43d05c90df03651eb57481c66a9a71d67a522e00 100644 (file)
@@ -64,8 +64,18 @@ bool TcpStateCloseWait::data_seg_sent(TcpSegmentDescriptor& tsd, TcpStreamTracke
 
 bool TcpStateCloseWait::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    trk.update_tracker_ack_recv(tsd);
-    trk.session->handle_data_segment(tsd);
+    if ( SEQ_GT(tsd.get_seq(), trk.get_fin_final_seq() ) )
+    {
+        trk.session->tel.set_tcp_event(EVENT_DATA_ON_CLOSED);
+        trk.normalizer.packet_dropper(tsd, NORM_TCP_BLOCK);
+        trk.session->set_pkt_action_flag(ACTION_BAD_PKT);
+    }
+    else
+    {
+        trk.update_tracker_ack_recv(tsd);
+        trk.session->handle_data_segment(tsd);
+    }
+
     return true;
 }
 
index 43e72b933dfdac92480b4a979f260acd19478108..6866a16d0016b6a7a5725118e72977eb6678a5ea 100644 (file)
@@ -79,10 +79,7 @@ bool TcpStateFinWait1::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker
 {
     trk.update_tracker_ack_recv(tsd);
     if ( check_for_window_slam(tsd, trk) )
-    {
-        if ( tsd.is_data_segment() )
-            trk.session->handle_data_segment(tsd);
-    }
+        trk.session->handle_data_segment(tsd);
     return true;
 }
 
index 81df9352fa117528fce21b8967572d0ab9aa4b87..85843356db4b0ecf9220d5fbb13f0c66295a55bb 100644 (file)
@@ -95,8 +95,7 @@ bool TcpStateFinWait2::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker
     else
     {
         trk.update_tracker_ack_recv(tsd);
-        if ( tsd.is_data_segment() )
-            trk.session->handle_data_segment(tsd);
+        trk.session->handle_data_segment(tsd);
     }
 
     return true;
index 0f9f3dd4230599b7a982d4aa76824e310ef9afd4..839148cd8853c84dccc783235042c6763b7ef90a 100644 (file)
@@ -92,8 +92,7 @@ bool TcpStateListen::syn_ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& t
 
 bool TcpStateListen::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( trk.session->tcp_config->midstream_allowed(tsd.get_pkt())
-         && (tsd.has_wscale() || tsd.is_data_segment()) )
+    if ( trk.session->tcp_config->midstream_allowed(tsd.get_pkt()) && tsd.has_wscale() )
     {
         Flow* flow = tsd.get_flow();
         flow->session_state |= ( STREAM_STATE_ACK | STREAM_STATE_SYN_ACK |
@@ -113,7 +112,7 @@ bool TcpStateListen::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 
 bool TcpStateListen::ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( trk.session->is_midstream_allowed(tsd) && (tsd.has_wscale() || tsd.is_data_segment()) )
+    if ( trk.session->is_midstream_allowed(tsd) && tsd.has_wscale() )
     {
         Flow* flow = tsd.get_flow();
 
@@ -185,7 +184,7 @@ bool TcpStateListen::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker&
 
 bool TcpStateListen::fin_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( !trk.session->is_midstream_allowed(tsd) and trk.session->tcp_config->require_3whs() )
+    if ( !trk.session->is_midstream_allowed(tsd) && trk.session->tcp_config->require_3whs() )
     {
         // FIXIT-L listen gets fin triggers 129:20 ??
         trk.session->generate_no_3whs_event();
index 95a63cde323b9f10d5f6fd92aabe4b69e6693ffa..24a3236a74981dc498fb1ff649465f3ddddfb5d5 100644 (file)
@@ -88,7 +88,7 @@ bool TcpStateNone::syn_ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk
 
 bool TcpStateNone::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( trk.session->is_midstream_allowed(tsd) && (tsd.has_wscale() || tsd.is_data_segment()) )
+    if ( trk.session->is_midstream_allowed(tsd) && tsd.has_wscale() )
     {
         Flow* flow = tsd.get_flow();
 
@@ -108,7 +108,7 @@ bool TcpStateNone::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 
 bool TcpStateNone::ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( trk.session->is_midstream_allowed(tsd) && (tsd.has_wscale() || tsd.is_data_segment()) )
+    if ( trk.session->is_midstream_allowed(tsd) && tsd.has_wscale() )
     {
         Flow* flow = tsd.get_flow();
 
index bab35624b6a59261db934a9aae3c2a5dc13df086..238e410a549506c5113600c390b54bd9cb403f42 100644 (file)
@@ -114,10 +114,7 @@ bool TcpStateSynRecv::ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
         flow->session_state |= ( STREAM_STATE_ACK | STREAM_STATE_ESTABLISHED );
         trk.session->update_perf_base_state(TcpStreamTracker::TCP_ESTABLISHED);
         trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
-        if ( tsd.is_data_segment() )
-            trk.session->handle_data_segment(tsd);
-        else
-            trk.session->check_for_window_slam(tsd);
+        trk.session->check_for_window_slam(tsd);
     }
 
     return true;
@@ -142,8 +139,8 @@ bool TcpStateSynRecv::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker&
         trk.session->update_perf_base_state(TcpStreamTracker::TCP_ESTABLISHED);
         trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
     }
-    if ( tsd.is_data_segment() )
-        trk.session->handle_data_segment(tsd);
+
+    trk.session->handle_data_segment(tsd);
 
     return true;
 }
index ad37f36e6334f31318771157667d0a0c918e053b..ee4843572a5a47b39b97925889a98998e45f60ca 100644 (file)
@@ -80,9 +80,7 @@ bool TcpStateSynSent::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 
 bool TcpStateSynSent::ack_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    if ( !tsd.is_meta_ack_packet() && tsd.is_data_segment() )
-        trk.session->handle_data_segment(tsd);
-
+    trk.update_on_3whs_ack(tsd);
     return true;
 }
 
@@ -102,7 +100,8 @@ bool TcpStateSynSent::data_seg_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker&
 
 bool TcpStateSynSent::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
 {
-    trk.session->handle_data_segment(tsd);
+    if ( trk.update_on_3whs_ack(tsd) )
+        trk.session->handle_data_segment(tsd);
 
     return true;
 }
index 12bc51e1ed20004d704f44529a2f624f894e2814..f24400db2bfc3d172fc9a1aa46e0a619000841c3 100644 (file)
@@ -204,7 +204,7 @@ void TcpStreamTracker::init_tcp_state()
         TcpStreamTracker::TCP_STATE_NONE : TcpStreamTracker::TCP_LISTEN;
 
     snd_una = snd_nxt = snd_wnd = 0;
-    rcv_nxt = r_win_base = iss = 0;
+    rcv_nxt = r_win_base = iss = irs = 0;
     ts_last = ts_last_packet = 0;
     small_seg_count = 0;
     wscale = 0;
@@ -218,7 +218,7 @@ void TcpStreamTracker::init_tcp_state()
     order = 0;
     held_packet = null_iterator;
     flush_policy = STREAM_FLPOLICY_IGNORE;
-    reassembler.setup_paf();
+    reassembler.reset();
 }
 
 //-------------------------------------------------------------------------
@@ -245,10 +245,7 @@ void TcpStreamTracker::set_splitter(StreamSplitter* ss)
     if ( !splitter )
         flush_policy = STREAM_FLPOLICY_IGNORE;
     else
-    {
         reassembler.setup_paf();
-        reassembler.reset_paf_segment();
-    }
 }
 
 void TcpStreamTracker::set_splitter(const Flow* flow)
@@ -404,6 +401,7 @@ void TcpStreamTracker::init_on_data_seg_sent(TcpSegmentDescriptor& tsd)
     r_win_base = tsd.get_ack();
     rcv_nxt = tsd.get_ack();
     reassembler.set_seglist_base_seq(tsd.get_ack());
+    reinit_seg_base = true;
 
     ts_last_packet = tsd.get_packet_timestamp();
     tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
index 4da152f26c475419d30addadbaa9ed0e9291b93f..1b6644e5c31e67f1a18446a38dd6c418d8844423 100644 (file)
@@ -332,7 +332,7 @@ public:
     uint32_t small_seg_count = 0;
     uint8_t order = 0;
     FinSeqNumStatus fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
-
+    bool reinit_seg_base = false;
 
 protected:
     static const std::list<HeldPacket>::iterator null_iterator;