and SEQ_LT(tsn.next->c_seq, trs.tracker->r_win_base);
}
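+// a FIN with a valid seq has been seen and this segment reaches the FIN's starting
+// seq, i.e. no gap remains between queued data and the FIN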
+bool TcpReassembler::fin_no_gap(const TcpSegmentNode& tsn, const TcpReassemblerState& trs)
+{
+ return trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_SEEN
+ and SEQ_GEQ(tsn.i_seq + tsn.i_len, trs.tracker->get_fin_i_seq());
+}
+
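+// as above, but the FIN must also have been acked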
+bool TcpReassembler::fin_acked_no_gap(const TcpSegmentNode& tsn, const TcpReassemblerState& trs)
+{
+ return trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_ACKED
+ and SEQ_GEQ(tsn.i_seq + tsn.i_len, trs.tracker->get_fin_i_seq());
+}
+
void TcpReassembler::update_next(TcpReassemblerState& trs, const TcpSegmentNode& tsn)
{
trs.sos.seglist.cur_rseg = next_no_gap(tsn) ? tsn.next : nullptr;
footprint = trs.tracker->r_win_base - trs.sos.seglist_base_seq;
if ( footprint )
- {
sequenced = get_q_sequenced(trs);
- if ( trs.tracker->fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_ACKED )
- --footprint;
- }
-
return ( footprint > sequenced ) ? sequenced : footprint;
}
{
assert(trs.sos.session->flow == p->flow);
+ int32_t ret_val = FINAL_FLUSH_HOLD;
+
if ( SEQ_GT(trs.sos.seglist.head->c_seq, trs.sos.seglist_base_seq) )
- return -1;
+ return ret_val;
if ( !trs.sos.seglist.cur_rseg )
trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
if ( !is_q_sequenced(trs) )
- return -1;
+ return ret_val;
TcpSegmentNode* tsn = trs.sos.seglist.cur_sseg;
uint32_t total = tsn->c_seq - trs.sos.seglist_base_seq;
+
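+ // assume no gap; downgraded to FINAL_FLUSH_HOLD below if the scan stops at a gap before the FIN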
+ ret_val = FINAL_FLUSH_OK;
while ( tsn && *flags )
{
total += tsn->c_len;
if ( paf_initialized(&trs.paf_state) && SEQ_LEQ(end, pos) )
{
if ( !next_no_gap(*tsn) )
+ {
+ ret_val = FINAL_FLUSH_HOLD;
break;
+ }
tsn = tsn->next;
continue;
}
if (!next_no_gap(*tsn) || (trs.paf_state.paf == StreamSplitter::STOP))
+ {
+ if ( !(next_no_gap(*tsn) || fin_no_gap(*tsn, trs)) )
+ ret_val = FINAL_FLUSH_HOLD;
break;
+ }
tsn = tsn->next;
}
trs.sos.seglist.cur_sseg = tsn;
- return -1;
+ return ret_val;
}
static inline bool both_splitters_aborted(Flow* flow)
{
assert(trs.sos.session->flow == p->flow);
+ int32_t ret_val = FINAL_FLUSH_HOLD;
+
if ( !trs.sos.seglist.cur_sseg || SEQ_GEQ(trs.sos.seglist_base_seq, trs.tracker->r_win_base) )
- return -1;
+ return ret_val;
if ( !trs.sos.seglist.cur_rseg )
trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_sseg;
total = tsn->c_seq - trs.sos.seglist.cur_rseg->c_seq;
}
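+ // assume no gap; downgraded to FINAL_FLUSH_HOLD below if the scan stops at a gap before the acked FIN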
+ ret_val = FINAL_FLUSH_OK;
while (tsn && *flags && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base))
{
// only flush acked data that fits in pdu reassembly buffer...
if (flush_len < tsn->c_len || (splitter->is_paf() and !next_no_gap(*tsn)) ||
(trs.paf_state.paf == StreamSplitter::STOP))
+ {
+ if ( !(next_no_gap(*tsn) || fin_acked_no_gap(*tsn, trs)) )
+ ret_val = FINAL_FLUSH_HOLD;
break;
+ }
tsn = tsn->next;
}
- return -1;
+ return ret_val;
}
int TcpReassembler::flush_on_data_policy(TcpReassemblerState& trs, Packet* p)
if ( trs.sos.seglist.head )
{
uint32_t flags;
+ int32_t flush_amt;
do
{
flags = get_forward_packet_dir(trs, p);
- int32_t flush_amt = scan_data_pre_ack(trs, &flags, p);
+ flush_amt = scan_data_pre_ack(trs, &flags, p);
if ( flush_amt <= 0 )
- {
- if (trs.tracker->delayed_finish())
- flush_queued_segments(trs, p->flow, true, p);
break;
- }
flushed += flush_to_seq(trs, flush_amt, p, flags);
}
fallback(*trs.tracker, trs.server_side);
return flush_on_data_policy(trs, p);
}
+ else if ( trs.tracker->fin_seq_status >= TcpStreamTracker::FIN_WITH_SEQ_SEEN and
+ FINAL_FLUSH_OK <= flush_amt and flush_amt <= 0 and
+ trs.paf_state.paf == StreamSplitter::SEARCH and
+ !p->flow->searching_for_service() )
+ {
+ // we are on a FIN, the data has been scanned, it has no gaps, but the
+ // splitter found no flush point and no more data will arrive - do the final flush here
+ flush_queued_segments(trs, p->flow, true, p);
+ }
}
break;
}
flags = get_reverse_packet_dir(trs, p);
flush_amt = scan_data_post_ack(trs, &flags, p);
if ( flush_amt <= 0 or trs.paf_state.paf == StreamSplitter::SKIP )
- {
- if (trs.tracker->delayed_finish())
- flush_queued_segments(trs, p->flow, true, p);
break;
- }
if ( trs.paf_state.paf == StreamSplitter::ABORT )
trs.tracker->splitter_finish(p->flow);
skip_seglist_hole(trs, p, flags, flush_amt);
return flush_on_ack_policy(trs, p);
}
+ else if ( FINAL_FLUSH_OK <= flush_amt and flush_amt <= 0 and
+ trs.paf_state.paf == StreamSplitter::SEARCH and
+ trs.tracker->fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_ACKED and
+ !p->flow->searching_for_service() )
+ {
+ // we are acknowledging a FIN, the data has been scanned, it has no gaps, but the
+ // splitter found no flush point and no more data will arrive - do the final flush here
+ flush_queued_segments(trs, p->flow, true, p);
+ }
}
break;
class TcpReassembler : public SegmentOverlapEditor
{
public:
+
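+ // HOLD means the final flush must wait, data is unsequenced or a gap precedes the FIN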
+ // OK means FIN seen, data scanned, flush point not found, no gaps
+ enum ScanStatus {
+ FINAL_FLUSH_HOLD = -2,
+ FINAL_FLUSH_OK = -1
+ };
+
virtual void queue_packet_for_reassembly(TcpReassemblerState&, TcpSegmentDescriptor&);
virtual void purge_segment_list(TcpReassemblerState&);
virtual void purge_flushed_ackd(TcpReassemblerState&);
bool next_no_gap(const TcpSegmentNode&);
bool next_no_gap_c(const TcpSegmentNode&);
bool next_acked_no_gap_c(const TcpSegmentNode&, const TcpReassemblerState&);
+ bool fin_no_gap(const TcpSegmentNode&, const TcpReassemblerState&);
+ bool fin_acked_no_gap(const TcpSegmentNode&, const TcpReassemblerState&);
void update_next(TcpReassemblerState&, const TcpSegmentNode&);
void update_skipped_bytes(uint32_t, TcpReassemblerState&);
bool has_seglist_hole(TcpReassemblerState&, TcpSegmentNode&, PAF_State&, uint32_t& total,
{
Flow* flow = tsd.get_flow();
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
if ( SEQ_GT(tsd.get_seq(), trk.get_fin_final_seq() ) )
bool TcpStateEstablished::fin_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
trk.perform_fin_recv_flush(tsd);
Flow* flow = tsd.get_flow();
bool is_ack_valid = false;
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
if ( SEQ_GEQ(tsd.get_end_seq(), trk.r_win_base) and
check_for_window_slam(tsd, trk, &is_ack_valid) )
{
Flow* flow = tsd.get_flow();
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
if ( SEQ_GEQ(tsd.get_end_seq(), trk.r_win_base) )
{
{
Flow* flow = tsd.get_flow();
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
trk.session->set_pkt_action_flag(trk.normalizer.handle_paws(tsd));
flow->session_state |= STREAM_STATE_ACK;
bool TcpStateSynSent::fin_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ trk.set_fin_seq_status_seen(tsd);
trk.perform_fin_recv_flush(tsd);
return true;
}
bool TcpStateTimeWait::fin_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ trk.set_fin_seq_status_seen(tsd);
trk.update_tracker_ack_recv(tsd);
if ( SEQ_GT(tsd.get_seq(), trk.get_fin_final_seq() ) )
{
}
}
+
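+// record the FIN's seq numbers the first time a FIN ending at or beyond r_win_base is seen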
+void TcpStreamTracker::set_fin_seq_status_seen(const TcpSegmentDescriptor& tsd)
+{
+ if ( !fin_seq_set and SEQ_GEQ(tsd.get_end_seq(), r_win_base) )
+ {
+ fin_i_seq = tsd.get_seq();
+ fin_final_seq = tsd.get_end_seq();
+ fin_seq_set = true;
+ fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_SEEN;
+ }
+}
+
void TcpStreamTracker::init_tcp_state()
{
tcp_state = ( client_tracker ) ?
mss = 0;
tf_flags = 0;
mac_addr_valid = false;
+ fin_i_seq = 0;
fin_final_seq = 0;
fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
fin_seq_set = false;
flush_policy = STREAM_FLPOLICY_IGNORE;
reassembler.reset();
splitter_finish_flag = false;
- delayed_finish_flag = false;
}
//-------------------------------------------------------------------------
}
if ( ( fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_SEEN )
- && SEQ_EQ(r_win_base, fin_final_seq) )
+ && SEQ_GEQ(tsd.get_ack(), fin_final_seq + 1) )
{
fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_ACKED;
}
else
fin_seq_adjust = 1;
- // set final seq # any packet rx'ed with seq > is bad
- if ( !fin_seq_set )
- {
- fin_final_seq = tsd.get_end_seq();
- fin_seq_set = true;
- if( tsd.get_len() == 0 )
- fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_SEEN;
- }
-
return true;
}
if ( tsd.is_data_segment() )
session->handle_data_segment(tsd);
- // If the packet is in-sequence, call finish and final flush on it.
- // FIXIT-L: what do we do about out-of-sequence packets?
- if ( flush_policy == STREAM_FLPOLICY_ON_DATA and SEQ_EQ(tsd.get_end_seq(), rcv_nxt) )
- {
- if (tsd.get_flow()->searching_for_service())
- {
- delayed_finish_flag = true;
- return;
- }
+ if ( flush_policy == STREAM_FLPOLICY_ON_DATA and SEQ_EQ(tsd.get_end_seq(), rcv_nxt)
+ and !tsd.get_flow()->searching_for_service() )
reassembler.flush_queued_segments(tsd.get_flow(), true, tsd.get_pkt());
- }
}
uint32_t TcpStreamTracker::perform_partial_flush()
uint32_t get_fin_seq_adjust()
{ return fin_seq_adjust; }
+ uint32_t get_fin_i_seq() const
+ { return fin_i_seq; }
+
+ // set the final seq #; any packet rx'ed with a greater seq is bad
+ void set_fin_seq_status_seen(const TcpSegmentDescriptor&);
+
bool is_fin_seq_set() const
{ return fin_seq_set; }
{ return splitter && splitter->is_paf(); }
bool splitter_finish(snort::Flow* flow);
- bool delayed_finish() const
- { return delayed_finish_flag; }
bool is_reassembly_enabled() const
{ return ( splitter and (flush_policy != STREAM_FLPOLICY_IGNORE) ); }
uint32_t ts_last_packet = 0;
uint32_t ts_last = 0; // last timestamp (for PAWS)
uint32_t fin_final_seq = 0;
+ uint32_t fin_i_seq = 0;       // starting seq of the segment carrying the FIN
uint32_t fin_seq_adjust = 0;
uint16_t mss = 0; // max segment size
uint16_t wscale = 0; // window scale setting
bool mac_addr_valid = false;
bool fin_seq_set = false; // FIXIT-M should be obviated by tcp state
bool splitter_finish_flag = false;
- bool delayed_finish_flag = false;
};
// <--- note -- the 'state' parameter must be a reference