This directory contains the implementation of TCP session tracking and
processing functions. When the network protocol for a flow is determined
to be TCP, the base Stream preprocessor will delegate handling of the
flow to this module.
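
Conceptually, the hand-off happens when the flow is classified. A minimal
sketch of the idea (illustrative only; Flow, PktType and TcpSession are the
Snort 3 names, but this is not the literal Stream dispatch code):

    // hypothetical sketch: once a flow is classified as TCP, a
    // TCP-specific session object takes over packet processing
    if ( flow->pkt_type == PktType::TCP )
        flow->session = new TcpSession(flow);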
void TcpReassembler::init(bool server, StreamSplitter* ss)
{
splitter = ss;
paf.paf_setup(ss);
- if ( seglist.cur_rseg )
- seglist.cur_sseg = seglist.cur_rseg;
+ if ( seglist->cur_rseg )
+ seglist->cur_sseg = seglist->cur_rseg;
else
- seglist.cur_sseg = seglist.head;
+ seglist->cur_sseg = seglist->head;
server_side = server;
}
bool TcpReassembler::fin_no_gap(const TcpSegmentNode& tsn)
{
- return tracker.fin_seq_status >= FIN_WITH_SEQ_SEEN
- and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
+ return tracker->fin_seq_status >= FIN_WITH_SEQ_SEEN
+ and SEQ_GEQ(tsn.next_seq(), tracker->get_fin_i_seq());
}
bool TcpReassembler::fin_acked_no_gap(const TcpSegmentNode& tsn)
{
- return tracker.fin_seq_status >= FIN_WITH_SEQ_ACKED
- and SEQ_GEQ(tsn.next_seq(), tracker.get_fin_i_seq());
+ return tracker->fin_seq_status >= FIN_WITH_SEQ_ACKED
+ and SEQ_GEQ(tsn.next_seq(), tracker->get_fin_i_seq());
}
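
// Both predicates above compare 32-bit sequence numbers with the wrap-safe
// SEQ_* helpers. A minimal sketch of the conventional definitions, assumed
// here for reference (the real ones live in the TCP stream headers):
static inline bool SEQ_LT (uint32_t a, uint32_t b) { return (int32_t)(a - b) <  0; }
static inline bool SEQ_LEQ(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }
static inline bool SEQ_GT (uint32_t a, uint32_t b) { return (int32_t)(a - b) >  0; }
static inline bool SEQ_GEQ(uint32_t a, uint32_t b) { return (int32_t)(a - b) >= 0; }
static inline bool SEQ_EQ (uint32_t a, uint32_t b) { return a == b; }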
// If we are skipping a seglist hole, update tsn so that we can purge
void TcpReassembler::update_skipped_bytes(uint32_t remaining_bytes)
{
TcpSegmentNode* tsn;
- while ( remaining_bytes and (tsn = seglist.cur_rseg) )
+ while ( remaining_bytes and (tsn = seglist->cur_rseg) )
{
auto bytes_skipped = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
if ( !tsn->unscanned() )
{
- seglist.flush_count++;
- seglist.update_next(tsn);
+ seglist->flush_count++;
+ seglist->update_next(tsn);
}
}
}
void TcpReassembler::purge_to_seq(uint32_t flush_seq)
{
- seglist.purge_flushed_segments(flush_seq);
+ seglist->purge_flushed_segments(flush_seq);
if ( last_pdu )
{
- tracker.tcp_alerts.purge_alerts(*last_pdu, tracker.normalizer.is_tcp_ips_enabled());
+ tracker->tcp_alerts.purge_alerts(*last_pdu, tracker->normalizer.is_tcp_ips_enabled());
last_pdu = nullptr;
}
else
- tracker.tcp_alerts.purge_alerts(seglist.session->flow);
+ tracker->tcp_alerts.purge_alerts(seglist->session->flow);
}
// must only purge flushed and acked bytes; we may flush partial segments
// (if we reassemble such)
void TcpReassembler::purge_flushed_ackd()
{
- if ( !seglist.head )
+ if ( !seglist->head )
return;
- uint32_t seq = seglist.head->start_seq();
- TcpSegmentNode* tsn = seglist.head;
+ uint32_t seq = seglist->head->start_seq();
+ TcpSegmentNode* tsn = seglist->head;
while ( tsn && !tsn->unscanned() )
{
uint32_t end = tsn->next_seq();
- if ( SEQ_GT(end, tracker.r_win_base) )
+ if ( SEQ_GT(end, tracker->r_win_base) )
break;
seq = end;
tsn = tsn->next;
}
- if ( !SEQ_EQ(seq, seglist.head->start_seq()) )
+ if ( !SEQ_EQ(seq, seglist->head->start_seq()) )
purge_to_seq(seq);
}
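
// example: with two fully scanned segments covering [100, 200) and
// [200, 300) and tracker->r_win_base at 250, the walk stops at the second
// segment (its end 300 is beyond r_win_base), so purge_to_seq(200) releases
// only the first, fully acked segment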
void TcpReassembler::show_rebuilt_packet(Packet* pkt)
{
- if ( seglist.session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
+ if ( seglist->session->tcp_config->flags & STREAM_CONFIG_SHOW_PACKETS )
{
// FIXIT-L setting conf here is required because this is called before context start
pkt->context->conf = SnortConfig::get_conf();
int TcpReassembler::flush_data_segments(uint32_t flush_len, Packet* pdu)
{
uint32_t flags = PKT_PDU_HEAD;
- uint32_t to_seq = seglist.cur_rseg->scan_seq() + flush_len;
+ uint32_t to_seq = seglist->cur_rseg->scan_seq() + flush_len;
uint32_t remaining_bytes = flush_len;
uint32_t total_flushed = 0;
while ( remaining_bytes )
{
- TcpSegmentNode* tsn = seglist.cur_rseg;
+ TcpSegmentNode* tsn = seglist->cur_rseg;
unsigned bytes_to_copy = ( tsn->unscanned() <= remaining_bytes ) ? tsn->unscanned() : remaining_bytes;
remaining_bytes -= bytes_to_copy;
assert( bytes_to_copy <= tsn->unscanned() );
unsigned bytes_copied = 0;
- const StreamBuffer sb = splitter->reassemble(seglist.session->flow, flush_len, total_flushed,
+ const StreamBuffer sb = splitter->reassemble(seglist->session->flow, flush_len, total_flushed,
tsn->paf_data(), bytes_to_copy, flags, bytes_copied);
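// the splitter copies this segment's scanned data into its reassembly
// buffer; sb.data comes back non-null once a complete PDU is ready to hand
// up, and bytes_copied reports how much of this chunk was consumed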
if ( sb.data )
if ( !tsn->unscanned() )
{
- seglist.flush_count++;
- seglist.update_next(tsn);
+ seglist->flush_count++;
+ seglist->update_next(tsn);
}
/* Check for a gap/missing packet */
{
// FIXIT-H // assert(false); find when this scenario happens
// FIXIT-L this is suboptimal - better to exclude fin from to_seq
- if ( !tracker.is_fin_seq_set() or
- SEQ_LEQ(to_seq, tracker.get_fin_final_seq()) )
+ if ( !tracker->is_fin_seq_set() or
+ SEQ_LEQ(to_seq, tracker->get_fin_final_seq()) )
{
- tracker.set_tf_flags(TF_MISSING_PKT);
+ tracker->set_tf_flags(TF_MISSING_PKT);
}
break;
}
- if ( sb.data || !seglist.cur_rseg )
+ if ( sb.data || !seglist->cur_rseg )
break;
}
EncodeFlags enc_flags = 0;
DAQ_PktHdr_t pkth;
- seglist.session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
+ seglist->session->get_packet_header_foo(&pkth, p->pkth, pkt_flags);
PacketManager::format_tcp(enc_flags, p, pdu, PSEUDO_PKT_TCP, &pkth, pkth.opaque);
- prep_pdu(seglist.session->flow, p, pkt_flags, pdu);
+ prep_pdu(seglist->session->flow, p, pkt_flags, pdu);
assert(pdu->pkth == pdu->context->pkth);
pdu->context->pkth->ts = tv;
pdu->dsize = 0;
// flush a seglist up to the given point, generate a pseudopacket, and fire it thru the system.
int TcpReassembler::flush_to_seq(uint32_t bytes, Packet* p, uint32_t pkt_flags)
{
- assert( p && seglist.cur_rseg);
+ assert( p && seglist->cur_rseg);
- tracker.clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
+ tracker->clear_tf_flags(TF_MISSING_PKT | TF_MISSING_PREV_PKT);
- TcpSegmentNode* tsn = seglist.cur_rseg;
- assert( seglist.seglist_base_seq == tsn->scan_seq());
+ TcpSegmentNode* tsn = seglist->cur_rseg;
+ assert( seglist->seglist_base_seq == tsn->scan_seq());
Packet* pdu = initialize_pdu(p, pkt_flags, tsn->tv);
int32_t flushed_bytes = flush_data_segments(bytes, pdu);
assert( flushed_bytes );
- seglist.seglist_base_seq += flushed_bytes;
+ seglist->seglist_base_seq += flushed_bytes;
if ( pdu->data )
{
else
last_pdu = nullptr;
- tracker.finalize_held_packet(p);
+ tracker->finalize_held_packet(p);
}
else
{
}
// FIXIT-L abort should be by PAF callback only since recovery may be possible
- if ( tracker.get_tf_flags() & TF_MISSING_PKT )
+ if ( tracker->get_tf_flags() & TF_MISSING_PKT )
{
- tracker.set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
- tracker.clear_tf_flags(TF_MISSING_PKT);
+ tracker->set_tf_flags(TF_MISSING_PREV_PKT | TF_PKT_MISSED);
+ tracker->clear_tf_flags(TF_MISSING_PKT);
tcpStats.gaps++;
}
else
- tracker.clear_tf_flags(TF_MISSING_PREV_PKT);
+ tracker->clear_tf_flags(TF_MISSING_PREV_PKT);
return flushed_bytes;
}
{
unsigned bytes_copied = 0;
- const StreamBuffer sb = splitter->reassemble(seglist.session->flow, 0, 0,
+ const StreamBuffer sb = splitter->reassemble(seglist->session->flow, 0, 0,
nullptr, 0, (PKT_PDU_HEAD | PKT_PDU_TAIL), bytes_copied);
if ( sb.data )
int32_t footprint = 0;
int32_t sequenced = 0;
- if ( SEQ_GT(tracker.r_win_base, seglist.seglist_base_seq) )
- footprint = tracker.r_win_base - seglist.seglist_base_seq;
+ if ( SEQ_GT(tracker->r_win_base, seglist->seglist_base_seq) )
+ footprint = tracker->r_win_base - seglist->seglist_base_seq;
if ( footprint )
sequenced = get_q_sequenced();
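// e.g. with r_win_base = 1500 and seglist_base_seq = 1000 the footprint is
// 500 acked-but-unflushed bytes; the flushable amount is further capped by
// how many of those bytes are actually sequenced (hole-free) in the list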
uint32_t TcpReassembler::get_q_sequenced()
{
- TcpSegmentNode* tsn = seglist.cur_rseg;
+ TcpSegmentNode* tsn = seglist->cur_rseg;
if ( !tsn )
{
- tsn = seglist.head;
+ tsn = seglist->head;
- if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
+ if ( !tsn || SEQ_LT(tracker->r_win_base, tsn->scan_seq()) )
return 0;
- seglist.cur_rseg = tsn;
+ seglist->cur_rseg = tsn;
}
uint32_t len = 0;
while ( tsn->next_no_gap() )
{
if ( !tsn->unscanned() )
- seglist.cur_rseg = tsn->next;
+ seglist->cur_rseg = tsn->next;
else
len += tsn->unscanned();
tsn = tsn->next;
}
if ( tsn->unscanned() )
len += tsn->unscanned();
- seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
+ seglist->seglist_base_seq = seglist->cur_rseg->scan_seq();
return len;
}
bool TcpReassembler::is_q_sequenced()
{
- TcpSegmentNode* tsn = seglist.cur_rseg;
+ TcpSegmentNode* tsn = seglist->cur_rseg;
if ( !tsn )
{
- tsn = seglist.head;
- if ( !tsn || SEQ_LT(tracker.r_win_base, tsn->scan_seq()) )
+ tsn = seglist->head;
+ if ( !tsn || SEQ_LT(tracker->r_win_base, tsn->scan_seq()) )
return false;
- seglist.cur_rseg = tsn;
+ seglist->cur_rseg = tsn;
}
while ( tsn->next_no_gap() )
{
if ( tsn->unscanned() )
break;
- tsn = seglist.cur_rseg = tsn->next;
+ tsn = seglist->cur_rseg = tsn->next;
}
- seglist.seglist_base_seq = tsn->scan_seq();
+ seglist->seglist_base_seq = tsn->scan_seq();
return (tsn->unscanned() != 0);
}
void TcpReassembler::final_flush(Packet* p, uint32_t dir)
{
- tracker.set_tf_flags(TF_FORCE_FLUSH);
+ tracker->set_tf_flags(TF_FORCE_FLUSH);
if ( flush_stream(p, dir, true) )
{
purge_flushed_ackd();
}
- tracker.clear_tf_flags(TF_FORCE_FLUSH);
+ tracker->clear_tf_flags(TF_FORCE_FLUSH);
}
static Packet* get_packet(Flow* flow, uint32_t flags, bool c2s)
void TcpReassembler::check_first_segment_hole()
{
- if ( SEQ_LT(seglist.seglist_base_seq, seglist.head->start_seq()) )
+ if ( SEQ_LT(seglist->seglist_base_seq, seglist->head->start_seq()) )
{
- seglist.seglist_base_seq = seglist.head->start_seq();
- seglist.advance_rcv_nxt();
+ seglist->seglist_base_seq = seglist->head->start_seq();
+ seglist->advance_rcv_nxt();
paf.state = StreamSplitter::START;
}
}
}
// No error checking here, so the caller must ensure that p, p->flow are not null.
-uint32_t TcpReassembler::perform_partial_flush(Packet* p, uint32_t flushed)
+uint32_t TcpReassembler::perform_partial_flush(Packet* p)
{
+ uint32_t flushed = 0;
if ( splitter->init_partial_flush(p->flow) )
{
- flushed += flush_stream(p, packet_dir, false);
+ flushed = flush_stream(p, packet_dir, false);
paf.paf_jump(flushed);
tcpStats.partial_flushes++;
tcpStats.partial_flush_bytes += flushed;
- if ( seglist.seg_count )
+ if ( seglist->seg_count )
{
- purge_to_seq(seglist.head->start_seq() + flushed);
- tracker.r_win_base = seglist.seglist_base_seq;
+ purge_to_seq(seglist->head->start_seq() + flushed);
+ tracker->r_win_base = seglist->seglist_base_seq;
}
}
+
return flushed;
}
// FIXIT-M this convoluted expression needs some refactoring to simplify
bool TcpReassembler::final_flush_on_fin(int32_t flush_amt, Packet *p, FinSeqNumStatus fin_status)
{
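// flush only when a FIN at or beyond the required status has been seen,
// the last scan made no progress (flush_amt of -1, i.e. FINAL_FLUSH_OK,
// or 0), the splitter is still searching for a flush point, and the flow
// is not still waiting on service identification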
- return tracker.fin_seq_status >= fin_status
+ return tracker->fin_seq_status >= fin_status
&& -1 <= flush_amt && flush_amt <= 0
&& paf.state == StreamSplitter::SEARCH
&& !p->flow->searching_for_service();
}
bool TcpReassembler::asymmetric_flow_flushed(uint32_t flushed, snort::Packet *p)
{
- bool asymmetric = flushed && seglist.seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
+ bool asymmetric = flushed && seglist->seg_count && !p->flow->two_way_traffic() && !p->ptrs.tcph->is_syn();
if ( asymmetric )
{
- TcpStreamTracker::TcpState peer = tracker.session->get_peer_state(&tracker);
+ TcpStreamTracker::TcpState peer = tracker->session->get_peer_state(tracker);
asymmetric = ( peer == TcpStreamTracker::TCP_SYN_SENT || peer == TcpStreamTracker::TCP_SYN_RECV
|| peer == TcpStreamTracker::TCP_MID_STREAM_SENT );
}
return asymmetric;
}
+
+uint32_t TcpReassemblerIgnore::perform_partial_flush(snort::Flow* flow, snort::Packet*& p)
+{
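+    // nothing is reassembled under the ignore policy; just hand the caller
+    // a pseudo packet for this direction and report zero bytes flushed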
+ p = get_packet(flow, packet_dir, server_side);
+ return 0;
+}
+
FINAL_FLUSH_OK = -1
};
- TcpReassembler(TcpStreamTracker& trk, TcpReassemblySegments& seglist)
+ TcpReassembler(TcpStreamTracker* trk, TcpReassemblySegments* seglist)
: tracker(trk), seglist(seglist)
{ }
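// note: tracker and seglist are held as pointers (previously references),
// which allows the shared TcpReassemblerIgnore instance, created later with
// nullptr for both, to exist without a backing tracker or segment list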
virtual void init(bool server, snort::StreamSplitter* ss);
-
virtual int eval_flush_policy_on_ack(snort::Packet*) = 0;
virtual int eval_flush_policy_on_data(snort::Packet*) = 0;
virtual int eval_asymmetric_flush(snort::Packet*) = 0;
virtual int flush_stream(snort::Packet*, uint32_t dir, bool final_flush = false) = 0;
- void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* = nullptr);
- void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet*);
- uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&);
- void purge_flushed_ackd();
+ virtual void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* = nullptr);
+ virtual void finish_and_final_flush(snort::Flow* flow, bool clear, snort::Packet*);
+ virtual uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&);
+ virtual void purge_flushed_ackd();
+ virtual FlushPolicy get_flush_policy() const = 0;
void release_splitter()
{ splitter = nullptr; }
- snort::StreamSplitter* get_splitter()
- { return splitter; }
-
bool is_splitter_paf() const
{ return splitter && splitter->is_paf(); }
void initialize_paf()
{
+ assert( get_flush_policy() != STREAM_FLPOLICY_IGNORE );
+
// only initialize if we have a data segment queued
- if ( !seglist.head )
+ if ( !seglist->head )
return;
- if ( !paf.paf_initialized() or !SEQ_EQ(paf.seq_num, seglist.head->start_seq()) )
- paf.paf_initialize(seglist.head->start_seq());
+ if ( !paf.paf_initialized() or !SEQ_EQ(paf.seq_num, seglist->head->start_seq()) )
+ paf.paf_initialize(seglist->head->start_seq());
}
void reset_paf()
void clear_paf()
{ paf.paf_clear(); }
- virtual FlushPolicy get_flush_policy() const = 0;
-
protected:
void show_rebuilt_packet(snort::Packet*);
int flush_data_segments(uint32_t flush_len, snort::Packet* pdu);
bool fin_acked_no_gap(const TcpSegmentNode&);
void update_skipped_bytes(uint32_t);
void check_first_segment_hole();
- uint32_t perform_partial_flush(snort::Packet*, uint32_t flushed = 0);
+ uint32_t perform_partial_flush(snort::Packet*);
bool final_flush_on_fin(int32_t flush_amt, snort::Packet*, FinSeqNumStatus);
bool asymmetric_flow_flushed(uint32_t flushed, snort::Packet *p);
ProtocolAwareFlusher paf;
- TcpStreamTracker& tracker;
- TcpReassemblySegments& seglist;
+ TcpStreamTracker* tracker = nullptr;
+ TcpReassemblySegments* seglist = nullptr;
snort::StreamSplitter* splitter = nullptr;
snort::Packet* last_pdu = nullptr;
class TcpReassemblerIgnore : public TcpReassembler
{
public:
- TcpReassemblerIgnore(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ TcpReassemblerIgnore(TcpStreamTracker* trk, TcpReassemblySegments* sl)
: TcpReassembler(trk, sl)
{ }
int flush_stream(snort::Packet*, uint32_t, bool) override
{ return 0; }
+ void flush_queued_segments(snort::Flow*, bool, snort::Packet*) override
+ { }
+
+ void finish_and_final_flush(snort::Flow*, bool, snort::Packet*) override
+ { }
+
+ uint32_t perform_partial_flush(snort::Flow*, snort::Packet*&) override;
+
+ void purge_flushed_ackd() override
+ { }
+
FlushPolicy get_flush_policy() const override
{ return STREAM_FLPOLICY_IGNORE; }
};
#endif
+
bool TcpReassemblerIds::has_seglist_hole(TcpSegmentNode& tsn, uint32_t& total, uint32_t& flags)
{
if ( !tsn.prev or SEQ_GEQ(tsn.prev->scan_seq() + tsn.prev->unscanned(), tsn.scan_seq())
- or SEQ_GEQ(tsn.scan_seq(), tracker.r_win_base) )
+ or SEQ_GEQ(tsn.scan_seq(), tracker->r_win_base) )
{
check_first_segment_hole();
return false;
{
if ( flush_amt > 0 )
update_skipped_bytes(flush_amt);
- tracker.fallback();
+ tracker->fallback();
}
else
{
paf.state = StreamSplitter::START;
}
- if ( seglist.head )
+ if ( seglist->head )
{
if ( flush_amt > 0 )
- purge_to_seq(seglist.seglist_base_seq + flush_amt);
- seglist.seglist_base_seq = seglist.head->scan_seq();
+ purge_to_seq(seglist->seglist_base_seq + flush_amt);
+ seglist->seglist_base_seq = seglist->head->scan_seq();
}
else
- seglist.seglist_base_seq = tracker.r_win_base; // FIXIT-H - do we need to set rcv_nxt here?
+ seglist->seglist_base_seq = tracker->r_win_base; // FIXIT-H - do we need to set rcv_nxt here?
- seglist.cur_rseg = seglist.head;
- tracker.set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
+ seglist->cur_rseg = seglist->head;
+ tracker->set_order(TcpStreamTracker::OUT_OF_SEQUENCE);
}
// iterate over seglist and scan all new acked bytes; we know where we
// left off and can resume scanning the remainder
int32_t TcpReassemblerIds::scan_data_post_ack(uint32_t* flags, Packet* p)
{
- assert(seglist.session->flow == p->flow);
+ assert(seglist->session->flow == p->flow);
int32_t ret_val = FINAL_FLUSH_HOLD;
- if ( !seglist.cur_sseg || SEQ_GEQ(seglist.seglist_base_seq, tracker.r_win_base) )
+ if ( !seglist->cur_sseg || SEQ_GEQ(seglist->seglist_base_seq, tracker->r_win_base) )
return ret_val;
- if ( !seglist.cur_rseg )
- seglist.cur_rseg = seglist.cur_sseg;
+ if ( !seglist->cur_rseg )
+ seglist->cur_rseg = seglist->cur_sseg;
uint32_t total = 0;
- TcpSegmentNode* tsn = seglist.cur_sseg;
+ TcpSegmentNode* tsn = seglist->cur_sseg;
if ( paf.paf_initialized() )
{
uint32_t end_seq = tsn->scan_seq() + tsn->unscanned();
if ( SEQ_EQ(end_seq, paf.paf_position()) )
{
- total = end_seq - seglist.seglist_base_seq;
+ total = end_seq - seglist->seglist_base_seq;
tsn = tsn->next;
}
else
- total = tsn->scan_seq() - seglist.cur_rseg->scan_seq();
+ total = tsn->scan_seq() - seglist->cur_rseg->scan_seq();
}
ret_val = FINAL_FLUSH_OK;
- while (tsn && *flags && SEQ_LT(tsn->scan_seq(), tracker.r_win_base))
+ while (tsn && *flags && SEQ_LT(tsn->scan_seq(), tracker->r_win_base))
{
// only flush acked data that fits in pdu reassembly buffer...
uint32_t end = tsn->scan_seq() + tsn->unscanned();
uint32_t flush_len;
int32_t flush_pt;
- if ( SEQ_GT(end, tracker.r_win_base))
- flush_len = tracker.r_win_base - tsn->scan_seq();
+ if ( SEQ_GT(end, tracker->r_win_base))
+ flush_len = tracker->r_win_base - tsn->scan_seq();
else
flush_len = tsn->unscanned();
- if ( tsn->next_acked_no_gap(tracker.r_win_base) )
+ if ( tsn->next_acked_no_gap(tracker->r_win_base) )
*flags |= PKT_MORE_TO_FLUSH;
else
*flags &= ~PKT_MORE_TO_FLUSH;
}
// Get splitter from tracker as paf check may change it.
- seglist.cur_sseg = tsn;
+ seglist->cur_sseg = tsn;
if ( flush_pt >= 0 )
{
- seglist.seglist_base_seq = seglist.cur_rseg->scan_seq();
+ seglist->seglist_base_seq = seglist->cur_rseg->scan_seq();
return flush_pt;
}
int TcpReassemblerIds::eval_flush_policy_on_ack(Packet* p)
{
+ last_pdu = nullptr;
uint32_t flushed = 0;
int32_t flush_amt;
uint32_t flags;
- last_pdu = nullptr;
-
do
{
flags = packet_dir;
assert( flushed );
// ideally we would purge just once after this loop but that throws off base
- if ( seglist.head )
- purge_to_seq(seglist.seglist_base_seq);
- } while ( seglist.head and !p->flow->is_inspection_disabled() );
+ if ( seglist->head )
+ purge_to_seq(seglist->seglist_base_seq);
+ } while ( seglist->head and !p->flow->is_inspection_disabled() );
if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
{
- tracker.fallback();
+ tracker->fallback();
return eval_flush_policy_on_ack(p);
}
else if ( paf.state == StreamSplitter::SKIP )
{
uint32_t flushed = 0;
- if ( !seglist.head )
+ if ( !seglist->head )
return flushed;
- if ( tracker.is_retransmit_of_held_packet(p) )
- flushed = perform_partial_flush(p, flushed);
+ if ( tracker->is_retransmit_of_held_packet(p) )
+ flushed += perform_partial_flush(p);
if ( !p->flow->two_way_traffic() and
- seglist.get_seg_bytes_total() > seglist.session->tcp_config->asymmetric_ids_flush_threshold )
+ seglist->get_seg_bytes_total() > seglist->session->tcp_config->asymmetric_ids_flush_threshold )
{
- seglist.skip_holes();
+ seglist->skip_holes();
flushed += eval_asymmetric_flush(p);
}
int TcpReassemblerIds::eval_asymmetric_flush(snort::Packet* p)
{
// asymmetric flush in IDS mode.. advance r_win_base to end of in-order data
- tracker.r_win_base = tracker.rcv_nxt;
+ tracker->r_win_base = tracker->rcv_nxt;
uint32_t flushed = eval_flush_policy_on_ack(p);
if ( flushed )
{
uint32_t bytes = 0;
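// two-way flows flush only acked data (the footprint); asymmetric flows
// flush all sequenced data, since ACKs may never arrive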
- if ( seglist.session->flow->two_way_traffic() )
- bytes = get_q_footprint();
+ if ( seglist->session->flow->two_way_traffic() )
+ bytes = get_q_footprint();
else
bytes = get_q_sequenced();
public:
- TcpReassemblerIds(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ TcpReassemblerIds(TcpStreamTracker* trk, TcpReassemblySegments* sl)
: TcpReassembler(trk, sl)
{ }
// in IPS mode we scan data as soon as it is received
// because we don't wait until it is acknowledged
int32_t TcpReassemblerIps::scan_data_pre_ack(uint32_t* flags, Packet* p)
{
- assert(seglist.session->flow == p->flow);
+ assert(seglist->session->flow == p->flow);
int32_t ret_val = FINAL_FLUSH_HOLD;
- if ( SEQ_GT(seglist.head->scan_seq(), seglist.seglist_base_seq) )
+ if ( SEQ_GT(seglist->head->scan_seq(), seglist->seglist_base_seq) )
return ret_val;
- if ( !seglist.cur_rseg )
- seglist.cur_rseg = seglist.cur_sseg;
+ if ( !seglist->cur_rseg )
+ seglist->cur_rseg = seglist->cur_sseg;
if ( !is_q_sequenced() )
return ret_val;
- TcpSegmentNode* tsn = seglist.cur_sseg;
- uint32_t total = tsn->scan_seq() - seglist.seglist_base_seq;
+ TcpSegmentNode* tsn = seglist->cur_sseg;
+ uint32_t total = tsn->scan_seq() - seglist->seglist_base_seq;
ret_val = FINAL_FLUSH_OK;
while ( tsn && *flags )
if (flush_pt >= 0)
{
- seglist.cur_sseg = tsn;
+ seglist->cur_sseg = tsn;
return flush_pt;
}
tsn = tsn->next;
}
- seglist.cur_sseg = tsn;
+ seglist->cur_sseg = tsn;
return ret_val;
}
int TcpReassemblerIps::eval_flush_policy_on_data(Packet* p)
{
- uint32_t flushed = 0;
+ if ( !seglist->head )
+ return 0;
+
last_pdu = nullptr;
+ uint32_t flags;
+ uint32_t flushed = 0;
+ int32_t flush_amt;
- if ( seglist.head )
+ do
{
- uint32_t flags;
- int32_t flush_amt;
- do
- {
- flags = packet_dir;
- flush_amt = scan_data_pre_ack(&flags, p);
- if ( flush_amt <= 0 )
- break;
+ flags = packet_dir;
+ flush_amt = scan_data_pre_ack(&flags, p);
+ if ( flush_amt <= 0 )
+ break;
- flushed += flush_to_seq(flush_amt, p, flags);
- } while ( seglist.head and !p->flow->is_inspection_disabled() );
+ flushed += flush_to_seq(flush_amt, p, flags);
+ } while ( seglist->head and !p->flow->is_inspection_disabled() );
- if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
- {
- tracker.fallback();
- return eval_flush_policy_on_data(p);
- }
- else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_SEEN) )
- finish_and_final_flush(p->flow, true, p);
+ if ( (paf.state == StreamSplitter::ABORT) && is_splitter_paf() )
+ {
+ tracker->fallback();
+ return eval_flush_policy_on_data(p);
}
+ else if ( final_flush_on_fin(flush_amt, p, FIN_WITH_SEQ_SEEN) )
+ finish_and_final_flush(p->flow, true, p);
- if ( !seglist.head )
+ if ( !seglist->head )
return flushed;
- if ( tracker.is_retransmit_of_held_packet(p) )
- flushed = perform_partial_flush(p, flushed);
+ if ( tracker->is_retransmit_of_held_packet(p) )
+ flushed += perform_partial_flush(p);
if ( asymmetric_flow_flushed(flushed, p) )
{
- purge_to_seq(seglist.head->start_seq() + flushed);
- tracker.r_win_base = seglist.seglist_base_seq;
+ purge_to_seq(seglist->head->start_seq() + flushed);
+ tracker->r_win_base = seglist->seglist_base_seq;
tcpStats.flush_on_asymmetric_flow++;
}
int TcpReassemblerIps::flush_stream(Packet* p, uint32_t dir, bool final_flush)
{
- if ( seglist.session->flow->two_way_traffic()
- or (tracker.get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
+ if ( seglist->session->flow->two_way_traffic()
+ or (tracker->get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
{
uint32_t bytes = get_q_sequenced(); // num bytes in pre-ack mode
if ( bytes )
class TcpReassemblerIps : public TcpReassembler
{
public:
- TcpReassemblerIps(TcpStreamTracker& trk, TcpReassemblySegments& sl)
+ TcpReassemblerIps(TcpStreamTracker* trk, TcpReassemblySegments* sl)
: TcpReassembler(trk, sl)
{ }
seglist_base_seq = 0;
}
+void TcpReassemblySegments::purge_segment_list()
+{
+ purge();
+}
+
void TcpReassemblySegments::update_next(TcpSegmentNode* tsn)
{
cur_rseg = tsn->next_no_gap() ? tsn->next : nullptr;
else
tracker->set_rcv_nxt(ack);
}
-
-void TcpReassemblySegments::purge_segment_list()
-{
- purge();
-}
int delete_reassembly_segment(TcpSegmentNode*);
void advance_rcv_nxt(TcpSegmentNode *tsn = nullptr);
void purge_flushed_segments(uint32_t flush_seq);
- void purge_segments_left_of_hole(const TcpSegmentNode*);
void skip_holes();
void skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor&);
void purge_segment_list();
private:
void insert_segment_data(TcpSegmentNode* prev, TcpSegmentNode*);
-
+ void purge_segments_left_of_hole(const TcpSegmentNode*);
void insert(TcpSegmentNode* prev, TcpSegmentNode* ss)
{
if ( talker->midstream_initial_ack_flush )
{
talker->midstream_initial_ack_flush = false;
- talker->eval_flush_policy_on_data(p);
+ talker->reassembler->eval_flush_policy_on_data(p);
}
if (p->dsize > 0)
- listener->eval_flush_policy_on_data(p);
+ listener->reassembler->eval_flush_policy_on_data(p);
if (p->ptrs.tcph->is_ack())
- talker->eval_flush_policy_on_ack(p);
+ talker->reassembler->eval_flush_policy_on_ack(p);
tcpStats.restarts++;
}
}
if ( flush )
- listener->eval_flush_policy_on_data(tsd.get_pkt());
+ listener->reassembler->eval_flush_policy_on_data(tsd.get_pkt());
else
listener->reassembler->initialize_paf();
}
void get_packet_header_foo(DAQ_PktHdr_t*, const DAQ_PktHdr_t* orig, uint32_t dir);
bool can_set_no_ack();
bool set_no_ack(bool);
- bool no_ack_mode_enabled() { return no_ack; }
+ inline bool no_ack_mode_enabled() { return no_ack; }
void set_pkt_action_flag(uint32_t flag)
{ pkt_action_mask |= flag; }
if ( trk.normalizer.is_tcp_ips_enabled() )
{
trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
- trk.eval_flush_policy_on_data(tsd.get_pkt());
+ trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
if ( trk.normalizer.is_tcp_ips_enabled() )
{
trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
- trk.eval_flush_policy_on_data(tsd.get_pkt());
+ trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
if ( trk.normalizer.is_tcp_ips_enabled() )
{
trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
- trk.eval_flush_policy_on_data(tsd.get_pkt());
+ trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
if ( trk.normalizer.is_tcp_ips_enabled() )
{
trk.seglist.skip_midstream_pickup_seglist_hole(tsd);
- trk.eval_flush_policy_on_data(tsd.get_pkt());
+ trk.reassembler->eval_flush_policy_on_data(tsd.get_pkt());
trk.midstream_initial_ack_flush = true;
}
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//--------------------------------------------------------------------------
-// tcp_stream_tracker.cpp author davis mcpherson <davmcphe@cisco.com>
+// tcp_stream_tracker.cc author davis mcpherson <davmcphe@cisco.com>
// Created on: Jun 24, 2015
#ifdef HAVE_CONFIG_H
using namespace snort;
THREAD_LOCAL HeldPacketQueue* hpq = nullptr;
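+// single shared instance used for the ignore flush policy; the switch from
+// reference to pointer members is what makes constructing it with null
+// tracker/seglist possible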
+TcpReassemblerIgnore* tcp_ignore_reassembler = new TcpReassemblerIgnore(nullptr, nullptr);
const std::list<HeldPacket>::iterator TcpStreamTracker::null_iterator { };
TcpStreamTracker::TcpStreamTracker(bool client) :
tcp_state(client ? TCP_STATE_NONE : TCP_LISTEN), client_tracker(client),
held_packet(null_iterator)
-{
- reassembler = new TcpReassemblerIgnore(*this, seglist);
- reassembler->init(!client_tracker, nullptr);
-}
+{ }
TcpStreamTracker::~TcpStreamTracker()
{
- delete reassembler;
+ if ( reassembler->get_flush_policy() != STREAM_FLPOLICY_IGNORE )
+ delete reassembler;
if( oaitw_reassembler )
{
set_splitter((StreamSplitter*)nullptr);
}
-int TcpStreamTracker::eval_flush_policy_on_ack(snort::Packet* p)
-{
- if( oaitw_reassembler )
- {
- delete oaitw_reassembler;
- oaitw_reassembler = nullptr;
- }
-
- return reassembler->eval_flush_policy_on_ack(p);
-}
-
-int TcpStreamTracker::eval_flush_policy_on_data(snort::Packet* p)
-{
- if( oaitw_reassembler )
- {
- delete oaitw_reassembler;
- oaitw_reassembler = nullptr;
- }
-
- reassembler->eval_flush_policy_on_data(p);
-
- return 0;
-}
-
-int TcpStreamTracker::eval_asymmetric_flush(snort::Packet* p)
-{
- if( oaitw_reassembler )
- {
- delete oaitw_reassembler;
- oaitw_reassembler = nullptr;
- }
-
- reassembler->eval_asymmetric_flush(p);
-
- return 0;
-}
-
TcpStreamTracker::TcpEvent TcpStreamTracker::set_tcp_event(const TcpSegmentDescriptor& tsd)
{
bool talker;
held_packet = null_iterator;
flush_policy = STREAM_FLPOLICY_IGNORE;
- if( oaitw_reassembler )
- {
- delete oaitw_reassembler;
- oaitw_reassembler = nullptr;
- }
- if ( reassembler )
- delete reassembler;
- reassembler = new TcpReassemblerIgnore(*this, seglist);
- reassembler->init(!client_tracker, nullptr);
+ update_flush_policy(nullptr);
normalizer.reset();
seglist.reset();
{
// switching to Ignore flush policy...save pointer to current reassembler to delete later
if ( reassembler )
+ {
+ seglist.purge_segment_list();
oaitw_reassembler = reassembler;
+ }
- reassembler = new TcpReassemblerIgnore(*this, seglist);
+ reassembler = tcp_ignore_reassembler;
reassembler->init(!client_tracker, splitter);
}
else if ( flush_policy == STREAM_FLPOLICY_ON_DATA )
{
// update from IDS -> IPS is not supported
assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_ACK );
- delete reassembler;
}
- reassembler = new TcpReassemblerIps(*this, seglist);
+ reassembler = new TcpReassemblerIps(this, &seglist);
reassembler->init(!client_tracker, splitter);
}
else
{
// update from IPS -> IDS is not supported
assert( reassembler->get_flush_policy() != STREAM_FLPOLICY_ON_DATA );
- delete reassembler;
}
- reassembler = new TcpReassemblerIds(*this, seglist);
+ reassembler = new TcpReassemblerIds(this, &seglist);
reassembler->init(!client_tracker, splitter);
}
}
void TcpStreamTracker::disable_reassembly(Flow* f)
{
set_splitter((StreamSplitter*)nullptr);
- seglist.reset();
+ seglist.purge_segment_list();
reassembler->reset_paf();
finalize_held_packet(f);
}
void TcpStreamTracker::update_tracker_no_ack_sent(const TcpSegmentDescriptor& tsd)
{
r_win_base = tsd.get_end_seq();
- eval_flush_policy_on_ack(tsd.get_pkt());
+ reassembler->eval_flush_policy_on_ack(tsd.get_pkt());
}
void TcpStreamTracker::update_tracker_ack_sent(TcpSegmentDescriptor& tsd)
fin_seq_status = FIN_WITH_SEQ_ACKED;
}
- eval_flush_policy_on_ack(tsd.get_pkt());
+ reassembler->eval_flush_policy_on_ack(tsd.get_pkt());
}
bool TcpStreamTracker::update_on_3whs_ack(TcpSegmentDescriptor& tsd)
else
reassembler->reset_paf();
- eval_asymmetric_flush(tsd.get_pkt());
+ reassembler->eval_flush_policy_on_data(tsd.get_pkt());
int32_t space_left = max_queued_bytes - seglist.get_seg_bytes_total();
void reset();
void clear_tracker(snort::Flow*, snort::Packet*, bool flush_segments, bool restart);
- int eval_flush_policy_on_ack(snort::Packet*);
- int eval_flush_policy_on_data(snort::Packet*);
- int eval_asymmetric_flush(snort::Packet*);
void update_stream_order(const TcpSegmentDescriptor&, bool aligned);
void fallback();