return ( ps->paf != snort::StreamSplitter::START );
}
+// (Re)arm PAF scanning at absolute sequence number 'seq': scan position and
+// tracked seq are reset together, the flush point and running total are
+// cleared, and the splitter state returns to SEARCH.
+SO_PUBLIC inline void paf_initialize(PAF_State* ps, uint32_t seq)
+{
+ ps->seq = ps->pos = seq;
+ ps->fpt = ps->tot = 0;
+ ps->paf = snort::StreamSplitter::SEARCH;
+}
+
inline uint32_t paf_active (PAF_State* ps)
{
return ( ps->paf != snort::StreamSplitter::ABORT );
#include "log/log.h"
#include "main/analyzer.h"
#include "packet_io/active.h"
+#include "packet_tracer/packet_tracer.h"
#include "profiler/profiler.h"
#include "protocols/packet_manager.h"
#include "time/packet_time.h"
#include "tcp_module.h"
#include "tcp_normalizers.h"
#include "tcp_session.h"
+#include "tcp_stream_tracker.h"
using namespace snort;
if ( !trs.tracker->is_reassembly_enabled() )
return 0;
- if ( trs.sos.session->flow->two_way_traffic() )
+ if ( trs.sos.session->flow->two_way_traffic()
+ or (trs.tracker->get_tcp_state() == TcpStreamTracker::TCP_MID_STREAM_RECV) )
{
uint32_t bytes = 0;
{
if ( p )
{
- finish_and_final_flush(trs, flow, clear, const_cast<Packet*>(p));
+ finish_and_final_flush(trs, flow, clear, const_cast<Packet*>(p));
}
else
{
Packet* pdu = get_packet(flow, trs.packet_dir, trs.server_side);
bool pending = clear and paf_initialized(&trs.paf_state);
-
if ( pending )
{
DetectionEngine de;
}
}
-// this is for post-ack flushing
-uint32_t TcpReassembler::get_reverse_packet_dir(TcpReassemblerState&, const Packet* p)
-{
- /* Remember, one side's packets are stored in the
- * other side's queue. So when talker ACKs data,
- * we need to check if we're ready to flush.
- *
- * If we do decide to flush, the flush IP & port info
- * is the opposite of the packet -- again because this
- * is the ACK from the talker and we're flushing packets
- * that actually came from the listener.
- */
- if ( p->is_from_server() )
- return PKT_FROM_CLIENT;
-
- if ( p->is_from_client() )
- return PKT_FROM_SERVER;
-
- return 0;
-}
-
-uint32_t TcpReassembler::get_forward_packet_dir(TcpReassemblerState&, const Packet* p)
-{
- if ( p->is_from_server() )
- return PKT_FROM_SERVER;
-
- if ( p->is_from_client() )
- return PKT_FROM_CLIENT;
-
- return 0;
-}
-
// see scan_data_post_ack() for details
// the key difference is that we operate on forward moving data
// because we don't wait until it is acknowledged
}
trs.sos.seglist.cur_sseg = tsn;
-
- if (tsn)
+ if ( tsn )
update_rcv_nxt(trs, *tsn);
return ret_val;
if (!trs.tracker->ooo_packet_seen and SEQ_LT(trs.tracker->rcv_nxt, temp))
trs.tracker->ooo_packet_seen = true;
- trs.tracker->rcv_nxt = temp;
+ if ( SEQ_GT(temp, trs.tracker->rcv_nxt) )
+ trs.tracker->rcv_nxt = temp;
}
bool TcpReassembler::has_seglist_hole(TcpReassemblerState& trs, TcpSegmentNode& tsn, PAF_State& ps,
return true;
}
+// Drop every segment from the head of the seglist up to (but not including)
+// end_tsn; a null end_tsn drops the entire list. Used when a midstream pickup
+// abandons data that precedes a sequence hole.
+void TcpReassembler::purge_segments_left_of_hole(TcpReassemblerState& trs, const TcpSegmentNode* end_tsn)
+{
+ uint32_t packets_skipped = 0;
+
+ TcpSegmentNode* cur_tsn = trs.sos.seglist.head;
+
+ // nothing queued -- nothing to purge (avoids dereferencing a null head
+ // in the do/while below)
+ if ( !cur_tsn )
+ return;
+
+ do
+ {
+ // grab next before deleting; delete_reassembly_segment frees drop_tsn
+ TcpSegmentNode* drop_tsn = cur_tsn;
+ cur_tsn = cur_tsn->next;
+ delete_reassembly_segment(trs, drop_tsn);
+ ++packets_skipped;
+ } while( cur_tsn and cur_tsn != end_tsn );
+
+ if (PacketTracer::is_active())
+ PacketTracer::log("Stream: Skipped %u packets before seglist hole\n", packets_skipped);
+}
+
+// On a midstream pickup, discard queued segments that sit to the left of a
+// sequence hole (we will never see the missing bytes), then re-anchor the
+// seglist base, the PAF state, and rcv_nxt at the first contiguous run of
+// data at or beyond the peer's ACK point.
+void TcpReassembler::skip_midstream_pickup_seglist_hole(TcpReassemblerState& trs, TcpSegmentDescriptor& tsd)
+{
+ uint32_t ack = tsd.get_ack();
+
+ TcpSegmentNode* tsn = trs.sos.seglist.head;
+ while ( tsn )
+ {
+ // this segment reaches the ACK point -- everything left of here is settled
+ if ( SEQ_GEQ( tsn->i_seq + tsn->i_len, ack) )
+ break;
+
+ // gap between this segment and the next: drop all segments before the
+ // far side of the hole and restart the list at the surviving head
+ if ( tsn->next and SEQ_GT(tsn->next->i_seq, tsn->i_seq + tsn->i_len) )
+ {
+ tsn = tsn->next;
+ purge_segments_left_of_hole(trs, tsn);
+ trs.sos.seglist_base_seq = trs.sos.seglist.head->i_seq;
+ }
+ // last segment still ends short of the ACK: the tail of the list is all
+ // left of a hole, so purge everything (tsn becomes null, ending the loop)
+ // and anchor the base at the ACK itself
+ else if ( !tsn->next and SEQ_LT(tsn->i_seq + tsn->i_len, ack) )
+ {
+ tsn = tsn->next;
+ purge_segments_left_of_hole(trs, tsn);
+ trs.sos.seglist_base_seq = ack;
+ }
+ else
+ tsn = tsn->next;
+ }
+
+ // re-sync PAF and rcv_nxt with whatever survived the purge
+ tsn = trs.sos.seglist.head;
+ if ( tsn )
+ {
+ paf_initialize(&trs.paf_state, tsn->i_seq);
+
+ // advance rcv_nxt past the leading contiguous run of segments
+ while ( next_no_gap(*tsn) )
+ tsn = tsn->next;
+ trs.tracker->rcv_nxt = tsn->i_seq + tsn->i_len;
+ }
+ else
+ trs.tracker->rcv_nxt = ack;
+}
+
// iterate over trs.sos.seglist and scan all new acked bytes
// - new means not yet scanned
// - must use trs.sos.seglist data (not packet) since this packet may plug a
int32_t flush_amt;
do
{
- flags = get_forward_packet_dir(trs, p);
+ flags = trs.packet_dir;
flush_amt = scan_data_pre_ack(trs, &flags, p);
if ( flush_amt <= 0 )
break;
do
{
- flags = get_reverse_packet_dir(trs, p);
+ flags = trs.packet_dir;
flush_amt = scan_data_post_ack(trs, &flags, p);
if ( flush_amt <= 0 or trs.paf_state.paf == StreamSplitter::SKIP )
break;
uint32_t event_id, uint32_t event_second);
virtual void purge_alerts(TcpReassemblerState&);
virtual bool segment_within_seglist_window(TcpReassemblerState&, TcpSegmentDescriptor&);
+ void skip_midstream_pickup_seglist_hole(TcpReassemblerState&, TcpSegmentDescriptor&);
+ // (Re)anchor PAF at the head of the seglist when PAF has not started yet or
+ // its scan point is ahead of the queued data.
+ // NOTE(review): dereferences seglist.head unconditionally -- assumes callers
+ // only invoke this with a non-empty seglist; confirm at call sites.
+ void initialize_paf(TcpReassemblerState& trs)
+ {
+ if ( !paf_initialized(&trs.paf_state) or SEQ_GT(trs.paf_state.seq, trs.sos.seglist.head->i_seq) )
+ paf_initialize(&trs.paf_state, trs.sos.seglist.head->i_seq);
+ }
uint32_t perform_partial_flush(TcpReassemblerState&, snort::Flow*, snort::Packet*&);
void fallback(TcpStreamTracker&, bool server_side);
int32_t scan_data_post_ack(TcpReassemblerState&, uint32_t* flags, snort::Packet*);
void purge_to_seq(TcpReassemblerState&, uint32_t flush_seq);
+ void purge_segments_left_of_hole(TcpReassemblerState&, const TcpSegmentNode*);
bool next_no_gap(const TcpSegmentNode&);
bool next_no_gap_c(const TcpSegmentNode&);
uint32_t& flags);
void skip_seglist_hole(TcpReassemblerState&, snort::Packet*, uint32_t flags,
int32_t flush_amt);
+
uint32_t perform_partial_flush(TcpReassemblerState&, snort::Packet*, uint32_t flushed = 0);
};
bool is_segment_pending_flush() const
{ return reassembler->is_segment_pending_flush(trs); }
+ // forwards to the reassembler with this tracker's state
+ void skip_midstream_pickup_seglist_hole(TcpSegmentDescriptor& tsd)
+ { reassembler->skip_midstream_pickup_seglist_hole(trs, tsd); }
+
+ // forwards to the reassembler with this tracker's state
+ void initialize_paf()
+ { reassembler->initialize_paf(trs); }
+
int flush_on_data_policy(snort::Packet* p)
{ return reassembler->flush_on_data_policy(trs, p); }
listener = &server;
}
+ if ( talker->midstream_initial_ack_flush )
+ {
+ talker->midstream_initial_ack_flush = false;
+ talker->reassembler.flush_on_data_policy(p);
+ }
+
if (p->dsize > 0)
listener->reassembler.flush_on_data_policy(p);
default:
if ( aligned )
tsd.set_packet_flags(PKT_STREAM_ORDER_OK);
-
else
{
if ( !(flow->get_session_flags() & SSNFLAG_STREAM_ORDER_BAD) )
set_pkt_action_flag(ACTION_BAD_PKT);
}
-void TcpSession::handle_data_segment(TcpSegmentDescriptor& tsd)
+void TcpSession::handle_data_segment(TcpSegmentDescriptor& tsd, bool flush)
{
TcpStreamTracker* listener = tsd.get_listener();
TcpStreamTracker* talker = tsd.get_talker();
- // if this session started midstream we may need to init the listener's base seq #
- if ( listener->reinit_seg_base )
- {
- listener->reassembler.set_seglist_base_seq(tsd.get_seq());
- listener->reinit_seg_base = false;
- }
-
if ( TcpStreamTracker::TCP_CLOSED != talker->get_tcp_state() )
{
uint8_t tcp_options_len = tsd.get_tcph()->options_len();
process_tcp_data(tsd);
}
- listener->reassembler.flush_on_data_policy(tsd.get_pkt());
+ if ( flush )
+ listener->reassembler.flush_on_data_policy(tsd.get_pkt());
+ else
+ listener->reassembler.initialize_paf();
}
TcpStreamTracker::TcpState TcpSession::get_talker_state(TcpSegmentDescriptor& tsd)
void check_for_session_hijack(TcpSegmentDescriptor&) override;
bool check_for_window_slam(TcpSegmentDescriptor& tsd) override;
void mark_packet_for_drop(TcpSegmentDescriptor&) override;
- void handle_data_segment(TcpSegmentDescriptor&) override;
+ void handle_data_segment(TcpSegmentDescriptor&, bool flush = true);
bool validate_packet_established_session(TcpSegmentDescriptor&) override;
bool is_midstream_allowed(const TcpSegmentDescriptor& tsd)
flow->session_state |= STREAM_STATE_MIDSTREAM;
trk.init_on_data_seg_recv(tsd);
trk.normalizer.ecn_tracker(tsd.get_tcph(), trk.session->tcp_config->require_3whs());
- trk.session->handle_data_segment(tsd);
+ trk.session->handle_data_segment(tsd, !trk.normalizer.is_tcp_ips_enabled());
}
else if ( trk.session->tcp_config->require_3whs() )
{
bool TcpStateMidStreamRecv::syn_ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ if ( trk.normalizer.is_tcp_ips_enabled() )
+ {
+ trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
+ trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.midstream_initial_ack_flush = true;
+ }
+
trk.session->check_for_repeated_syn(tsd);
trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
return true;
bool TcpStateMidStreamRecv::ack_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ if ( trk.normalizer.is_tcp_ips_enabled() )
+ {
+ trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
+ trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.midstream_initial_ack_flush = true;
+ }
+
trk.update_tracker_ack_sent(tsd);
trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
return true;
bool TcpStateMidStreamRecv::data_seg_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ if ( trk.normalizer.is_tcp_ips_enabled() )
+ {
+ trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
+ trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.midstream_initial_ack_flush = true;
+ }
+
trk.update_tracker_ack_sent(tsd);
trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
if ( trk.session->no_ack_mode_enabled() )
// Data received while in MID_STREAM_RECV: update the ack bookkeeping, then
// hand the segment to the session. With TCP IPS (inline normalization)
// enabled, flushing is deferred (flush = false) until the hole-skip /
// initial-ack-flush path runs; otherwise flush on data policy as usual.
bool TcpStateMidStreamRecv::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
trk.update_tracker_ack_recv(tsd);
- trk.session->handle_data_segment(tsd);
+ trk.session->handle_data_segment(tsd, !trk.normalizer.is_tcp_ips_enabled());
return true;
}
bool TcpStateMidStreamRecv::fin_sent(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
+ if ( trk.normalizer.is_tcp_ips_enabled() )
+ {
+ trk.reassembler.skip_midstream_pickup_seglist_hole(tsd);
+ trk.reassembler.flush_on_data_policy(tsd.get_pkt());
+ trk.midstream_initial_ack_flush = true;
+ }
+
trk.update_on_fin_sent(tsd);
trk.session->flow->call_handlers(tsd.get_pkt(), true);
TcpStreamTracker::TcpState listener_state = tsd.get_listener()->get_tcp_state();
// Post-state-machine hook for MID_STREAM_RECV.
// NOTE(review): this change drops the promotion to ESTABLISHED when a
// one-sided session is detected -- the one-sided check still runs for its
// side effects, but state transitions are now handled elsewhere; confirm
// against the MID_STREAM_RECV event handlers that set TCP_ESTABLISHED.
bool TcpStateMidStreamRecv::do_post_sm_packet_actions(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
// Always need to check for one sided
- bool one_sided = trk.session->check_for_one_sided_session(tsd.get_pkt());
- if ( one_sided && TcpStreamTracker::TCP_MID_STREAM_RECV == trk.get_tcp_state() )
- trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
+ trk.session->check_for_one_sided_session(tsd.get_pkt());
return true;
}
// First data received after a midstream pickup in MID_STREAM_SENT: seed the
// seglist base from this segment's seq, process the data, then mark the
// session established. The handle_data_segment call is deliberately moved
// BEFORE set_established / the ESTABLISHED transition so the segment is
// queued against the freshly-set base seq while still in this state.
bool TcpStateMidStreamSent::data_seg_recv(TcpSegmentDescriptor& tsd, TcpStreamTracker& trk)
{
trk.update_tracker_ack_recv(tsd);
+ trk.reassembler.set_seglist_base_seq(tsd.get_seq());
+ trk.session->handle_data_segment(tsd);
trk.session->set_established(tsd);
trk.set_tcp_state(TcpStreamTracker::TCP_ESTABLISHED);
- trk.session->handle_data_segment(tsd);
return true;
}
trk.init_on_synack_recv(tsd);
trk.normalizer.ecn_tracker(tsd.get_tcph(), trk.session->tcp_config->require_3whs());
if ( tsd.is_data_segment() )
- trk.session->handle_data_segment(tsd);
+ trk.session->handle_data_segment(tsd, !trk.normalizer.is_tcp_ips_enabled());
}
else if ( trk.session->tcp_config->require_3whs() )
{
flow->session_state |= STREAM_STATE_MIDSTREAM;
trk.init_on_data_seg_recv(tsd);
trk.normalizer.ecn_tracker(tsd.get_tcph(), trk.session->tcp_config->require_3whs());
- trk.session->handle_data_segment(tsd);
+ trk.session->handle_data_segment(tsd, !trk.normalizer.is_tcp_ips_enabled());
}
else if ( trk.session->tcp_config->require_3whs() )
{
virtual void check_for_session_hijack(TcpSegmentDescriptor&) = 0;
virtual bool check_for_window_slam(TcpSegmentDescriptor&) = 0;
virtual void mark_packet_for_drop(TcpSegmentDescriptor&) = 0;
- virtual void handle_data_segment(TcpSegmentDescriptor&) = 0;
virtual bool validate_packet_established_session(TcpSegmentDescriptor&) = 0;
TcpStreamTracker client;
r_win_base = tsd.get_ack();
rcv_nxt = tsd.get_ack();
reassembler.set_seglist_base_seq(tsd.get_ack());
- reinit_seg_base = true;
ts_last_packet = tsd.get_packet_timestamp();
tf_flags |= normalizer.get_tcp_timestamp(tsd, false);
tf_flags |= tsd.init_wscale(&wscale);
cache_mac_address(tsd, tsd.get_direction() );
- if ( TcpStreamTracker::TCP_LISTEN == tcp_state || TcpStreamTracker::TCP_STATE_NONE == tcp_state)
- tcp_state = TcpStreamTracker::TCP_MID_STREAM_SENT;
- else
- tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
+ tcp_state = TcpStreamTracker::TCP_MID_STREAM_SENT;
}
void TcpStreamTracker::init_on_data_seg_recv(TcpSegmentDescriptor& tsd)
reassembler.set_seglist_base_seq(tsd.get_seq());
cache_mac_address(tsd, tsd.get_direction() );
- if ( TcpStreamTracker::TCP_LISTEN == tcp_state || TcpStreamTracker::TCP_STATE_NONE == tcp_state )
- tcp_state = TcpStreamTracker::TCP_MID_STREAM_RECV;
- else
- tcp_state = TcpStreamTracker::TCP_ESTABLISHED;
tcpStats.sessions_on_data++;
+ tcp_state = TcpStreamTracker::TCP_MID_STREAM_RECV;
}
void TcpStreamTracker::finish_server_init(TcpSegmentDescriptor& tsd)
}
else
{
- reassembler.set_seglist_base_seq(tsd.get_seq() );
+ reassembler.set_seglist_base_seq(tsd.get_seq());
r_win_base = tsd.get_seq();
}
}
bool require_3whs = false;
bool rst_pkt_sent = false;
bool ooo_packet_seen = false;
+ bool midstream_initial_ack_flush = false;
// FIXIT-L make these non-public
public:
uint8_t max_queue_exceeded = MQ_NONE;
uint8_t order = 0;
FinSeqNumStatus fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
- bool reinit_seg_base = false;
protected:
static const std::list<HeldPacket>::iterator null_iterator;