tcpStats.overlaps++;
trs.sos.overlap_count++;
insert_full_overlap(trs);
+
+ if ( trs.sos.keep_segment == false )
+ return;
}
}
}
// REASSEMBLY_POLICY_VISTA:
// Full right-overlap: the incoming segment entirely covers the queued
// (right) segment; this policy keeps the previously queued data.
void SegmentOverlapEditor::full_right_overlap_truncate_new(TcpReassemblerState& trs)
{
+
if ( trs.sos.tcp_ips_data == NORM_MODE_ON )
{
unsigned offset = trs.sos.right->i_seq - trs.sos.tsd->get_seq();
+
+ // New: a same-sequence overlap whose data disagrees with the stored
+ // zero window probe byte is treated as an evasion attempt — block the
+ // session and tell the caller not to keep this segment.
+ if ( !offset && zwp_data_mismatch(trs, *trs.sos.tsd, trs.sos.right->i_len))
+ {
+ trs.tracker->normalizer.session_blocker(*trs.sos.tsd);
+ trs.sos.keep_segment = false;
+ return;
+ }
+
// Inline normalization: overwrite the overlapping bytes of the new
// packet with the already-queued data so the wire matches reassembly.
trs.sos.tsd->rewrite_payload(offset, trs.sos.right->data, trs.sos.right->i_len);
}
// NOTE(review): as rendered this calls the function it is defined in,
// which would recurse forever; this diff chunk most likely dropped an
// intervening function header — confirm against the complete file.
full_right_overlap_truncate_new(trs);
}
+// Returns true when the overlapping segment looks like a tampered TCP zero
+// window probe: the overlap is exactly ZERO_WIN_PROBE_LEN (1 byte), the
+// queued segment sits at the sequence recorded by the normalizer for the
+// last probe, and the first payload byte of the new packet differs from
+// the byte already queued. A mismatch is only reported when the NAP
+// policy is inline (i.e. the normalizer may act on it).
+bool SegmentOverlapEditor::zwp_data_mismatch(
+ TcpReassemblerState& trs, TcpSegmentDescriptor& tsd, uint32_t overlap)
+{
+ if ( overlap == ZERO_WIN_PROBE_LEN
+ and trs.sos.right->i_seq == trs.tracker->normalizer.get_zwp_seq()
+ and (trs.sos.right->data[0] != tsd.get_pkt()->data[0]) )
+ {
+ return tsd.is_nap_policy_inline();
+ }
+
+ return false;
+}
+
void SegmentOverlapEditor::print(TcpReassemblerState& trs)
{
LogMessage(" seglist_base_seq: %X\n", trs.sos.seglist_base_seq);
virtual bool is_segment_retransmit(TcpReassemblerState&, bool*);
virtual void drop_old_segment(TcpReassemblerState&);
+ // True when a 1-byte full overlap disagrees with the stored zero window
+ // probe byte; see the zwp_data_mismatch definition for details.
+ virtual bool zwp_data_mismatch(TcpReassemblerState&, TcpSegmentDescriptor&, uint32_t);
virtual void left_overlap_keep_first(TcpReassemblerState&);
virtual void left_overlap_trim_first(TcpReassemblerState&);
#define SLAM_MAX 4
+// A TCP zero window probe carries exactly one byte of payload.
+#define ZERO_WIN_PROBE_LEN 1
+
// target-based policy types - changes to this enum require changes to stream.h::TCP_POLICIES
enum StreamPolicy : uint8_t
{
{ CountType::MAX, "max_segs", "maximum number of segments queued in any flow" },
{ CountType::MAX, "max_bytes", "maximum number of bytes queued in any flow" },
{ CountType::SUM, "zero_len_tcp_opt", "number of zero length tcp options" },
+ { CountType::SUM, "zero_win_probes", "number of tcp zero window probes" },
{ CountType::END, nullptr, nullptr }
};
PegCount max_segs;
PegCount max_bytes;
PegCount zero_len_tcp_opt;
+ PegCount zero_win_probes;
};
extern THREAD_LOCAL struct TcpStats tcpStats;
#include "tcp_module.h"
#include "tcp_stream_session.h"
#include "tcp_stream_tracker.h"
+#include "packet_tracer/packet_tracer.h"
using namespace snort;
return false;
}
+// React to a zero window probe whose data byte was altered: stop all
+// further detection on this packet, block the entire session, and record
+// the drop reason / packet-tracer note for the verdict trail.
+// The TcpNormalizerState parameter is unused here; it keeps the virtual
+// interface uniform across normalizer policies.
+void TcpNormalizer::session_blocker(
+ TcpNormalizerState&, TcpSegmentDescriptor& tsd)
+{
+ Packet *p = tsd.get_pkt();
+ DetectionEngine::disable_all(p);
+ p->active->block_session(p, true);
+ p->active->set_drop_reason("normalizer");
+ if (PacketTracer::is_active())
+ {
+ PacketTracer::log("Normalizer: TCP Zero Window Probe byte data mismatch\n");
+ }
+}
+
bool TcpNormalizer::packet_dropper(
TcpNormalizerState& tns, TcpSegmentDescriptor& tsd, NormFlags f)
{
}
}
+// Accessor: sequence number recorded for the last zero window probe.
+uint32_t TcpNormalizer::get_zwp_seq(
+ TcpNormalizerState& tns)
+{
+ return tns.zwp_seq;
+}
+
// don't use the window if we may have missed scaling
// one way zero window is uninitialized
// two way zero window is actually closed (regardless of scaling)
}
}
+// Record the sequence number of an observed zero window probe so a later
+// overlapping retransmit can be validated against it.
+void TcpNormalizer::set_zwp_seq(
+ TcpNormalizerState& tns, uint32_t seq)
+{
+ tns.zwp_seq = seq;
+}
+
uint16_t TcpNormalizer::set_urg_offset(
TcpNormalizerState&, const tcp::TCPHdr* tcph, uint16_t dsize)
{
int32_t paws_ts_fudge = 0;
int tcp_ts_flags = 0;
+ // Sequence number of the most recently seen zero window probe;
+ // written via set_zwp_seq(), read back via get_zwp_seq().
+ uint32_t zwp_seq = 0;
int8_t trim_syn = 0;
int8_t trim_rst = 0;
virtual ~TcpNormalizer() = default;
virtual void init(State&) { }
+ // Block the session when a zero window probe's data byte was altered.
+ virtual void session_blocker(State&, TcpSegmentDescriptor&);
virtual bool packet_dropper(State&, TcpSegmentDescriptor&, NormFlags);
virtual bool trim_syn_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
virtual void trim_rst_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
virtual void trim_mss_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
virtual void ecn_tracker(State&, const snort::tcp::TCPHdr*, bool req3way);
virtual void ecn_stripper(State&, TcpSegmentDescriptor&);
+ // Accessor for the recorded zero window probe sequence number.
+ virtual uint32_t get_zwp_seq(State&);
virtual uint32_t get_stream_window(State&, TcpSegmentDescriptor&);
virtual uint32_t get_tcp_timestamp(State&, TcpSegmentDescriptor&, bool strip);
virtual int handle_paws(State&, TcpSegmentDescriptor&);
virtual bool validate_rst(State&, TcpSegmentDescriptor&);
virtual int handle_repeated_syn(State&, TcpSegmentDescriptor&) = 0;
virtual uint16_t set_urg_offset(State&, const snort::tcp::TCPHdr* tcph, uint16_t dsize);
+ // Record the sequence number of an observed zero window probe.
+ virtual void set_zwp_seq(State&, uint32_t seq);
static void reset_stats();
// Reinitialize to the default OS policy with no bound session/tracker.
void reset()
{ init(StreamPolicy::OS_DEFAULT, nullptr, nullptr, nullptr); }
+ // Forward to the active normalizer policy implementation.
+ void session_blocker(TcpSegmentDescriptor& tsd)
+ { norm->session_blocker(tns, tsd); }
+
// Forward to the active normalizer policy implementation.
bool packet_dropper(TcpSegmentDescriptor& tsd, NormFlags nflags)
{ return norm->packet_dropper(tns, tsd, nflags); }
// Forward to the active normalizer policy implementation.
void ecn_stripper(TcpSegmentDescriptor& tsd)
{ norm->ecn_stripper(tns, tsd); }
+ // Forward to the active normalizer policy implementation.
+ uint32_t get_zwp_seq()
+ { return norm->get_zwp_seq(tns); }
+
// Forward to the active normalizer policy implementation.
uint32_t get_stream_window(TcpSegmentDescriptor& tsd)
{ return norm->get_stream_window(tns, tsd); }
// Forward to the active normalizer policy implementation.
int handle_repeated_syn(TcpSegmentDescriptor& tsd)
{ return norm->handle_repeated_syn(tns, tsd); }
+ // Forward to the active normalizer policy implementation.
+ // Fixed: dropped the redundant `return` before a void call — legal C++
+ // but unidiomatic and inconsistent with the session_blocker wrapper.
+ void set_zwp_seq(uint32_t seq)
+ { norm->set_zwp_seq(tns, seq); }
+
// Forward to the active normalizer policy implementation.
uint16_t set_urg_offset(const snort::tcp::TCPHdr* tcph, uint16_t dsize)
{ return norm->set_urg_offset(tns, tcph, dsize); }
#include "profiler/profiler.h"
#include "protocols/eth.h"
#include "pub_sub/intrinsic_event_ids.h"
+#include "packet_tracer/packet_tracer.h"
#include "stream_tcp.h"
#include "tcp_ha.h"
else
(const_cast<tcp::TCPHdr*>(tsd.get_pkt()->ptrs.tcph))->set_seq(listener->max_queue_seq_nxt);
}
+
+ if ( inline_mode || listener->normalizer.get_trim_win() == NORM_MODE_ON)
+ {
+ tsd.get_pkt()->active->set_drop_reason("stream");
+ if (PacketTracer::is_active())
+ PacketTracer::log("Stream: Flow exceeded the configured max byte threshold (%u)\n", tcp_config->max_queued_bytes);
+ }
+
listener->normalizer.trim_win_payload(tsd, space_left, inline_mode);
return ret_val;
}
else
(const_cast<tcp::TCPHdr*>(tsd.get_pkt()->ptrs.tcph))->set_seq(listener->max_queue_seq_nxt);
}
+
+ if ( inline_mode || listener->normalizer.get_trim_win() == NORM_MODE_ON)
+ {
+ tsd.get_pkt()->active->set_drop_reason("stream");
+ if (PacketTracer::is_active())
+ PacketTracer::log("Stream: Flow exceeded the configured max segment threshold (%u)\n", tcp_config->max_queued_segs);
+ }
+
listener->normalizer.trim_win_payload(tsd, 0, inline_mode);
return true;
}
if ( tcp_config->policy != StreamPolicy::OS_PROXY
and listener->normalizer.get_stream_window(tsd) == 0 )
{
- listener->normalizer.trim_win_payload(tsd);
- return STREAM_UNALIGNED;
+ if (tsd.get_len() == ZERO_WIN_PROBE_LEN)
+ {
+ tcpStats.zero_win_probes++;
+ listener->normalizer.set_zwp_seq(seq);
+ }
+ else
+ {
+ listener->normalizer.trim_win_payload(tsd);
+ return STREAM_UNALIGNED;
+ }
}
/* move the ack boundary up, this is the only way we'll accept data */
if ( tcp_config->policy != StreamPolicy::OS_PROXY
and listener->normalizer.get_stream_window(tsd) == 0 )
{
+ if (tsd.get_len() == ZERO_WIN_PROBE_LEN)
+ tcpStats.zero_win_probes++;
+
listener->normalizer.trim_win_payload(tsd);
return STREAM_UNALIGNED;
}
if ( snd_nxt < snd_una )
snd_nxt = snd_una;
}
+ if ( !tsd.get_len() and snd_wnd == 0
+ and SEQ_LT(tsd.get_seq(), r_win_base) )
+ tcpStats.zero_win_probes++;
}
// In no-ack policy, data is implicitly acked immediately.