inline void set_raw_urp(uint16_t new_urp)
{ th_urp = new_urp; }
+
+ inline void set_seq(uint32_t new_seq)
+ { th_seq = htonl(new_seq); }
};
} // namespace tcp
} // namespace snort
#define STREAM_UNALIGNED 0
#define STREAM_ALIGNED 1
-#define STREAM_DEFAULT_MAX_QUEUED_BYTES 1048576 /* 1 MB */
-#define AVG_PKT_SIZE 400
-#define STREAM_DEFAULT_MAX_QUEUED_SEGS ( STREAM_DEFAULT_MAX_QUEUED_BYTES / AVG_PKT_SIZE )
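+// state values for TcpStreamTracker::max_queue_exceeded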
+#define MQ_NONE 0
+#define MQ_BYTES 1
+#define MQ_SEGS 2
#define STREAM_DEFAULT_MAX_SMALL_SEG_SIZE 0 /* disabled */
#define STREAM_DEFAULT_CONSEC_SMALL_SEGS 0 /* disabled */
{ CountType::SUM, "partial_flush_bytes", "partial flush total bytes" },
{ CountType::SUM, "inspector_fallbacks", "count of fallbacks from assigned service inspector" },
{ CountType::SUM, "partial_fallbacks", "count of fallbacks from assigned service stream splitter" },
+ { CountType::MAX, "max_segs", "maximum number of segments queued in any flow" },
+ { CountType::MAX, "max_bytes", "maximum number of bytes queued in any flow" },
{ CountType::END, nullptr, nullptr }
};
static const Parameter stream_queue_limit_params[] =
{
- { "max_bytes", Parameter::PT_INT, "0:max32", "1048576",
- "don't queue more than given bytes per session and direction" },
+ { "max_bytes", Parameter::PT_INT, "0:max32", "4194304",
+ "don't queue more than given bytes per session and direction, 0 = unlimited" },
- { "max_segments", Parameter::PT_INT, "0:max32", "2621",
- "don't queue more than given segments per session and direction" },
+ { "max_segments", Parameter::PT_INT, "0:max32", "3072",
+ "don't queue more than given segments per session and direction, 0 = unlimited" },
{ nullptr, Parameter::PT_MAX, nullptr, nullptr, nullptr }
};
PegCount partial_flush_bytes;
PegCount inspector_fallbacks;
PegCount partial_fallbacks;
+ PegCount max_segs;
+ PegCount max_bytes;
};
extern THREAD_LOCAL struct TcpStats tcpStats;
return tcp_norm_stats;
}
-bool TcpNormalizer::trim_payload(
- TcpNormalizerState&, TcpSegmentDescriptor& tsd, uint32_t max, NormMode mode, TcpPegCounts peg)
+bool TcpNormalizer::trim_payload(TcpNormalizerState&, TcpSegmentDescriptor& tsd, uint32_t max,
+ NormMode mode, TcpPegCounts peg, bool force)
{
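+ // when forced (queue limits hit), trim regardless of the configured normalization mode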
+ if ( force )
+ mode = NORM_MODE_ON;
+
tcp_norm_stats[peg][mode]++;
- if (mode == NORM_MODE_ON)
+
+ if ( mode == NORM_MODE_ON )
{
uint16_t fat = tsd.get_len() - max;
tsd.set_len(max);
}
void TcpNormalizer::trim_win_payload(
- TcpNormalizerState& tns, TcpSegmentDescriptor& tsd, uint32_t max)
+ TcpNormalizerState& tns, TcpSegmentDescriptor& tsd, uint32_t max, bool force)
{
if (tsd.get_len() > max)
- trim_payload(tns, tsd, max, (NormMode)tns.trim_win, PC_TCP_TRIM_WIN);
+ trim_payload(tns, tsd, max, (NormMode)tns.trim_win, PC_TCP_TRIM_WIN, force);
}
void TcpNormalizer::trim_mss_payload(
virtual bool packet_dropper(State&, TcpSegmentDescriptor&, NormFlags);
virtual bool trim_syn_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
virtual void trim_rst_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
- virtual void trim_win_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
+ virtual void trim_win_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0,
+ bool force = false);
virtual void trim_mss_payload(State&, TcpSegmentDescriptor&, uint32_t max = 0);
virtual void ecn_tracker(State&, const snort::tcp::TCPHdr*, bool req3way);
virtual void ecn_stripper(State&, TcpSegmentDescriptor&);
protected:
TcpNormalizer() = default;
- virtual bool trim_payload(State&, TcpSegmentDescriptor&, uint32_t, NormMode, TcpPegCounts);
+ virtual bool trim_payload(State&, TcpSegmentDescriptor&, uint32_t, NormMode, TcpPegCounts,
+ bool force = false);
virtual bool strip_tcp_timestamp(
State&, TcpSegmentDescriptor&, const snort::tcp::TcpOption*, NormMode);
virtual bool validate_rst_seq_geq(State&, TcpSegmentDescriptor&);
void trim_rst_payload(TcpSegmentDescriptor& tsd, uint32_t max = 0)
{ norm->trim_rst_payload(tns, tsd, max); }
- void trim_win_payload(TcpSegmentDescriptor& tsd, uint32_t max = 0)
- { norm->trim_win_payload(tns, tsd, max); }
+ void trim_win_payload(TcpSegmentDescriptor& tsd, uint32_t max = 0, bool force = false)
+ { norm->trim_win_payload(tns, tsd, max, force); }
void trim_mss_payload(TcpSegmentDescriptor& tsd, uint32_t max = 0)
{ norm->trim_mss_payload(tns, tsd, max); }
trs.sos.seg_bytes_total += tsn->i_len;
trs.sos.total_segs_queued++;
tcpStats.segs_queued++;
+
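+ // track per-flow high-water marks for the max_segs and max_bytes pegs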
+ if ( trs.sos.seg_count > tcpStats.max_segs )
+ tcpStats.max_segs = trs.sos.seg_count;
+
+ if ( trs.sos.seg_bytes_total > tcpStats.max_bytes )
+ tcpStats.max_bytes = trs.sos.seg_bytes_total;
}
bool TcpReassembler::is_segment_fasttrack(
DataBus::publish(FLOW_STATE_EVENT, nullptr, flow);
}
-bool TcpSession::flow_exceeds_config_thresholds(const TcpSegmentDescriptor& tsd)
+bool TcpSession::flow_exceeds_config_thresholds(TcpSegmentDescriptor& tsd)
{
TcpStreamTracker* listener = tsd.get_listener();
tel.set_tcp_event(EVENT_MAX_SMALL_SEGS_EXCEEDED);
}
- if ( tcp_config->max_queued_bytes
- && ( listener->reassembler.get_seg_bytes_total() > tcp_config->max_queued_bytes ) )
+ if ( tcp_config->max_queued_bytes )
{
- tcpStats.exceeded_max_bytes++;
- // FIXIT-M add one alert per flow per above
- return true;
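+ // remaining queue space for this direction; may go negative if already over the limit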
+ int32_t space_left =
+ tcp_config->max_queued_bytes - listener->reassembler.get_seg_bytes_total();
+
+ if ( space_left < (int32_t)tsd.get_len() )
+ {
+ tcpStats.exceeded_max_bytes++;
+ bool inline_mode = tsd.is_policy_inline();
+ bool ret_val = true;
+
+ if ( space_left > 0 )
+ ret_val = !inline_mode; // partial trim: queue the trimmed segment only in inline mode, where the trim takes effect
+ else
+ space_left = 0;
+
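+ // inline mode: on the first overflow, record the next sequence allowed through;
+ // while still over the limit, rewrite later segments to that seq before trimming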
+ if ( inline_mode )
+ {
+ if ( listener->max_queue_exceeded == MQ_NONE )
+ {
+ listener->max_queue_seq_nxt = tsd.get_seq() + space_left;
+ listener->max_queue_exceeded = MQ_BYTES;
+ }
+ else
+ (const_cast<tcp::TCPHdr*>(tsd.get_pkt()->ptrs.tcph))->set_seq(listener->max_queue_seq_nxt);
+ }
+ listener->normalizer.trim_win_payload(tsd, space_left, inline_mode);
+ return ret_val;
+ }
+ else if ( listener->max_queue_exceeded == MQ_BYTES )
+ listener->max_queue_exceeded = MQ_NONE;
}
- if ( tcp_config->max_queued_segs
- && ( listener->reassembler.get_seg_count() + 1 > tcp_config->max_queued_segs ) )
+ if ( tcp_config->max_queued_segs )
{
- tcpStats.exceeded_max_segs++;
- // FIXIT-M add one alert per flow per above
- return true;
+ if ( listener->reassembler.get_seg_count() + 1 > tcp_config->max_queued_segs )
+ {
+ tcpStats.exceeded_max_segs++;
+ bool inline_mode = tsd.is_policy_inline();
+
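+ // segment limit hit: trim the whole payload; in inline mode track and rewrite the
+ // sequence cut point just as for the byte limit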
+ if ( inline_mode )
+ {
+ if ( listener->max_queue_exceeded == MQ_NONE )
+ {
+ listener->max_queue_seq_nxt = tsd.get_seq();
+ listener->max_queue_exceeded = MQ_SEGS;
+ }
+ else
+ (const_cast<tcp::TCPHdr*>(tsd.get_pkt()->ptrs.tcph))->set_seq(listener->max_queue_seq_nxt);
+ }
+ listener->normalizer.trim_win_payload(tsd, 0, inline_mode);
+ return true;
+ }
+ else if ( listener->max_queue_exceeded == MQ_SEGS )
+ listener->max_queue_exceeded = MQ_NONE;
}
return false;
void process_tcp_stream(TcpSegmentDescriptor&);
int process_tcp_data(TcpSegmentDescriptor&);
void set_os_policy() override;
- bool flow_exceeds_config_thresholds(const TcpSegmentDescriptor&);
+ bool flow_exceeds_config_thresholds(TcpSegmentDescriptor&);
void update_stream_order(const TcpSegmentDescriptor&, bool aligned);
void swap_trackers();
void init_session_on_syn(TcpSegmentDescriptor&);
uint32_t max_window = 0;
uint32_t overlap_limit = 0;
- uint32_t max_queued_bytes = STREAM_DEFAULT_MAX_QUEUED_BYTES;
- uint32_t max_queued_segs = STREAM_DEFAULT_MAX_QUEUED_SEGS;
+ uint32_t max_queued_bytes = 4194304; /* 4 MB */
+ uint32_t max_queued_segs = 3072;
uint32_t max_consec_small_segs = STREAM_DEFAULT_CONSEC_SMALL_SEGS;
uint32_t max_consec_small_seg_size = STREAM_DEFAULT_MAX_SMALL_SEG_SIZE;
if ( SEQ_GT(tsd.get_end_seq(), snd_nxt) )
snd_nxt = tsd.get_end_seq();
- if ( SEQ_GT(tsd.get_ack(), r_win_base) )
- r_win_base = tsd.get_ack();
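+ // update the send window only from ACKs at or above r_win_base; stale ACKs are ignored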
+ if ( SEQ_GEQ(tsd.get_ack(), r_win_base) )
+ {
+ if ( SEQ_GT(tsd.get_ack(), r_win_base) )
+ r_win_base = tsd.get_ack();
+
+ snd_wnd = tsd.get_wnd();
+ }
if ( ( fin_seq_status == TcpStreamTracker::FIN_WITH_SEQ_SEEN )
&& SEQ_EQ(r_win_base, fin_final_seq) )
fin_seq_status = TcpStreamTracker::FIN_WITH_SEQ_ACKED;
}
- snd_wnd = tsd.get_wnd();
reassembler.flush_on_ack_policy(tsd.get_pkt());
}
uint32_t r_win_base = 0; // remote side window base sequence number (the last ack we got)
uint32_t small_seg_count = 0;
+ uint32_t max_queue_seq_nxt = 0; // next expected sequence once a queue limit is exceeded
+ uint8_t max_queue_exceeded = MQ_NONE; // MQ_NONE, MQ_BYTES, or MQ_SEGS
uint8_t order = 0;
FinSeqNumStatus fin_seq_status = TcpStreamTracker::FIN_NOT_SEEN;
bool reinit_seg_base = false;