class TcpCodec : public Codec
{
public:
- TcpCodec() : Codec(CD_TCP_NAME)
- {
- }
-
+ TcpCodec() : Codec(CD_TCP_NAME) { }
void get_protocol_ids(std::vector<ProtocolId>& v) override;
void log(TextLog* const, const uint8_t* pkt, const uint16_t len) override;
bool decode(const RawData&, CodecData&, DecodeData&) override;
+
bool encode(const uint8_t* const raw_in, const uint16_t raw_len,
EncState&, Buffer&, Flow*) override;
+
void update(const ip::IpApi&, const EncodeFlags, uint8_t* raw_pkt,
uint16_t lyr_len, uint32_t& updated_len) override;
+
void format(bool reverse, uint8_t* raw_pkt, DecodeData& snort) override;
private:
+ bool valid_checksum4(const RawData&, DecodeData&);
+ bool valid_checksum6(const RawData&, const CodecData&, DecodeData&);
+
+ int validate_option(const tcp::TcpOption* const opt,
+ const uint8_t* const end, const int expected_len);
- int OptLenValidate(const tcp::TcpOption* const opt,
- const uint8_t* const end,
- const int expected_len);
+ void decode_options(const uint8_t*, uint32_t, CodecData&, DecodeData&);
- void DecodeTCPOptions(const uint8_t*, uint32_t, CodecData&, DecodeData&);
- void TCPMiscTests(const tcp::TCPHdr* const tcph,
- const DecodeData& snort,
- const CodecData& codec);
+ void flag_tests(const tcp::TCPHdr* const tcph,
+ const DecodeData& snort, const CodecData& codec);
};
static sfip_var_t* SynToMulticastDstIp = nullptr;
v.emplace_back(ProtocolId::TCP);
}
+bool TcpCodec::valid_checksum4(const RawData& raw, DecodeData& snort)
+{
+ const ip::IP4Hdr* ip4h = snort.ip_api.get_ip4h();
+
+ checksum::Pseudoheader ph;
+ ph.sip = ip4h->get_src();
+ ph.dip = ip4h->get_dst();
+ ph.zero = 0;
+ ph.protocol = ip4h->proto();
+ ph.len = htons((uint16_t)raw.len);
+
+ uint16_t csum = checksum::tcp_cksum((const uint16_t*)raw.data, raw.len, &ph);
+
+ if ( csum )
+ {
+ stats.bad_ip4_cksum++;
+ return false;
+ }
+ return true;
+}
+
+bool TcpCodec::valid_checksum6(
+ const RawData& raw, const CodecData& codec, DecodeData& snort)
+{
+ const ip::IP6Hdr* const ip6h = snort.ip_api.get_ip6h();
+
+ checksum::Pseudoheader6 ph6;
+ COPY4(ph6.sip, ip6h->get_src()->u6_addr32);
+ COPY4(ph6.dip, ip6h->get_dst()->u6_addr32);
+ ph6.zero = 0;
+ ph6.protocol = codec.ip6_csum_proto;
+ ph6.len = htons((uint16_t)raw.len);
+
+ uint16_t csum = checksum::tcp_cksum((const uint16_t*)raw.data, raw.len, &ph6);
+
+ if ( csum )
+ {
+ stats.bad_ip6_cksum++;
+ return false;
+ }
+ return true;
+}
+
bool TcpCodec::decode(const RawData& raw, CodecData& codec, DecodeData& snort)
{
if (raw.len < tcp::TCP_MIN_HEADER_LEN)
return false;
}
- /* lay TCP on top of the data cause there is enough of it! */
const tcp::TCPHdr* tcph = reinterpret_cast<const tcp::TCPHdr*>(raw.data);
const uint16_t tcph_len = tcph->hlen();
return false;
}
- /* Checksum code moved in front of the other decoder alerts.
- If it's a bad checksum (maybe due to encrypted ESP traffic), the other
- alerts could be false positives. */
if ( SnortConfig::tcp_checksums() )
{
- uint16_t csum;
- PegCount* bad_cksum_cnt;
+ bool valid;
if (snort.ip_api.is_ip4())
- {
- bad_cksum_cnt = &(stats.bad_ip4_cksum);
- checksum::Pseudoheader ph;
- const ip::IP4Hdr* ip4h = snort.ip_api.get_ip4h();
- ph.sip = ip4h->get_src();
- ph.dip = ip4h->get_dst();
- /* setup the pseudo header for checksum calculation */
- ph.zero = 0;
- ph.protocol = ip4h->proto();
- ph.len = htons((uint16_t)raw.len);
-
- /* if we're being "stateless" we probably don't care about the TCP
- * checksum, but it's not bad to keep around for shits and giggles */
- /* calculate the checksum */
- csum = checksum::tcp_cksum((const uint16_t*)(tcph), raw.len, &ph);
- }
- /* IPv6 traffic */
+ valid = valid_checksum4(raw, snort);
else
- {
- bad_cksum_cnt = &(stats.bad_ip6_cksum);
- checksum::Pseudoheader6 ph6;
- const ip::IP6Hdr* const ip6h = snort.ip_api.get_ip6h();
- COPY4(ph6.sip, ip6h->get_src()->u6_addr32);
- COPY4(ph6.dip, ip6h->get_dst()->u6_addr32);
- ph6.zero = 0;
- ph6.protocol = codec.ip6_csum_proto;
- ph6.len = htons((uint16_t)raw.len);
-
- csum = checksum::tcp_cksum((const uint16_t*)(tcph), raw.len, &ph6);
- }
+ valid = valid_checksum6(raw, codec, snort);
- if (csum && !codec.is_cooked())
+ if (!valid)
{
if ( !(codec.codec_flags & CODEC_UNSURE_ENCAP) )
- {
- (*bad_cksum_cnt)++;
snort.decode_flags |= DECODE_ERR_CKSUM_TCP;
- }
+
return false;
}
}
- if (tcph->are_flags_set(TH_FIN|TH_PUSH|TH_URG))
- {
- if (tcph->are_flags_set(TH_SYN|TH_ACK|TH_RST))
- codec_event(codec, DECODE_TCP_XMAS);
- else
- codec_event(codec, DECODE_TCP_NMAP_XMAS);
-
- // Allowing this packet for further processing
- // (in case there is a valid data inside it).
- /* return;*/
- }
-
- if (tcph->are_flags_set(TH_SYN))
- {
- /* check if only SYN is set */
- if ( tcph->th_flags == TH_SYN )
- {
- if ((tcph->th_seq == naptha_seq) and (snort.ip_api.get_ip4h()->ip_id == naptha_id))
- {
- codec_event(codec, DECODE_DOS_NAPTHA);
- }
- }
-
- if ( SnortConfig::is_address_anomaly_check_enabled() )
- {
- if ( sfvar_ip_in(SynToMulticastDstIp, snort.ip_api.get_dst()) )
- codec_event(codec, DECODE_SYN_TO_MULTICAST);
- }
-
- if ( (tcph->th_flags & TH_RST) )
- codec_event(codec, DECODE_TCP_SYN_RST);
-
- if ( (tcph->th_flags & TH_FIN) )
- codec_event(codec, DECODE_TCP_SYN_FIN);
- }
- else
- { // we already know there is no SYN
- if ( !(tcph->th_flags & (TH_ACK|TH_RST)) )
- codec_event(codec, DECODE_TCP_NO_SYN_ACK_RST);
- }
-
- if ( (tcph->th_flags & (TH_FIN|TH_PUSH|TH_URG)) &&
- !(tcph->th_flags & TH_ACK) )
- codec_event(codec, DECODE_TCP_MUST_ACK);
+ flag_tests(tcph, snort, codec);
/* if options are present, decode them */
uint16_t tcp_opt_len = (uint16_t)(tcph->hlen() - tcp::TCP_MIN_HEADER_LEN);
if (tcp_opt_len > 0)
{
const uint8_t* opts = (const uint8_t*)(raw.data + tcp::TCP_MIN_HEADER_LEN);
- DecodeTCPOptions(opts, tcp_opt_len, codec, snort);
+ decode_options(opts, tcp_opt_len, codec, snort);
}
int dsize = raw.len - tcph->hlen();
+
if (dsize < 0)
dsize = 0;
- if ( (tcph->th_flags & TH_URG) &&
- ((dsize == 0) || tcph->urp() > dsize) )
+ if ( (tcph->th_flags & TH_URG) && ((dsize == 0) || tcph->urp() > dsize) )
codec_event(codec, DECODE_TCP_BAD_URP);
// Now that we are returning true, set the tcp header
- codec.lyr_len = tcph_len - codec.invalid_bytes; // set in DecodeTCPOptions()
+ codec.lyr_len = tcph_len - codec.invalid_bytes;
codec.proto_bits |= PROTO_BIT__TCP;
+
+ snort.set_pkt_type(PktType::TCP);
snort.tcph = tcph;
+
if ((raw.pkth->flags & DAQ_PKT_FLAG_REAL_ADDRESSES) and (codec.ip_layer_cnt == 1))
{
snort.sp = ntohs(raw.pkth->n_real_sPort);
snort.sp = tcph->src_port();
snort.dp = tcph->dst_port();
}
- snort.set_pkt_type(PktType::TCP);
- TCPMiscTests(tcph, snort, codec);
+ if (snort.sp == 0 || snort.dp == 0)
+ codec_event(codec, DECODE_TCP_PORT_ZERO);
return true;
}
/*
- * Function: DecodeTCPOptions()
- *
- * Purpose: Fairly self explainatory name, don't you think?
- *
- * TCP Option Header length validation is left to the caller
- *
- * For a good listing of TCP Options,
- * http://www.iana.org/assignments/tcp-parameters
- *
* ------------------------------------------------------------
* From: "Kastenholz, Frank" <FKastenholz@unispherenetworks.com>
* Subject: Re: skeeter & bubba TCP options?
* prepared to handle an illegal option length (e.g., zero) without
* crashing; a suggested procedure is to reset the connection and log
* the reason.
- *
- * Arguments: o_list => ptr to the option list
- * o_len => length of the option list
- * p => pointer to decoded packet struct
- *
- * Returns: void function
*/
-void TcpCodec::DecodeTCPOptions(
+void TcpCodec::decode_options(
const uint8_t* start, uint32_t o_len, CodecData& codec, DecodeData& snort)
{
const uint8_t* const end_ptr = start + o_len; /* points to byte after last option */
break;
case tcp::TcpOptCode::MAXSEG:
- code = OptLenValidate(opt, end_ptr, TCPOLEN_MAXSEG);
+ code = validate_option(opt, end_ptr, TCPOLEN_MAXSEG);
break;
case tcp::TcpOptCode::SACKOK:
- code = OptLenValidate(opt, end_ptr, TCPOLEN_SACKOK);
+ code = validate_option(opt, end_ptr, TCPOLEN_SACKOK);
break;
case tcp::TcpOptCode::WSCALE:
- code = OptLenValidate(opt, end_ptr, TCPOLEN_WSCALE);
+ code = validate_option(opt, end_ptr, TCPOLEN_WSCALE);
if (code == 0)
{
if (((uint16_t)opt->data[0] > 14))
case tcp::TcpOptCode::ECHO: /* both use the same lengths */
case tcp::TcpOptCode::ECHOREPLY:
obsolete_option_found = true;
- code = OptLenValidate(opt, end_ptr, TCPOLEN_ECHO);
+ code = validate_option(opt, end_ptr, TCPOLEN_ECHO);
break;
case tcp::TcpOptCode::MD5SIG:
/* RFC 5925 obsoletes this option (see below) */
obsolete_option_found = true;
- code = OptLenValidate(opt, end_ptr, TCPOLEN_MD5SIG);
+ code = validate_option(opt, end_ptr, TCPOLEN_MD5SIG);
break;
case tcp::TcpOptCode::AUTH:
- code = OptLenValidate(opt, end_ptr, -1);
+ code = validate_option(opt, end_ptr, -1);
/* Has to have at least 4 bytes - see RFC 5925, Section 2.2 */
if (code >= 0 && opt->len < 4)
break;
case tcp::TcpOptCode::SACK:
- code = OptLenValidate(opt, end_ptr, -1);
+ code = validate_option(opt, end_ptr, -1);
if ((code >= 0) && (opt->len < 2))
code = tcp::OPT_BADLEN;
/* fall through */
case tcp::TcpOptCode::CC: /* all 3 use the same lengths / T/TCP */
case tcp::TcpOptCode::CC_NEW:
- code = OptLenValidate(opt, end_ptr, TCPOLEN_CC);
+ code = validate_option(opt, end_ptr, TCPOLEN_CC);
break;
case tcp::TcpOptCode::TRAILER_CSUM:
experimental_option_found = true;
- code = OptLenValidate(opt, end_ptr, TCPOLEN_TRAILER_CSUM);
+ code = validate_option(opt, end_ptr, TCPOLEN_TRAILER_CSUM);
break;
case tcp::TcpOptCode::TIMESTAMP:
- code = OptLenValidate(opt, end_ptr, TCPOLEN_TIMESTAMP);
+ code = validate_option(opt, end_ptr, TCPOLEN_TIMESTAMP);
break;
case tcp::TcpOptCode::SKEETER:
case tcp::TcpOptCode::BUBBA:
case tcp::TcpOptCode::UNASSIGNED:
obsolete_option_found = true;
- code = OptLenValidate(opt, end_ptr, -1);
+ code = validate_option(opt, end_ptr, -1);
break;
case tcp::TcpOptCode::SCPS:
case tcp::TcpOptCode::SNAP:
default:
experimental_option_found = true;
- code = OptLenValidate(opt, end_ptr, -1);
+ code = validate_option(opt, end_ptr, -1);
break;
}
codec_event(codec, DECODE_TCPOPT_OBSOLETE);
}
-int TcpCodec::OptLenValidate(const tcp::TcpOption* const opt,
+int TcpCodec::validate_option(const tcp::TcpOption* const opt,
const uint8_t* const end,
const int expected_len)
{
return 0;
}
-/* TCP-layer decoder alerts */
-void TcpCodec::TCPMiscTests(const tcp::TCPHdr* const tcph,
- const DecodeData& snort,
- const CodecData& codec)
+void TcpCodec::flag_tests(const tcp::TCPHdr* const tcph,
+ const DecodeData& snort, const CodecData& codec)
{
- if ( ((tcph->th_flags & TH_NORESERVED) == TH_SYN ) &&
- (tcph->seq() == 674711609) )
- codec_event(codec, DECODE_TCP_SHAFT_SYNFLOOD);
+ if (tcph->are_flags_set(TH_FIN|TH_PUSH|TH_URG))
+ {
+ if (tcph->are_flags_set(TH_SYN|TH_ACK|TH_RST))
+ codec_event(codec, DECODE_TCP_XMAS);
+ else
+ codec_event(codec, DECODE_TCP_NMAP_XMAS);
+ }
+
+ if (tcph->are_flags_set(TH_SYN))
+ {
+ if ( tcph->th_flags == TH_SYN )
+ {
+ if ((tcph->th_seq == naptha_seq) and (snort.ip_api.get_ip4h()->ip_id == naptha_id))
+ {
+ codec_event(codec, DECODE_DOS_NAPTHA);
+ }
+ }
+
+ if ( SnortConfig::is_address_anomaly_check_enabled() )
+ {
+ if ( sfvar_ip_in(SynToMulticastDstIp, snort.ip_api.get_dst()) )
+ codec_event(codec, DECODE_SYN_TO_MULTICAST);
+ }
+
+ if ( (tcph->th_flags & TH_RST) )
+ codec_event(codec, DECODE_TCP_SYN_RST);
+
+ if ( (tcph->th_flags & TH_FIN) )
+ codec_event(codec, DECODE_TCP_SYN_FIN);
+
+ if ( ((tcph->th_flags & TH_NORESERVED) == TH_SYN ) && (tcph->seq() == 674711609) )
+ codec_event(codec, DECODE_TCP_SHAFT_SYNFLOOD);
+ }
+ else
+ { // we already know there is no SYN
+ if ( !(tcph->th_flags & (TH_ACK|TH_RST)) )
+ codec_event(codec, DECODE_TCP_NO_SYN_ACK_RST);
+ }
+
+ if ( (tcph->th_flags & (TH_FIN|TH_PUSH|TH_URG)) &&
+ !(tcph->th_flags & TH_ACK) )
+ codec_event(codec, DECODE_TCP_MUST_ACK);
- if (snort.sp == 0 || snort.dp == 0)
- codec_event(codec, DECODE_TCP_PORT_ZERO);
}
/******************************************************************
node->children[i], eval_data, cursor);
if ( child_node->option_type == RULE_OPTION_TYPE_LEAF_NODE )
+ {
// Leaf node won't have any children but will return success
- // or failure
- result += child_state->result;
-
+ // or failure; regardless we must count them here
+ result += 1;
+ }
else if (child_state->result == child_node->num_children)
// Indicate that the child's tree branches are done
++result;
static inline void init_match_info(OtnxMatchData* o)
{
- for ( int i = 0; i < o->iMatchInfoArraySize; i++ )
+ for ( int i = 0; i < SnortConfig::get_conf()->num_rule_types; i++ )
o->matchInfo[i].iMatchCount = 0;
o->have_match = false;
int evalIndex = rtn->listhead->ruleListNode->evalIndex;
/* bounds check index */
- if ( evalIndex >= omd_local->iMatchInfoArraySize )
+ if ( evalIndex >= SnortConfig::get_conf()->num_rule_types )
{
pc.match_limit++;
return 1;
*/
int fpEvalRTN(RuleTreeNode* rtn, Packet* p, int check_ports)
{
- Profile rule_profile(rulePerfStats);
- Profile rule_rtn_eval_profile(ruleRTNEvalPerfStats);
+ DeepProfile rule_profile(rulePerfStats);
+ DeepProfile rule_rtn_eval_profile(ruleRTNEvalPerfStats);
if ( !rtn )
return 0;
print_pattern(pmx->pmd);
{
- Profile rule_profile(rulePerfStats);
+ DeepProfile rule_profile(rulePerfStats);
/* NOTE: The otn will be the first one in the match state. If there are
* multiple rules associated with a match state, mucking with the otn
* may muck with an unintended rule */
int ret = 0;
{
- Profile rule_otn_eval_profile(ruleOTNEvalPerfStats);
+ DeepProfile rule_otn_eval_profile(ruleOTNEvalPerfStats);
ret = detection_option_tree_evaluate(root, &eval_data);
}
EventQueueConfig* eq = SnortConfig::get_conf()->event_queue_config;
RuleTreeNode* rtn;
- for ( i = 0; i < o->iMatchInfoArraySize; i++ )
+ for ( i = 0; i < SnortConfig::get_conf()->num_rule_types; i++ )
{
/* bail if were not dumping events in all the action groups,
* and we've already got some events */
void fp_set_context(IpsContext& c)
{
c.stash = new MpseStash;
-
c.otnx = (OtnxMatchData*)snort_calloc(sizeof(OtnxMatchData));
- // FIXIT-L use dynamic array size from configure, resize it when reload
- c.otnx->iMatchInfoArraySize = (MAX_NUM_RULE_TYPES * 2);
-
- c.otnx->matchInfo = (MatchInfo*)snort_calloc(
- c.otnx->iMatchInfoArraySize, sizeof(MatchInfo));
-
+ c.otnx->matchInfo = (MatchInfo*)snort_calloc(MAX_NUM_RULE_TYPES, sizeof(MatchInfo));
c.context_num = 0;
}
int rval = 0;
{
- Profile rule_profile(rulePerfStats);
- Profile rule_nfp_eval_profile(ruleNFPEvalPerfStats);
+ DeepProfile rule_profile(rulePerfStats);
+ DeepProfile rule_nfp_eval_profile(ruleNFPEvalPerfStats);
trace_log(detection, TRACE_RULE_EVAL, "Testing non-content rules\n");
rval = detection_option_tree_evaluate(
(detection_option_tree_root_t*)port_group->nfp_tree, &eval_data);
int fpLogEvent(const RuleTreeNode*, const OptTreeNode*, snort::Packet*);
int fpEvalRTN(RuleTreeNode*, snort::Packet*, int check_ports);
-#define MAX_NUM_RULE_TYPES 10 // max number of known rule types
+#define MAX_NUM_RULE_TYPES 16 // max number of allowed rule types
/*
** This define is for the number of unique events
{
PortGroup* pg;
snort::Packet* p;
+ MatchInfo* matchInfo;
int check_ports;
bool have_match;
bool do_fp;
-
- MatchInfo* matchInfo;
- int iMatchInfoArraySize;
};
int fpAddMatch(OtnxMatchData*, int pLen, const OptTreeNode*);
delete p;
}
}
+ ids_in_use.clear();
sfeventq_free(equeue);
fp_clear_context(*this);
{
assert(id < data.size());
data[id] = cd;
+ ids_in_use.push_back(id);
}
IpsContextData* IpsContext::get_context_data(unsigned id) const
void IpsContext::clear_context_data()
{
- for ( auto* p : data )
+ for ( auto id : ids_in_use )
{
+ auto* p = data[id];
if ( p )
p->clear();
}
+ ids_in_use.clear();
}
void IpsContext::snapshot_flow(Flow* f)
bool check_tags;
static const unsigned buf_size = Codec::PKT_MAX;
+
+ // FIXIT-L eliminate max_ips_id and just resize data vector.
// Only 5 inspectors currently use the ips context data.
- // FIXIT-L This limit should to be updated if any more inspectors/modules use it.
- static constexpr unsigned max_ips_id = 32;
+ static constexpr unsigned max_ips_id = 8;
private:
FlowSnapshot flow;
std::vector<IpsContextData*> data;
+ std::vector<unsigned> ids_in_use; // for indirection; FIXIT-P evaluate alternatives
std::vector<Callback> post_callbacks;
IpsContext* depends_on;
IpsContext* next_to_process;
//--------------------------------------------------------------------------
unsigned IpsContextData::ips_id = 0;
-static unsigned max_id = IpsContext::max_ips_id;
unsigned IpsContextData::get_ips_id()
{
++ips_id;
- assert(ips_id < max_id);
+ assert(ips_id < IpsContext::max_ips_id);
return ips_id;
}
auto id2 = IpsContextData::get_ips_id();
CHECK(id1 != id2);
- CHECK(max_id > id2);
}
#endif
const unsigned char* T, int n, MpseMatch match,
void* context, int* current_state)
{
- Profile profile(mpsePerfStats);
+ DeepProfile profile(mpsePerfStats);
pmqs.matched_bytes += n;
return _search(T, n, match, context, current_state);
}
const unsigned char* T, int n, MpseMatch match,
void* context, int* current_state)
{
- Profile profile(mpsePerfStats);
+ DeepProfile profile(mpsePerfStats);
pmqs.matched_bytes += n;
return _search(T, n, match, context, current_state);
}
v.update_mask(sc->output_flags, OUTPUT_FLAG__ALERT_REFS);
else if ( v.is("order") )
- OrderRuleLists(sc, v.get_string());
+ sc->rule_order = v.get_string();
else if ( v.is("rate_filter_memcap") )
sc->rate_filter_config->memcap = v.get_uint32();
#ifdef PIGLET
if ( !Piglet::piglet_mode() )
#endif
- if ( !SnortConfig::get_conf()->output.empty() )
- EventManager::instantiate(SnortConfig::get_conf()->output.c_str(),
- SnortConfig::get_conf());
+ if ( !sc->output.empty() )
+ EventManager::instantiate(sc->output.c_str(), sc);
if (SnortConfig::alert_before_pass())
- {
- OrderRuleLists(SnortConfig::get_conf(), "drop sdrop reject alert pass log");
- }
-
- SnortConfig::get_conf()->setup();
+ sc->rule_order = "drop sdrop reject alert pass log";
+ sc->setup();
FileService::post_init();
// Must be after CodecManager::instantiate()
- if ( !InspectorManager::configure(SnortConfig::get_conf()) )
+ if ( !InspectorManager::configure(sc) )
ParseError("can't initialize inspectors");
else if ( SnortConfig::log_verbose() )
- InspectorManager::print_config(SnortConfig::get_conf());
+ InspectorManager::print_config(sc);
- ModuleManager::reset_stats(SnortConfig::get_conf());
+ ModuleManager::reset_stats(sc);
- if (SnortConfig::get_conf()->file_mask != 0)
- umask(SnortConfig::get_conf()->file_mask);
+ if (sc->file_mask != 0)
+ umask(sc->file_mask);
else
umask(077); /* set default to be sane */
/* Need to do this after dynamic detection stuff is initialized, too */
- IpsManager::global_init(SnortConfig::get_conf());
+ IpsManager::global_init(sc);
- SnortConfig::get_conf()->post_setup();
+ sc->post_setup();
- const MpseApi* search_api = SnortConfig::get_conf()->fast_pattern_config->get_search_api();
- const MpseApi* offload_search_api = SnortConfig::get_conf()->fast_pattern_config->
- get_offload_search_api();
+ const MpseApi* search_api = sc->fast_pattern_config->get_search_api();
+ const MpseApi* offload_search_api = sc->fast_pattern_config->get_offload_search_api();
- MpseManager::activate_search_engine(search_api, SnortConfig::get_conf());
+ MpseManager::activate_search_engine(search_api, sc);
if ((offload_search_api != nullptr) and (offload_search_api != search_api))
- MpseManager::activate_search_engine(offload_search_api, SnortConfig::get_conf());
+ MpseManager::activate_search_engine(offload_search_api, sc);
SFAT_Start();
Trough::setup();
// FIXIT-L refactor stuff done here and in snort_config.cc::VerifyReload()
- if ( SnortConfig::get_conf()->bpf_filter.empty() &&
- !SnortConfig::get_conf()->bpf_file.empty() )
- SnortConfig::get_conf()->bpf_filter = read_infile("bpf_file",
- SnortConfig::get_conf()->bpf_file.c_str());
+ if ( sc->bpf_filter.empty() && !sc->bpf_file.empty() )
+ sc->bpf_filter = read_infile("bpf_file", sc->bpf_file.c_str());
- if ( !SnortConfig::get_conf()->bpf_filter.empty() )
- LogMessage("Snort BPF option: %s\n", SnortConfig::get_conf()->bpf_filter.c_str());
+ if ( !sc->bpf_filter.empty() )
+ LogMessage("Snort BPF option: %s\n", sc->bpf_filter.c_str());
- parser_term(SnortConfig::get_conf());
+ parser_term(sc);
Active::init(sc);
}
{
// Need to include this b/c call is outside the detect tree
Profile detect_profile(detectPerfStats);
- Profile rebuilt_profile(rebuiltPacketPerfStats);
+ DeepProfile rebuilt_profile(rebuiltPacketPerfStats);
DetectionEngine de;
return main_hook(p);
init_policies(this);
ParseRules(this);
+ OrderRuleLists(this);
// FIXIT-L see SnortInit() on config printing
//detection_filter_print_config(detection_filter_config);
//------------------------------------------------------
// alert module stuff
+ std::string rule_order;
bool default_rule_state = true;
SfCidr homenet;
else
{
rtn.listhead = get_rule_list(sc, s);
+
+ if ( !rtn.listhead )
+ {
+ CreateRuleType(sc, s, rtn.type);
+ rtn.listhead = get_rule_list(sc, s);
+ }
}
if ( !rtn.listhead )
// private / implementation methods
//-------------------------------------------------------------------------
-static void CreateDefaultRules(SnortConfig* sc)
-{
- CreateRuleType(sc, Actions::get_string(Actions::LOG), Actions::LOG);
- CreateRuleType(sc, Actions::get_string(Actions::PASS), Actions::PASS);
- CreateRuleType(sc, Actions::get_string(Actions::ALERT), Actions::ALERT);
- CreateRuleType(sc, Actions::get_string(Actions::DROP), Actions::DROP);
- CreateRuleType(sc, Actions::get_string(Actions::BLOCK), Actions::BLOCK);
- CreateRuleType(sc, Actions::get_string(Actions::RESET), Actions::RESET);
-}
-
static void FreeRuleTreeNodes(SnortConfig* sc)
{
RuleTreeNode* rtn;
if ( !fname )
fname = "";
- CreateDefaultRules(sc);
-
sc->port_tables = PortTablesNew();
OtnInit(sc);
printRuleListOrder(rule_lists);
}
-/****************************************************************************
- *
- * Function: OrderRuleLists
- *
- * Purpose: Orders the rule lists into the specified order.
- *
- * Returns: void function
- *
- ***************************************************************************/
-void OrderRuleLists(SnortConfig* sc, const char* order)
+void OrderRuleLists(SnortConfig* sc)
{
- int i;
int evalIndex = 0;
RuleListNode* ordered_list = nullptr;
- RuleListNode* node;
- char** toks;
- int num_toks;
- toks = mSplit(order, " \t", 0, &num_toks, 0);
+ const char* order = sc->rule_order.c_str();
+ if ( !*order )
+ order = "pass drop alert log"; // FIXIT-H apply builtin module defaults
- for ( i = 0; i < num_toks; i++ )
+ int num_toks;
+ char** toks = mSplit(order, " \t", 0, &num_toks, 0);
+
+ for ( int i = 0; i < num_toks; i++ )
{
RuleListNode* prev = nullptr;
- node = sc->rule_lists;
+ RuleListNode* node = sc->rule_lists;
while (node != nullptr)
{
else
prev->next = node->next;
- /* Add node to ordered list */
ordered_list = addNodeToOrderedList(ordered_list, node, evalIndex++);
- sc->evalOrder[node->mode] = evalIndex;
-
+ sc->evalOrder[node->mode] = evalIndex;
break;
}
else
node = node->next;
}
}
-
- if ( node == nullptr )
- {
- ParseError("ruletype '%s' does not exist or "
- "has already been ordered.", toks[i]);
- return;
- }
+ // ignore rule types that aren't in use
}
mSplitFree(&toks, num_toks);
/* anything left in the rule lists needs to be moved to the ordered lists */
while (sc->rule_lists != nullptr)
{
- node = sc->rule_lists;
+ RuleListNode* node = sc->rule_lists;
sc->rule_lists = node->next;
- /* Add node to ordered list */
ordered_list = addNodeToOrderedList(ordered_list, node, evalIndex++);
sc->evalOrder[node->mode] = evalIndex;
}
- /* set the rulelists to the ordered list */
sc->rule_lists = ordered_list;
}
bool is_fatal = true);
void ParseRules(snort::SnortConfig*);
-void OrderRuleLists(snort::SnortConfig*, const char*);
+void OrderRuleLists(snort::SnortConfig*);
void PrintRuleOrder(RuleListNode*);
char* ProcessFileOption(snort::SnortConfig*, const char*);
// stream_tcp will flush to paf max which could be well below what
// has been scanned so far. since no flush point was specified,
// NHI should just deal with what it gets.
- assert(false);
+ //assert(false);
#endif
return http_buf;
}
ip_stats.total++;
ip_stats.fragmented_bytes += p->pkth->caplen + 4; /* 4 for the CRC */
- Profile profile(fragPerfStats);
+ DeepProfile profile(fragPerfStats);
if (!ft->engine )
{
int16_t fragLength;
const uint16_t net_frag_offset = p->ptrs.ip_api.off();
- Profile profile(fragInsertPerfStats);
+ DeepProfile profile(fragInsertPerfStats);
if (p->is_ip6() && (net_frag_offset == 0))
{
virtual TcpStreamTracker::TcpState get_listener_state()
{ return TcpStreamTracker::TCP_MAX_STATES; }
+ TcpStreamTracker::TcpState get_peer_state(TcpStreamTracker* me)
+ { return me == &client ? server.get_tcp_state() : client.get_tcp_state(); }
+
virtual void init_new_tcp_session(TcpSegmentDescriptor&);
virtual void update_timestamp_tracking(TcpSegmentDescriptor&) { }
virtual void update_session_on_syn_ack();
tcp_state = ( client_tracker ) ?
TcpStreamTracker::TCP_STATE_NONE : TcpStreamTracker::TCP_LISTEN;
flush_policy = STREAM_FLPOLICY_IGNORE;
- memset(&paf_state, 0, sizeof(paf_state));
+ paf_setup(&paf_state);
snd_una = snd_nxt = snd_wnd = 0;
rcv_nxt = r_win_base = iss = ts_last = ts_last_packet = 0;
small_seg_count = wscale = mss = 0;
void paf_setup (PAF_State* ps)
{
- // this is already cleared when instantiated
- //memset(ps, 0, sizeof(*ps));
ps->paf = StreamSplitter::START;
}
void paf_reset (PAF_State* ps)
{
- memset(ps, 0, sizeof(*ps));
ps->paf = StreamSplitter::START;
}
if ( !paf_initialized(ps) )
{
ps->seq = ps->pos = seq;
+ ps->fpt = ps->tot = 0;
ps->paf = StreamSplitter::SEARCH;
}
else if ( SEQ_GT(seq, ps->seq) )
bool validate_rst(TcpNormalizerState&, TcpSegmentDescriptor&) override;
bool is_paws_ts_checked_required(TcpNormalizerState&, TcpSegmentDescriptor&) override;
int handle_repeated_syn(TcpNormalizerState&, TcpSegmentDescriptor&) override;
- uint16_t set_urg_offset(
- TcpNormalizerState&, const tcp::TCPHdr* tcph, uint16_t dsize) override;
};
class TcpNormalizerOldLinux : public TcpNormalizer
bool validate_rst(TcpNormalizerState&, TcpSegmentDescriptor&) override;
bool is_paws_ts_checked_required(TcpNormalizerState&, TcpSegmentDescriptor&) override;
int handle_repeated_syn(TcpNormalizerState&, TcpSegmentDescriptor&) override;
- uint16_t set_urg_offset(
- TcpNormalizerState&, const tcp::TCPHdr* tcph, uint16_t dsize) override;
};
class TcpNormalizerBSD : public TcpNormalizer
return check_ts;
}
+#if 0
+// FIXIT-L urgent pointer schizzle - outdated
static inline uint16_t set_urg_offset_linux(const tcp::TCPHdr* tcph, uint16_t dsize)
{
uint16_t urg_offset = 0;
return urg_offset;
}
+#endif
int TcpNormalizerFirst::handle_repeated_syn(
TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
return handle_repeated_syn_bsd(tns.peer_tracker, tsd, tns.session);
}
-uint16_t TcpNormalizerLinux::set_urg_offset(
- TcpNormalizerState&, const tcp::TCPHdr* tcph, uint16_t dsize)
-{
- return set_urg_offset_linux(tcph, dsize);
-}
-
bool TcpNormalizerOldLinux::validate_rst(
TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
{
return handle_repeated_syn_bsd(tns.peer_tracker, tsd, tns.session);
}
-uint16_t TcpNormalizerOldLinux::set_urg_offset(
- TcpNormalizerState&, const tcp::TCPHdr* tcph, uint16_t dsize)
-{
- return set_urg_offset_linux(tcph, dsize);
-}
-
bool TcpNormalizerBSD::validate_rst(
TcpNormalizerState& tns, TcpSegmentDescriptor& tsd)
{
while ( tsn )
{
- if (SEQ_LT(sx, tsn->c_seq))
- fprintf(stdout, " +%u", tsn->c_seq - sx);
- else if (SEQ_GT(sx, tsn->c_seq))
- fprintf(stdout, " -%u", sx - tsn->c_seq);
+ if (SEQ_LT(sx, tsn->i_seq))
+ fprintf(stdout, " +%u", tsn->i_seq - sx);
+ else if (SEQ_GT(sx, tsn->i_seq))
+ fprintf(stdout, " -%u", sx - tsn->i_seq);
- fprintf(stdout, " %hu", tsn->c_len);
+ fprintf(stdout, " %hu", tsn->i_len);
+
+ if ( tsn->c_len and tsn->c_len != tsn->i_len )
+ fprintf(stdout, "(%hu|%hu|%d)",
+ tsn->offset, tsn->c_len, tsn->i_len-tsn->offset-tsn->c_len);
segs++;
bytes += tsn->i_len;
- sx = tsn->c_seq + tsn->c_len;
+ sx = tsn->i_seq + tsn->i_len;
tsn = tsn->next;
}
assert(trs.sos.seg_count == segs);
{
trs.sos.seglist.cur_rseg = next_no_gap(tsn) ? tsn.next : nullptr;
if ( trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_rseg->c_seq = trs.sos.seglist.cur_rseg->i_seq;
+ trs.sos.seglist.cur_rseg->c_seq = trs.sos.seglist.cur_rseg->i_seq;
}
int TcpReassembler::delete_reassembly_segment(TcpReassemblerState& trs, TcpSegmentNode* tsn)
if ( trs.sos.seglist.cur_rseg == tsn )
update_next(trs, *tsn);
+ if ( trs.sos.seglist.cur_pseg == tsn )
+ trs.sos.seglist.cur_pseg = nullptr;
+
tsn->term();
trs.sos.seg_count--;
trs.sos.seglist.insert(prev, tsn);
if ( SEQ_EQ(tsn->i_seq, trs.sos.seglist_base_seq) )
{
- tsn->c_seq = tsn->i_seq;
+ tsn->c_seq = tsn->i_seq;
trs.sos.seglist.cur_rseg = tsn;
}
tsn->i_seq = tsn->c_seq = seq;
tsn->ts = tsd.get_ts();
- // FIXIT-M the urgent ptr handling is broken... urg_offset is set here but currently
+ // FIXIT-M the urgent ptr handling is broken... urg_offset could be set here but currently
// not actually referenced anywhere else. In 2.9.7 the FlushStream function did reference
// this field but that code has been lost... urg ptr handling needs to be reviewed and fixed
- tsn->urg_offset = trs.tracker->normalizer.set_urg_offset(tsd.get_tcph(), tsd.get_seg_len() );
+ //tsn->urg_offset = trs.tracker->normalizer.set_urg_offset(tsd.get_tcph(), tsd.get_seg_len());
queue_reassembly_segment(trs, left, tsn);
trs.tracker->alert_count = 0;
}
-int TcpReassembler::purge_to_seq(TcpReassemblerState& trs, uint32_t flush_seq)
+void TcpReassembler::purge_to_seq(TcpReassemblerState& trs, uint32_t flush_seq)
{
assert(trs.sos.seglist.head != nullptr);
- int total_purged = 0;
uint32_t last_ts = 0;
TcpSegmentNode* tsn = trs.sos.seglist.head;
while ( tsn && SEQ_LT(tsn->i_seq, flush_seq))
{
- if ( !tsn->c_len )
- {
- TcpSegmentNode* dump_me = tsn;
- tsn = tsn->next;
- if (dump_me->ts > last_ts)
- last_ts = dump_me->ts;
-
- total_purged += dump_me->last_flush_len;
- delete_reassembly_segment(trs, dump_me);
- }
- else
- {
- total_purged += tsn->last_flush_len;
- tsn->last_flush_len = 0;
- break;
- }
+ if ( tsn->c_len )
+ break;
+
+ TcpSegmentNode* dump_me = tsn;
+ tsn = tsn->next;
+ if (dump_me->ts > last_ts)
+ last_ts = dump_me->ts;
+
+ delete_reassembly_segment(trs, dump_me);
}
if ( SEQ_LT(trs.tracker->rcv_nxt, flush_seq) )
trs.sos.session->server.set_ts_last(last_ts);
}
}
-
- return total_purged;
}
-// purge_flushed_ackd():
// must only purge flushed and acked bytes we may flush partial segments
-// must adjust seq->seq and tsn->size when a flush gets only the initial
+// must adjust seq->seq and tsn->size when a flush gets only the initial
// part of a segment
// * FIXIT-L need flag to mark any reassembled packets that have a gap
// (if we reassemble such)
-int TcpReassembler::purge_flushed_ackd(TcpReassemblerState& trs)
+void TcpReassembler::purge_flushed_ackd(TcpReassemblerState& trs)
{
TcpSegmentNode* tsn = trs.sos.seglist.head;
uint32_t seq;
if (!trs.sos.seglist.head)
- return 0;
+ return;
seq = trs.sos.seglist.head->i_seq;
uint32_t end = tsn->i_seq + tsn->i_len;
if ( SEQ_GT(end, trs.tracker->r_win_base) )
- {
- seq = trs.tracker->r_win_base;
break;
- }
+
seq = end;
tsn = tsn->next;
}
if ( seq != trs.sos.seglist.head->i_seq )
- return purge_to_seq(trs, seq);
-
- return 0;
+ purge_to_seq(trs, seq);
}
void TcpReassembler::show_rebuilt_packet(TcpReassemblerState& trs, Packet* pkt)
return flush_len;
}
-// flush the client trs.sos.seglist up to the most recently acked segment
int TcpReassembler::flush_data_segments(
TcpReassemblerState& trs, Packet* p, uint32_t total, Packet* pdu)
{
tsn->c_seq += bytes_copied;
tsn->c_len -= bytes_copied;
tsn->offset += bytes_copied;
- tsn->last_flush_len = bytes_copied;
flags = 0;
if ( !tsn->c_len )
uint32_t TcpReassembler::get_q_sequenced(TcpReassemblerState& trs)
{
- TcpSegmentNode* tsn;
+ TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
- if ( trs.sos.seglist.cur_rseg )
- tsn = trs.sos.seglist.cur_rseg;
- else
+ if ( !tsn )
{
- trs.sos.seglist.cur_rseg = trs.sos.seglist.head;
- tsn = trs.tracker ? trs.sos.seglist.cur_rseg : nullptr; // FIXIT-H why check tracker here?
+ tsn = trs.sos.seglist.head;
- if ( !tsn or (trs.sos.session->flow->two_way_traffic() and
- SEQ_LT(trs.tracker->r_win_base, tsn->c_seq)) )
- {
- if ( trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_rseg = trs.sos.seglist.cur_rseg->prev;
+ if ( !tsn or SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+ return 0;
- if ( trs.sos.seglist.cur_rseg )
- trs.sos.seglist.cur_rseg->c_seq = trs.sos.seglist.cur_rseg->i_seq;
- return 0;
- }
+ trs.sos.seglist.cur_rseg = tsn;
}
-
uint32_t len = 0;
const uint32_t limit = trs.tracker->splitter->get_max_pdu();
return len;
}
+bool TcpReassembler::is_q_sequenced(TcpReassemblerState& trs)
+{
+ TcpSegmentNode* tsn = trs.sos.seglist.cur_rseg;
+
+ if ( !tsn )
+ {
+ tsn = trs.sos.seglist.head;
+
+ if ( !tsn or SEQ_LT(trs.tracker->r_win_base, tsn->c_seq) )
+ return false;
+
+ trs.sos.seglist.cur_rseg = tsn;
+ }
+ while ( next_no_gap(*tsn) )
+ {
+ if ( tsn->c_len )
+ break;
+
+ tsn = trs.sos.seglist.cur_rseg = tsn->next;
+ }
+ trs.sos.seglist_base_seq = tsn->c_seq;
+
+ return (tsn->c_len != 0);
+}
+
// FIXIT-L flush_stream() calls should be replaced with calls to
// CheckFlushPolicyOn*() with the exception that for the *OnAck() case,
// any available ackd data must be flushed in both directions.
uint32_t bytes;
- if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
+ if ( !trs.sos.session->flow->two_way_traffic() )
+ bytes = 0;
+ else if ( trs.tracker->normalizer.is_tcp_ips_enabled() )
bytes = get_q_sequenced(trs);
else
bytes = get_q_footprint(trs);
// because we don't wait until it is acknowledged
int32_t TcpReassembler::flush_pdu_ips(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
{
+ assert(trs.sos.session->flow == p->flow);
DeepProfile profile(s5TcpPAFPerfStats);
- uint32_t total = 0, avail;
- TcpSegmentNode* tsn;
+ if ( !is_q_sequenced(trs) )
+ return -1;
- avail = get_q_sequenced(trs);
- tsn = trs.sos.seglist.cur_rseg;
+ TcpSegmentNode* tsn = trs.sos.seglist.cur_pseg;
+ uint32_t total = 0;
- // * must stop if gap (checked in paf_check)
- while ( tsn && *flags && ( total < avail ) )
+ if ( !tsn )
+ tsn = trs.sos.seglist.cur_rseg;
+
+ else if ( paf_initialized(&trs.tracker->paf_state) )
{
- int32_t flush_pt;
- uint32_t size = tsn->c_len;
+ assert(trs.sos.seglist.cur_rseg);
+ total = tsn->c_seq - trs.sos.seglist.cur_rseg->c_seq;
+ }
+
+ while ( tsn && *flags )
+ {
+ total += tsn->c_len;
+
uint32_t end = tsn->c_seq + tsn->c_len;
uint32_t pos = paf_position(&trs.tracker->paf_state);
- total += size;
-
if ( paf_initialized(&trs.tracker->paf_state) && SEQ_LEQ(end, pos) )
{
+ if ( !next_no_gap(*tsn) )
+ return -1;
+
tsn = tsn->next;
continue;
}
- assert(trs.sos.session->flow == p->flow);
-
- flush_pt = paf_check(
+ int32_t flush_pt = paf_check(
trs.tracker->splitter, &trs.tracker->paf_state, p, tsn->payload(),
- size, total, tsn->c_seq, flags);
+ tsn->c_len, total, tsn->c_seq, flags);
if (flush_pt >= 0)
{
- // see flush_pdu_ackd()
- if ( !trs.tracker->splitter->is_paf() && avail > (unsigned)flush_pt )
- {
- paf_jump(&trs.tracker->paf_state, avail - (unsigned)flush_pt);
- return avail;
- }
+ trs.sos.seglist.cur_pseg = nullptr;
return flush_pt;
}
+
+ trs.sos.seglist.cur_pseg = tsn;
+
+ if ( !next_no_gap(*tsn) )
+ return -1;
+
tsn = tsn->next;
}
-
return -1;
}
int32_t TcpReassembler::flush_pdu_ackd(TcpReassemblerState& trs, uint32_t* flags, Packet* p)
{
+ assert(trs.sos.session->flow == p->flow);
DeepProfile profile(s5TcpPAFPerfStats);
uint32_t total = 0;
- TcpSegmentNode* tsn =
- SEQ_LT(trs.sos.seglist_base_seq, trs.tracker->r_win_base) ? trs.sos.seglist.head : nullptr;
+ TcpSegmentNode* tsn = SEQ_LT(trs.sos.seglist_base_seq, trs.tracker->r_win_base) ?
+ trs.sos.seglist.head : nullptr;
// must stop if not acked
// must use adjusted size of tsn if not fully acked
// must stop if gap (checked in paf_check)
while (tsn && *flags && SEQ_LT(tsn->c_seq, trs.tracker->r_win_base))
{
- int32_t flush_pt;
uint32_t size = tsn->c_len;
uint32_t end = tsn->c_seq + tsn->c_len;
uint32_t pos = paf_position(&trs.tracker->paf_state);
if ( SEQ_GT(end, trs.tracker->r_win_base))
size = trs.tracker->r_win_base - tsn->c_seq;
- assert(trs.sos.session->flow == p->flow);
-
total += size;
- flush_pt = paf_check(
+
+ int32_t flush_pt = paf_check(
trs.tracker->splitter, &trs.tracker->paf_state, p, tsn->payload(),
size, total, tsn->c_seq, flags);
if ( flush_pt >= 0 )
{
- trs.sos.seglist.cur_rseg = trs.sos.seglist.head;
- trs.sos.seglist_base_seq = trs.sos.seglist.head->c_seq;
+ trs.sos.seglist.cur_rseg = trs.sos.seglist.head;
+ trs.sos.seglist_base_seq = trs.sos.seglist.head->c_seq;
+
+ // FIXIT-L this special case should be eliminated
+ // the splitters should be the sole source of where to flush
+
// for non-paf splitters, flush_pt > 0 means we reached
// the minimum required, but we flush what is available
// instead of creating more, but smaller, packets
}
tsn = tsn->next;
}
-
return -1;
}
break;
}
+ if ( flushed and !trs.sos.session->flow->two_way_traffic() and !p->ptrs.tcph->is_syn() )
+ {
+ TcpStreamTracker::TcpState peer = trs.tracker->session->get_peer_state(trs.tracker);
+
+ if ( peer == TcpStreamTracker::TCP_SYN_SENT or peer == TcpStreamTracker::TCP_SYN_RECV )
+ {
+ purge_to_seq(trs, trs.sos.seglist.head->i_seq + flushed);
+ trs.tracker->r_win_base = trs.sos.seglist_base_seq;
+ }
+ }
return flushed;
}
if ( trs.sos.seglist.tail && is_segment_fasttrack(trs, trs.sos.seglist.tail, tsd) )
{
/* segment fit cleanly at the end of the segment list */
- rc = add_reassembly_segment(trs, tsd, tsd.get_seg_len(), 0, 0,
- tsd.get_seg_seq(), trs.sos.seglist.tail);
+ rc = add_reassembly_segment(
+ trs, tsd, tsd.get_seg_len(), 0, 0, tsd.get_seg_seq(), trs.sos.seglist.tail);
return rc;
}
public:
virtual int queue_packet_for_reassembly(TcpReassemblerState&, TcpSegmentDescriptor&);
virtual void purge_segment_list(TcpReassemblerState&);
- virtual int purge_flushed_ackd(TcpReassemblerState&);
+ virtual void purge_flushed_ackd(TcpReassemblerState&);
virtual int flush_stream(
TcpReassemblerState&, snort::Packet* p, uint32_t dir, bool final_flush = false);
virtual void flush_queued_segments(
int do_zero_byte_flush(TcpReassemblerState&, snort::Packet* p, uint32_t pkt_flags);
uint32_t get_q_footprint(TcpReassemblerState&);
uint32_t get_q_sequenced(TcpReassemblerState&);
+ bool is_q_sequenced(TcpReassemblerState&);
void final_flush(TcpReassemblerState&, snort::Packet*, uint32_t dir);
uint32_t get_reverse_packet_dir(TcpReassemblerState&, const snort::Packet*);
uint32_t get_forward_packet_dir(TcpReassemblerState&, const snort::Packet*);
int32_t flush_pdu_ips(TcpReassemblerState&, uint32_t*, snort::Packet*);
void fallback(TcpReassemblerState&);
int32_t flush_pdu_ackd(TcpReassemblerState&, uint32_t* flags, snort::Packet*);
- int purge_to_seq(TcpReassemblerState&, uint32_t flush_seq);
+ void purge_to_seq(TcpReassemblerState&, uint32_t flush_seq);
bool next_no_gap(TcpSegmentNode&);
void update_next(TcpReassemblerState&, TcpSegmentNode&);
void purge_segment_list()
{ reassembler->purge_segment_list(trs); }
- int purge_flushed_ackd()
+ void purge_flushed_ackd()
{ return reassembler->purge_flushed_ackd(trs); }
int flush_stream(snort::Packet* p, uint32_t dir, bool final_flush = false)
TcpSegmentNode::TcpSegmentNode(const struct timeval& tv, const uint8_t* payload, uint16_t len) :
prev(nullptr), next(nullptr), tv(tv), ts(0), i_seq(0), c_seq(0), i_len(len),
- c_len(len), offset(0), last_flush_len(0), urg_offset(0)
+ c_len(len), offset(0)
{
data = ( uint8_t* )snort_alloc(len);
memcpy(data, payload, len);
uint16_t i_len; // initial length of the data segment
uint16_t c_len; // length of data remaining for reassembly
uint16_t offset;
- uint16_t last_flush_len;
- uint16_t urg_offset;
};
class TcpSegmentList
dump_me->term();
}
- head = tail = cur_rseg = nullptr;
+ head = tail = cur_rseg = cur_pseg = nullptr;
count = 0;
return i;
}
TcpSegmentNode* head = nullptr;
TcpSegmentNode* tail = nullptr;
TcpSegmentNode* cur_rseg = nullptr;
+ TcpSegmentNode* cur_pseg = nullptr;
uint32_t count = 0;
};