shutdown_active.reset();
}
p->reset();
+
+ // set after reset() since reset() clears packet_flags
+ if ( parent )
+ p->packet_flags |= PKT_HAS_PARENT;
return p;
}
// clear closed sessions here after inspection since non-stream
// inspectors may depend on flow information
// this also handles block pending state
- Stream::check_flow_closed(p);
+ // must only be done for terminal (wire) packets to avoid tearing down
+ // stream_tcp state while a PDU derived from it is still being processed
+ if ( !p->has_parent() )
+ Stream::check_flow_closed(p);
if ( inspected and !p->context->next() )
InspectorManager::clear(p);
#define STREAM_STATE_UNREACH 0x0100
#define STREAM_STATE_CLOSED 0x0200
#define STREAM_STATE_BLOCK_PENDING 0x0400
+#define STREAM_STATE_RELEASING 0x0800
class BitOp;
class Session;
#define PKT_RETRANSMIT 0x01000000 // packet is a re-transmitted pkt.
#define PKT_RETRY 0x02000000 /* this packet is being re-evaluated from the internal retry queue */
#define PKT_USE_DIRECT_INJECT 0x04000000 /* Use ioctl when injecting. */
-#define PKT_UNUSED_FLAGS 0xf8000000
+#define PKT_HAS_PARENT 0x08000000 /* pseudo packet derived from the current wire packet */
+#define PKT_WAS_SET 0x10000000 /* pseudo packet (PDU) already set up for the current flush */
+#define PKT_UNUSED_FLAGS 0xe0000000
#define PKT_TS_OFFLOADED 0x01
void clear_offloaded()
{ ts_packet_flags &= (~PKT_TS_OFFLOADED); }
+ bool has_parent() const
+ { return (packet_flags & PKT_HAS_PARENT) != 0; }
+
+ bool was_set() const
+ { return (packet_flags & PKT_WAS_SET) != 0; }
+
bool is_detection_enabled(bool to_server);
bool is_inter_group_flow() const
EncodeFlags, const Packet* p, Packet* c, PseudoPacketType type,
const DAQ_PktHdr_t* phdr, uint32_t opaque)
{
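+ // preserve flags already set on the cooked packet (e.g. PKT_HAS_PARENT) so they survive reset()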
+ uint32_t cflags = c->packet_flags;
c->reset();
init_daq_pkthdr(p, c, phdr, opaque);
- c->packet_flags |= PKT_PSEUDO;
+ c->packet_flags = cflags | PKT_PSEUDO;
c->pseudo_type = type;
// cooked packet gets same policy as raw
{
Flow* flow = p->flow;
- if ( !flow )
+ if ( !flow or (flow->session_state & STREAM_STATE_RELEASING) )
return;
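+ // mark the flow as releasing so reentrant calls bail out on the check above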
+ flow->session_state |= STREAM_STATE_RELEASING;
+
if (flow->session_state & STREAM_STATE_CLOSED)
{
assert(flow_con);
}
flow->clear_session_state(STREAM_STATE_BLOCK_PENDING);
}
+
+ flow->session_state &= ~STREAM_STATE_RELEASING;
}
int Stream::ignore_flow(
Packet* TcpReassembler::initialize_pdu(
TcpReassemblerState& trs, Packet* p, uint32_t pkt_flags, struct timeval tv)
{
- Packet* pdu = DetectionEngine::set_next_packet(p);
+ // a partial flush (for http_inspect splitter processing) has already set the pdu; reuse it
+ Packet* pdu = p->was_set() ? p : DetectionEngine::set_next_packet(p);
EncodeFlags enc_flags = 0;
DAQ_PktHdr_t pkth;
pdu->context->pkth->ts = tv;
pdu->dsize = 0;
pdu->data = nullptr;
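+ // propagate the flow's IP protocol to the rebuilt PDU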
+ pdu->ip_proto_next = (IpProtocol)p->flow->ip_proto;
+
return pdu;
}
trs.tracker->clear_tf_flags(TF_FORCE_FLUSH);
}
-static Packet* set_packet(Flow* flow, uint32_t flags, bool c2s)
+static Packet* get_packet(Flow* flow, uint32_t flags, bool c2s)
{
- // if not in the context of a wire packet the flush initiator must have
- // created a packet context by calling DetectionEngine::set_next_packet()
- Packet* p = DetectionEngine::get_current_packet();
- assert(p->pkth == p->context->pkth);
-
- // FIXIT-M p points to a skeleton of a TCP PDU packet with no data and we now
- // initialize the IPs/ports/flow and other fields accessed as we reassemble
- // and flush the PDU. There are probably other Packet fields that should be set here...
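+ // set_next_packet() creates the packet context and returns a reset packet for the PDU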
+ Packet* p = DetectionEngine::set_next_packet();
+
DAQ_PktHdr_t* ph = p->context->pkth;
memset(ph, 0, sizeof(*ph));
packet_gettimeofday(&ph->ts);
void TcpReassembler::flush_queued_segments(
TcpReassemblerState& trs, Flow* flow, bool clear, Packet* p)
{
- // if flushing outside the context of wire packet p will be null, initialize
- // Packet object allocated for the current IpsContext
- if ( !p )
- p = set_packet(flow, trs.packet_dir, trs.server_side);
-
bool pending = clear and paf_initialized(&trs.paf_state)
and (!trs.tracker->get_splitter() || trs.tracker->get_splitter()->finish(flow) );
final_flush(trs, p, trs.packet_dir);
}
+void TcpReassembler::flush_queued_segments(
+ TcpReassemblerState& trs, Flow* flow, bool clear, const Packet* p)
+{
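+ // build the PDU packet up front; it is flushed in both branches below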
+ Packet* pdu = get_packet(flow, trs.packet_dir, trs.server_side);
+
+ if ( p )
+ flush_queued_segments(trs, flow, clear, pdu);
+
+ else
+ {
+ // if we weren't given a packet, we must establish a context
+ DetectionEngine de;
+ flush_queued_segments(trs, flow, clear, pdu);
+ }
+}
+
// this is for post-ack flushing
uint32_t TcpReassembler::get_reverse_packet_dir(TcpReassemblerState&, const Packet* p)
{
break;
flushed += flush_to_seq(trs, flush_amt, p, flags);
- } while( trs.sos.seglist.head );
+ }
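+ // stop flushing if inspection has been disabled on the flow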
+ while ( trs.sos.seglist.head and !p->flow->is_inspection_disabled() );
if ( !flags && trs.tracker->is_splitter_paf() )
{
// ideally we would purge just once after this loop but that throws off base
if ( trs.sos.seglist.head )
purge_to_seq(trs, trs.sos.seglist_base_seq);
- } while ( trs.sos.seglist.head );
+ }
+ while ( trs.sos.seglist.head and !p->flow->is_inspection_disabled() );
if ( (trs.paf_state.paf == StreamSplitter::ABORT) && trs.tracker->is_splitter_paf() )
{
uint32_t TcpReassembler::perform_partial_flush(TcpReassemblerState& trs, Flow* flow)
{
- // Call this first, to create a context before creating a packet:
- DetectionEngine::set_next_packet();
- DetectionEngine de;
- Packet* p = set_packet(flow, trs.packet_dir, trs.server_side);
+ Packet* p = get_packet(flow, (trs.packet_dir|PKT_WAS_SET), trs.server_side);
uint32_t result = perform_partial_flush(trs, p);
// If the held_packet hasn't been released by perform_partial_flush(),
virtual int flush_stream(
TcpReassemblerState&, snort::Packet* p, uint32_t dir, bool final_flush = false);
virtual void flush_queued_segments(
- TcpReassemblerState&, snort::Flow* flow, bool clear, snort::Packet* p = nullptr);
+ TcpReassemblerState&, snort::Flow* flow, bool clear, const snort::Packet* = nullptr);
virtual bool is_segment_pending_flush(TcpReassemblerState&);
virtual int flush_on_data_policy(TcpReassemblerState&, snort::Packet*);
virtual int flush_on_ack_policy(TcpReassemblerState&, snort::Packet*);
bool is_segment_fasttrack
(TcpReassemblerState&, TcpSegmentNode* tail, const TcpSegmentDescriptor&);
void show_rebuilt_packet(const TcpReassemblerState&, snort::Packet*);
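+ // internal overload that performs the flush with the given pdu packet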
+ void flush_queued_segments(
+ TcpReassemblerState&, snort::Flow* flow, bool clear, snort::Packet*);
int flush_data_segments(TcpReassemblerState&, uint32_t flush_len, snort::Packet* pdu);
void prep_pdu(
TcpReassemblerState&, snort::Flow*, snort::Packet*, uint32_t pkt_flags, snort::Packet*);
int flush_stream(snort::Packet* p, uint32_t dir, bool final_flush = false)
{ return reassembler->flush_stream(trs, p, dir, final_flush); }
- void flush_queued_segments(snort::Flow* flow, bool clear, snort::Packet* p = nullptr)
+ void flush_queued_segments(snort::Flow* flow, bool clear, const snort::Packet* p = nullptr)
{ reassembler->flush_queued_segments(trs, flow, clear, p); }
bool is_segment_pending_flush()