if (frame_flags & END_STREAM)
{
- finish_msg_body();
+ Http2Stream* const stream = session_data->find_stream(
+ session_data->current_stream[source_id]);
+ stream->finish_msg_body(source_id);
+
+ // Since the flushed data section may contain zero or multiple frame headers,
+ // remember the END_STREAM flag in the stream object
+ stream->set_end_stream_on_data_flush(source_id);
return StreamSplitter::FLUSH;
}
else if (scan_result != StreamSplitter::FLUSH)
return frame_buf;
}
-
-void Http2DataCutter::finish_msg_body()
-{
- uint32_t http_flush_offset = 0;
- Http2DummyPacket dummy_pkt;
- dummy_pkt.flow = session_data->flow;
- uint32_t unused = 0;
- Http2Stream* const stream = session_data->get_current_stream(source_id);
- stream->get_hi_flow_data()->finish_h2_body(source_id);
- stream->set_last_data_flush(source_id);
- const snort::StreamSplitter::Status scan_result = session_data->hi_ss[source_id]->scan(
- &dummy_pkt, nullptr, 0, unused, &http_flush_offset);
- assert(scan_result == snort::StreamSplitter::FLUSH);
- UNUSED(scan_result);
- session_data->data_processing[source_id] = false;
-}
-
bool http2_scan(const uint8_t* data, uint32_t length, uint32_t* flush_offset,
uint32_t frame_len, uint8_t frame_flags, uint32_t& data_offset);
snort::StreamSplitter::Status http_scan(const uint8_t* data, uint32_t* flush_offset);
- void finish_msg_body();
};
#endif
Http2Module::increment_peg_counts(PEG_MAX_CONCURRENT_FILES);
}
}
- if (stream->is_last_data_flush(source_id))
+ if (stream->is_end_stream_on_data_flush(source_id))
stream->set_state(source_id, STATE_CLOSED);
break;
case STATE_OPEN_DATA:
- if (stream->is_last_data_flush(source_id))
+ if (stream->is_end_stream_on_data_flush(source_id))
{
assert(session_data->concurrent_files > 0);
session_data->concurrent_files -= 1;
HttpCommon::SourceId source_id_, Http2Stream* stream_) :
Http2Frame(header_buffer, header_len, data_buffer, data_len, session_data_, source_id_, stream_)
{
- // FIXIT-E If the stream state is not IDLE, we've already received the headers. Trailers are
- // not yet being processed
+ // FIXIT-E check frame validity before creating frame
+ if (!check_frame_validity())
+ return;
+
+ // If the stream state is not IDLE, this frame contains trailers.
if (stream->get_state(source_id) >= STATE_OPEN)
{
+ HttpFlowData* http_flow = session_data->get_current_stream(source_id)->get_hi_flow_data();
+ if (http_flow->get_type_expected(source_id) != HttpEnums::SEC_TRAILER)
+ {
+ // If there was no unflushed data on this stream when the trailers arrived, http_inspect
+ // will not yet be expecting trailers. Flush empty buffer through scan, reassemble, and
+ // eval to prepare http_inspect for trailers.
+ assert(http_flow->get_type_expected(source_id) == HttpEnums::SEC_BODY_H2);
+ stream->finish_msg_body(source_id, true); // calls http_inspect scan()
+
+ unsigned copied;
+ StreamBuffer stream_buf;
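+ // Reassemble with zero octets and PKT_PDU_TAIL to obtain the (empty) final body
+ // section buffer for http_inspect to evaluate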
+ stream_buf = session_data->hi_ss[source_id]->reassemble(session_data->flow,
+ 0, 0, nullptr, 0, PKT_PDU_TAIL, copied);
+ assert(stream_buf.data != nullptr);
+ assert(copied == 0);
+
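+ // Wrap the reassembled section in a dummy packet and run it through http_inspect
+ // eval, which moves http_inspect to expecting the trailer section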
+ Http2DummyPacket dummy_pkt;
+ dummy_pkt.flow = session_data->flow;
+ dummy_pkt.packet_flags = (source_id == SRC_CLIENT) ? PKT_FROM_CLIENT : PKT_FROM_SERVER;
+ dummy_pkt.dsize = stream_buf.length;
+ dummy_pkt.data = stream_buf.data;
+ session_data->hi->eval(&dummy_pkt);
+ if (http_flow->get_type_expected(source_id) == HttpEnums::SEC_ABORT)
+ {
+ hi_abort = true;
+ return;
+ }
+ assert(http_flow->get_type_expected(source_id) == HttpEnums::SEC_TRAILER);
+ session_data->hi->clear(&dummy_pkt);
+ }
+ // FIXIT-E Trailers are not yet being processed
trailer = true;
return;
}
}
}
+// Returns true if processing of this frame should continue
+bool Http2HeadersFrame::check_frame_validity()
+{
+ if (stream->get_state(source_id) == STATE_CLOSED)
+ {
+ *session_data->infractions[source_id] += INF_TRAILERS_AFTER_END_STREAM;
+ session_data->events[source_id]->create_event(EVENT_FRAME_SEQUENCE);
+ return false;
+ }
+ else if ((stream->get_state(source_id) != STATE_IDLE) and !(get_flags() & END_STREAM))
+ {
+ // Trailers without END_STREAM flag set. Alert but continue to process.
+ *session_data->infractions[source_id] += INF_FRAME_SEQUENCE;
+ session_data->events[source_id]->create_event(EVENT_FRAME_SEQUENCE);
+ }
+ return true;
+}
+
void Http2HeadersFrame::update_stream_state()
{
switch (stream->get_state(source_id))
case STATE_OPEN:
// fallthrough
case STATE_OPEN_DATA:
- if (get_flags() & END_STREAM)
- {
- if (stream->get_state(source_id) == STATE_OPEN_DATA)
- session_data->concurrent_files -= 1;
- stream->set_state(source_id, STATE_CLOSED);
- }
- else
- {
- // Headers frame without end_stream flag set after initial Headers frame
- *session_data->infractions[source_id] += INF_FRAME_SEQUENCE;
- session_data->events[source_id]->create_event(EVENT_FRAME_SEQUENCE);
- }
+ // Any HEADERS frame received after the message headers is processed as trailers,
+ // regardless of whether the END_STREAM flag is set
+ if (stream->get_state(source_id) == STATE_OPEN_DATA)
+ session_data->concurrent_files -= 1;
+ stream->set_state(source_id, STATE_CLOSED);
break;
case STATE_CLOSED:
- // Trailers in closed state
- *session_data->infractions[source_id] += INF_TRAILERS_AFTER_END_STREAM;
- session_data->events[source_id]->create_event(EVENT_FRAME_SEQUENCE);
+ // FIXIT-E frame validity should be checked before the frame is created, so we should
+ // not get here
break;
}
}
Http2HeadersFrame(const uint8_t* header_buffer, const int32_t header_len,
const uint8_t* data_buffer, const int32_t data_len, Http2FlowData* ssn_data,
HttpCommon::SourceId src_id, Http2Stream* stream);
+ bool check_frame_validity();
Http2StartLine* start_line_generator = nullptr;
uint8_t* decoded_headers = nullptr; // working buffer to store decoded headers
#include "http2_enum.h"
#include "http2_stream.h"
+#include "service_inspectors/http_inspect/http_enum.h"
#include "service_inspectors/http_inspect/http_flow_data.h"
+#include "service_inspectors/http_inspect/http_stream_splitter.h"
#include "http2_data_cutter.h"
+#include "http2_dummy_packet.h"
using namespace HttpCommon;
using namespace Http2Enums;
+using namespace HttpEnums;
Http2Stream::Http2Stream(uint32_t stream_id_, Http2FlowData* session_data_) :
stream_id(stream_id_),
{
return (state[source_id] == STATE_OPEN) || (state[source_id] == STATE_OPEN_DATA);
}
+
+void Http2Stream::finish_msg_body(HttpCommon::SourceId source_id, bool expect_trailers)
+{
+ uint32_t http_flush_offset = 0;
+ Http2DummyPacket dummy_pkt;
+ dummy_pkt.flow = session_data->flow;
+ uint32_t unused = 0;
+ const H2BodyState body_state = expect_trailers ?
+ H2_BODY_COMPLETE_EXPECT_TRAILERS : H2_BODY_COMPLETE;
+ get_hi_flow_data()->finish_h2_body(source_id, body_state);
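+ // With the body marked complete, a zero-length scan prompts http_inspect to flush
+ // whatever body data it has buffered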
+ const snort::StreamSplitter::Status scan_result = session_data->hi_ss[source_id]->scan(
+ &dummy_pkt, nullptr, 0, unused, &http_flush_offset);
+ assert(scan_result == snort::StreamSplitter::FLUSH);
+ UNUSED(scan_result);
+ session_data->data_processing[source_id] = false;
+}
{ state[source_id] = new_state; }
Http2Enums::StreamState get_state(HttpCommon::SourceId source_id) { return state[source_id]; }
bool is_open(HttpCommon::SourceId source_id);
- void set_last_data_flush(HttpCommon::SourceId source_id) { last_data_flush[source_id] = true; }
- bool is_last_data_flush(HttpCommon::SourceId source_id) { return last_data_flush[source_id]; }
+ void set_end_stream_on_data_flush(HttpCommon::SourceId source_id)
+ { end_stream_on_data_flush[source_id] = true; }
+ bool is_end_stream_on_data_flush(HttpCommon::SourceId source_id)
+ { return end_stream_on_data_flush[source_id]; }
+ void finish_msg_body(HttpCommon::SourceId source_id, bool expect_trailers = false);
#ifdef REG_TEST
void print_frame(FILE* output);
HttpMsgSection* hi_msg_section = nullptr;
Http2DataCutter* data_cutter[2] = { nullptr, nullptr};
bool partial_buf_pending[2] = { false, false }; // used to indicate a partial buffer
- bool last_data_flush[2] = { false, false };
+ bool end_stream_on_data_flush[2] = { false, false };
Http2Enums::StreamState state[2] = { Http2Enums::STATE_IDLE, Http2Enums::STATE_IDLE };
};
uint32_t length, uint32_t* flush_offset, HttpCommon::SourceId source_id,
uint32_t frame_length, uint8_t frame_flags, uint32_t& data_offset);
static void partial_flush_data(Http2FlowData* session_data, HttpCommon::SourceId source_id,
- uint32_t* flush_offset, uint32_t data_offset, uint32_t old_stream);
+ uint32_t* flush_offset, uint32_t data_offset, Http2Stream* const stream);
static StreamSplitter::Status non_data_scan(Http2FlowData* session_data,
uint32_t length, uint32_t* flush_offset, HttpCommon::SourceId source_id,
uint32_t frame_length, uint8_t type, uint8_t frame_flags, uint32_t& data_offset);
// Compute frame section length once per frame
if (session_data->scan_remaining_frame_octets[source_id] == 0)
{
- if (session_data->continuation_expected[source_id] && type != FT_CONTINUATION)
- {
- *session_data->infractions[source_id] += INF_MISSING_CONTINUATION;
- session_data->events[source_id]->create_event(EVENT_MISSING_CONTINUATION);
- return StreamSplitter::ABORT;
- }
-
if (frame_length + FRAME_HEADER_LENGTH > MAX_OCTETS)
{
// FIXIT-M long non-data frame needs to be supported
// Flush pending data
void Http2StreamSplitter::partial_flush_data(Http2FlowData* session_data,
- HttpCommon::SourceId source_id, uint32_t* flush_offset, uint32_t data_offset, uint32_t
- old_stream)
+ HttpCommon::SourceId source_id, uint32_t* flush_offset, uint32_t data_offset,
+ Http2Stream* const stream)
{
- session_data->current_stream[source_id] = session_data->stream_in_hi = old_stream;
session_data->frame_type[source_id] = FT_DATA;
- Http2Stream* const stream = session_data->find_stream(
- session_data->current_stream[source_id]);
Http2DataCutter* const data_cutter = stream->get_data_cutter(source_id);
if (data_cutter->is_flush_required())
session_data->hi_ss[source_id]->init_partial_flush(session_data->flow);
*flush_offset = data_offset - 1;
session_data->flushing_data[source_id] = true;
session_data->num_frame_headers[source_id] -= 1;
- session_data->stream_in_hi = NO_STREAM_ID;
}
bool Http2StreamSplitter::read_frame_hdr(Http2FlowData* session_data, const uint8_t* data,
session_data->current_stream[source_id] =
get_stream_id(session_data->scan_frame_header[source_id]);
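+ // If the previous frame did not set END_HEADERS, the only frame type legal here is
+ // CONTINUATION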
+ if (session_data->continuation_expected[source_id] && type != FT_CONTINUATION)
+ {
+ *session_data->infractions[source_id] += INF_MISSING_CONTINUATION;
+ session_data->events[source_id]->create_event(EVENT_MISSING_CONTINUATION);
+ return StreamSplitter::ABORT;
+ }
+
if (session_data->data_processing[source_id] &&
((type != FT_DATA) || (old_stream != session_data->current_stream[source_id])))
{
// message body section sizes. It also avoids extreme delays in inspecting the
// data that could occur if we put this aside indefinitely while processing
// other streams.
- partial_flush_data(session_data, source_id, flush_offset, data_offset,
- old_stream);
+ const uint32_t next_stream = session_data->current_stream[source_id];
+ session_data->current_stream[source_id] = session_data->stream_in_hi =
+ old_stream;
+ Http2Stream* const stream = session_data->find_stream(
+ session_data->current_stream[source_id]);
+ partial_flush_data(session_data, source_id, flush_offset, data_offset, stream);
+
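+ // A HEADERS frame following unflushed data on the same stream carries trailers:
+ // mark the message body complete and tell http_inspect to expect a trailer section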
+ if ((type == FT_HEADERS) and
+ (session_data->current_stream[source_id] == next_stream))
+ {
+ stream->finish_msg_body(source_id, true);
+ }
+ session_data->stream_in_hi = NO_STREAM_ID;
return StreamSplitter::FLUSH;
}
using namespace HttpEnums;
ScanResult HttpStartCutter::cut(const uint8_t* buffer, uint32_t length,
- HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, bool)
+ HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, HttpEnums::H2BodyState)
{
for (uint32_t k = 0; k < length; k++)
{
}
ScanResult HttpHeaderCutter::cut(const uint8_t* buffer, uint32_t length,
- HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, bool)
+ HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, HttpEnums::H2BodyState)
{
// Header separators: leading \r\n, leading \n, nonleading \r\n\r\n, nonleading \n\r\n,
// nonleading \r\n\n, and nonleading \n\n. The separator itself becomes num_excess which is
}
ScanResult HttpBodyClCutter::cut(const uint8_t* buffer, uint32_t length, HttpInfractions*,
- HttpEventGen*, uint32_t flow_target, bool stretch, bool)
+ HttpEventGen*, uint32_t flow_target, bool stretch, HttpEnums::H2BodyState)
{
assert(remaining > octets_seen);
}
ScanResult HttpBodyOldCutter::cut(const uint8_t* buffer, uint32_t length, HttpInfractions*,
- HttpEventGen*, uint32_t flow_target, bool stretch, bool)
+ HttpEventGen*, uint32_t flow_target, bool stretch, HttpEnums::H2BodyState)
{
if (flow_target == 0)
{
}
ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
- HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool stretch, bool)
+ HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool stretch,
+ HttpEnums::H2BodyState)
{
// Are we skipping through the rest of this chunked body to the trailers and the next message?
const bool discard_mode = (flow_target == 0);
ScanResult HttpBodyH2Cutter::cut(const uint8_t* /*buffer*/, uint32_t length,
HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool /*stretch*/,
- bool h2_body_finished)
+ H2BodyState state)
{
// FIXIT-E accelerated blocking not yet supported for HTTP/2
// FIXIT-E stretch not yet supported for HTTP/2 message bodies
events->create_event(EVENT_H2_DATA_OVERRUNS_CL);
expected_body_length = HttpCommon::STAT_NOT_COMPUTE;
}
- else if (h2_body_finished and ((total_octets_scanned + length) < expected_body_length))
+ else if (state != H2_BODY_NOT_COMPLETE and
+ ((total_octets_scanned + length) < expected_body_length))
{
*infractions += INF_H2_DATA_UNDERRUNS_CL;
events->create_event(EVENT_H2_DATA_UNDERRUNS_CL);
return SCAN_DISCARD_PIECE;
}
- if (!h2_body_finished)
+ if (state == H2_BODY_NOT_COMPLETE)
{
if (octets_seen + length < flow_target)
{
virtual ~HttpCutter() = default;
virtual HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool stretch,
- bool h2_body_finished) = 0;
+ HttpEnums::H2BodyState state) = 0;
uint32_t get_num_flush() const { return num_flush; }
uint32_t get_octets_seen() const { return octets_seen; }
uint32_t get_num_excess() const { return num_crlf; }
{
public:
HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
- HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, bool) override;
+ HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, HttpEnums::H2BodyState)
+ override;
protected:
enum ValidationResult { V_GOOD, V_BAD, V_TBD };
{
public:
HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
- HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, bool) override;
+ HttpInfractions* infractions, HttpEventGen* events, uint32_t, bool, HttpEnums::H2BodyState)
+ override;
uint32_t get_num_head_lines() const override { return num_head_lines; }
private:
HttpBodyCutter(accelerated_blocking, compression), remaining(expected_length)
{ assert(remaining > 0); }
HttpEnums::ScanResult cut(const uint8_t*, uint32_t length, HttpInfractions*, HttpEventGen*,
- uint32_t flow_target, bool stretch, bool) override;
+ uint32_t flow_target, bool stretch, HttpEnums::H2BodyState) override;
private:
int64_t remaining;
HttpBodyCutter(accelerated_blocking, compression)
{}
HttpEnums::ScanResult cut(const uint8_t*, uint32_t, HttpInfractions*, HttpEventGen*,
- uint32_t flow_target, bool stretch, bool) override;
+ uint32_t flow_target, bool stretch, HttpEnums::H2BodyState) override;
};
class HttpBodyChunkCutter : public HttpBodyCutter
{}
HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool stretch,
- bool) override;
+ HttpEnums::H2BodyState) override;
bool get_is_broken_chunk() const override { return curr_state == HttpEnums::CHUNK_BAD; }
uint32_t get_num_good_chunks() const override { return num_good_chunks; }
void soft_reset() override { num_good_chunks = 0; HttpBodyCutter::soft_reset(); }
HttpBodyCutter(accelerated_blocking, compression), expected_body_length(expected_length)
{}
HttpEnums::ScanResult cut(const uint8_t*, uint32_t, HttpInfractions*, HttpEventGen*,
- uint32_t flow_target, bool stretch, bool h2_body_finished) override;
+ uint32_t flow_target, bool stretch, HttpEnums::H2BodyState state) override;
private:
int64_t expected_body_length;
uint32_t total_octets_scanned = 0;
extern const bool is_sp_tab_quote_dquote[256];
extern const bool is_print_char[256]; // printable includes SP, tab, CR, LF
extern const bool is_sp_comma[256];
+
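+// Tracks whether http2_inspect has seen the end of the message body, and if so whether
+// trailers are expected to follow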
+enum H2BodyState { H2_BODY_NOT_COMPLETE, H2_BODY_COMPLETE, H2_BODY_COMPLETE_EXPECT_TRAILERS };
+
} // end namespace HttpEnums
#endif
return memory_usage_estimate;
}
+void HttpFlowData::finish_h2_body(HttpCommon::SourceId source_id, HttpEnums::H2BodyState state)
+{
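+ // Called by http2_inspect when the entire message body has been seen; the state
+ // records whether trailers will follow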
+ assert(h2_body_state[source_id] == H2_BODY_NOT_COMPLETE);
+ h2_body_state[source_id] = state;
+ partial_flush[source_id] = false;
+}
+
#ifdef REG_TEST
void HttpFlowData::show(FILE* out_file) const
{
friend class HttpUnitTestSetup;
#endif
- HttpEnums::SectionType get_type_expected(HttpCommon::SourceId source_id)
+ HttpEnums::SectionType get_type_expected(HttpCommon::SourceId source_id) const
{ return type_expected[source_id]; }
- void finish_h2_body(HttpCommon::SourceId source_id)
- { h2_body_finished[source_id] = true; }
+ void finish_h2_body(HttpCommon::SourceId source_id, HttpEnums::H2BodyState state);
void reset_partial_flush(HttpCommon::SourceId source_id)
{ partial_flush[source_id] = false; }
private:
// HTTP/2 handling
bool for_http2 = false;
- bool h2_body_finished[2] = { false, false };
+ HttpEnums::H2BodyState h2_body_state[2] = { HttpEnums::H2_BODY_NOT_COMPLETE,
+ HttpEnums::H2_BODY_NOT_COMPLETE };
// Convenience routines
void half_reset(HttpCommon::SourceId source_id);
#include "http_msg_body_h2.h"
+using namespace HttpEnums;
+
void HttpMsgBodyH2::update_flow()
{
session_data->body_octets[source_id] = body_octets;
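+ // Body still in progress: keep updating the inspection depth. Body complete with
+ // trailers expected: prepare http_inspect for the trailer section. Otherwise there is
+ // nothing more to do for this message.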
- if (session_data->h2_body_finished[source_id])
- {
- session_data->trailer_prep(source_id);
- session_data->h2_body_finished[source_id] = false;
- }
- else
+ if (session_data->h2_body_state[source_id] == H2_BODY_NOT_COMPLETE)
update_depth();
+ else if (session_data->h2_body_state[source_id] == H2_BODY_COMPLETE_EXPECT_TRAILERS)
+ session_data->trailer_prep(source_id);
}
#ifdef REG_TEST
max_length, session_data->get_infractions(source_id), session_data->events[source_id],
session_data->section_size_target[source_id],
session_data->stretch_section_to_packet[source_id],
- session_data->h2_body_finished[source_id]);
+ session_data->h2_body_state[source_id]);
switch (cut_result)
{
case SCAN_NOT_FOUND: