#ifndef HTTP_PUBLISH_LENGTH_EVENT_H
#define HTTP_PUBLISH_LENGTH_EVENT_H
-// An event to dynamically update the publish length used by http_inspect
+// An event to dynamically update the publish length used by http_inspect.
// Subscribers MUST retain the given publish length if it's larger than
// the one they desire. That way all subscribers can work together to
-// set the publish length.
+// set the publish length. Subscribers can also request additional
+// publish behavior, such as publishing on SSE event boundaries.
namespace snort
{
bool should_publish_body() const
{ return publish_body; }
+ void set_publish_on_sse_event_boundary()
+ { publish_on_sse_event_boundary = true; }
+
+ bool should_publish_on_sse_event_boundary() const
+ { return publish_on_sse_event_boundary; }
+
private:
bool is_data_originates_from_client;
int32_t publish_length;
bool publish_body = false;
+ bool publish_on_sse_event_boundary = false;
};
}
return buf;
}
bool HttpStreamSplitter::finish(snort::Flow*) { return false; }
-void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t) { }
+void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t,
+ HttpEnums::PartialFlushType) { }
HttpMsgHeader::HttpMsgHeader(const uint8_t* buffer, const uint16_t buf_size,
HttpFlowData* session_data_, SourceId source_id_, bool buf_owner, Flow* flow_,
#include "http_enum.h"
#include "http_flow_data.h"
#include "http_module.h"
+#include "http_transaction.h"
using namespace HttpEnums;
using namespace HttpCommon;
HttpBodyCutter::HttpBodyCutter(bool accelerated_blocking_, ScriptFinder* finder_,
HttpFlowData* const session_data, SourceId source_id)
: accelerated_blocking(accelerated_blocking_)
+ , sse_state(source_id == SRC_SERVER &&
+ session_data->transaction[source_id] &&
+ session_data->transaction[source_id]->should_publish_on_sse_event_boundary() ?
+ SSE_START : SSE_DISABLED)
, finder(finder_)
, session_data(session_data)
, source_id(source_id)
if (octets_seen + length < flow_target)
{
- const auto [accelerate, consumed] = analyze_body(buffer, length, infractions, events);
+ const auto flush_ctx = analyze_body(buffer, length, infractions, events);
+ const auto consumed = flush_ctx.consumed;
if ( consumed < length )
{
}
octets_seen += length;
- return accelerate ? SCAN_NOT_FOUND_ACCELERATE : SCAN_NOT_FOUND;
+ if ( flush_ctx.is_none() )
+ return SCAN_NOT_FOUND;
+
+ return flush_ctx.is_publish() ? SCAN_NOT_FOUND_PARTIAL_PUBLISH : SCAN_NOT_FOUND_ACCELERATE;
}
if (!stretch)
{
assert(flow_target >= octets_seen);
const uint32_t planned_flush = flow_target - octets_seen;
- const auto [_, consumed] = analyze_body(buffer, planned_flush, infractions, events);
+ const auto consumed = analyze_body(buffer, planned_flush, infractions, events).consumed;
num_flush = consumed;
remaining -= octets_seen + num_flush;
else
planned_flush = flow_target - octets_seen;
- const auto [_, consumed] = analyze_body(buffer, planned_flush, infractions, events);
+ const auto consumed = analyze_body(buffer, planned_flush, infractions, events).consumed;
num_flush = consumed;
remaining -= octets_seen + num_flush;
if (octets_seen + length < flow_target)
{
- const auto [accelerate, consumed] = analyze_body(buffer, length, infractions, events);
+ const auto flush_ctx = analyze_body(buffer, length, infractions, events);
+ const auto consumed = flush_ctx.consumed;
if ( consumed < length )
{
// Not enough data yet to create a message section
octets_seen += length;
- return accelerate ? SCAN_NOT_FOUND_ACCELERATE : SCAN_NOT_FOUND;
+ if ( flush_ctx.is_none() )
+ return SCAN_NOT_FOUND;
+
+ return flush_ctx.is_publish() ? SCAN_NOT_FOUND_PARTIAL_PUBLISH : SCAN_NOT_FOUND_ACCELERATE;
}
else if (stretch && (octets_seen + length <= flow_target + MAX_SECTION_STRETCH))
{
// Cut the section at the end of this TCP segment to avoid splitting a packet
const uint32_t planned_flush = length;
- const auto [_, consumed] = analyze_body(buffer, planned_flush, infractions, events);
+ const auto consumed = analyze_body(buffer, planned_flush, infractions, events).consumed;
num_flush = consumed;
// Cut the section at the target length. Either stretching is not allowed or the end of
// the segment is too far away.
const uint32_t planned_flush = flow_target - octets_seen;
- const auto [_, consumed] = analyze_body(buffer, planned_flush, infractions, events);
+ const auto consumed = analyze_body(buffer, planned_flush, infractions, events).consumed;
num_flush = consumed;
const uint32_t adjusted_target = stretch ? MAX_SECTION_STRETCH + flow_target : flow_target;
bool accelerate_this_packet = false;
+ bool partial_publish_this_packet = false;
for (int32_t k=0; k < static_cast<int32_t>(length); k++)
{
if ( accelerated_blocking or !discard_mode )
{
- const auto [accelerate, consumed] = analyze_body(buffer + k, skip_amount,
- infractions, events);
- accelerate_this_packet = accelerate or accelerate_this_packet;
+ const auto flush_ctx = analyze_body(buffer + k, skip_amount, infractions, events);
+ const auto consumed = flush_ctx.consumed;
+ if ( flush_ctx.is_publish() )
+ partial_publish_this_packet = true;
+ else if ( flush_ctx.is_accelerate() )
+ accelerate_this_packet = true;
if ( consumed < skip_amount and !discard_mode )
{
skip_amount = (skip_amount <= adjusted_target-data_seen) ? skip_amount :
adjusted_target-data_seen;
- const auto [accelerate, consumed] = analyze_body(buffer + k,
- skip_amount, infractions, events);
- accelerate_this_packet = accelerate or accelerate_this_packet;
+ const auto flush_ctx = analyze_body(buffer + k, skip_amount, infractions, events);
+ const auto consumed = flush_ctx.consumed;
+ if ( flush_ctx.is_publish() )
+ partial_publish_this_packet = true;
+ else if ( flush_ctx.is_accelerate() )
+ accelerate_this_packet = true;
if ( consumed < skip_amount )
{
octets_seen += length;
- if (accelerate_this_packet || (zero_chunk && data_seen))
- return SCAN_NOT_FOUND_ACCELERATE;
+ if ( zero_chunk && data_seen )
+ accelerate_this_packet = true;
- return SCAN_NOT_FOUND;
+ if ( !accelerate_this_packet && !partial_publish_this_packet )
+ return SCAN_NOT_FOUND;
+
+ return accelerate_this_packet ? SCAN_NOT_FOUND_ACCELERATE : SCAN_NOT_FOUND_PARTIAL_PUBLISH;
}
ScanResult HttpBodyHXCutter::cut(const uint8_t* buffer, uint32_t length, HttpInfractions* infractions,
if (octets_seen + length < flow_target)
{
// Not enough data yet to create a message section
- const auto [accelerate, consumed] = analyze_body(buffer, length, infractions, events);
+ const auto flush_ctx = analyze_body(buffer, length, infractions, events);
+ const auto consumed = flush_ctx.consumed;
if ( consumed < length )
{
octets_seen += length;
total_octets_scanned += length;
- return accelerate ? SCAN_NOT_FOUND_ACCELERATE : SCAN_NOT_FOUND;
+ if ( flush_ctx.is_none() )
+ return SCAN_NOT_FOUND;
+
+ return flush_ctx.is_publish() ? SCAN_NOT_FOUND_PARTIAL_PUBLISH : SCAN_NOT_FOUND_ACCELERATE;
}
else
{
else
planned_flush = flow_target - octets_seen;
- const auto [_, consumed] = analyze_body(buffer, planned_flush, infractions, events);
+ const auto consumed = analyze_body(buffer, planned_flush, infractions, events).consumed;
num_flush = consumed;
total_octets_scanned += num_flush;
// requires script detection. Exactly what we are looking for is encapsulated in dangerous().
//
// Return value FlushContext contains:
-// accelerate: true indicates a match and enables the packet that completes the matching sequence
-// to be sent for partial inspection.
// consumed: how many input bytes the decompressor actually processed. When less than the
// input length the decompressor buffer is full and the caller should flush early.
+// decision: NONE when no early action is needed, ACCELERATE for detect-driven partial
+// inspection, PUBLISH for publish-only partial inspection on SSE boundaries.
//
// Any attempt to optimize this code should be mindful that once you skip any part of the message
// body, dangerous() loses the ability to unzip subsequent data.
{
const auto [_, consumed] = decompress(data, length, infractions, events);
assert(consumed <= length);
- return { false, consumed };
+
+ if ( (sse_state != SSE_DISABLED) &&
+ (session_data->compress[source_id] == nullptr) && has_sse_boundary(data, consumed) )
+ {
+ return { consumed, FlushContext::PUBLISH };
+ }
+
+ return { consumed, FlushContext::NONE };
}
- const auto result = dangerous(data, length, infractions, events);
+ auto result = dangerous(data, length, infractions, events);
assert(result.consumed <= length);
- if ( result.accelerate )
+ if ( result.is_accelerate() )
HttpModule::increment_peg_counts(PEG_SCRIPT_DETECTION);
+ // FIXIT-E blank-line boundary detection currently inspects only the raw response body.
+ // That is sufficient for the current work in progress, but compressed response bodies need
+ // decompression-aware boundary accounting later.
+ if ( result.is_none() && (sse_state != SSE_DISABLED) &&
+ (session_data->compress[source_id] == nullptr) && has_sse_boundary(data, result.consumed) )
+ {
+ result.decision = FlushContext::PUBLISH;
+ }
+
return result;
}
+// Scan a run of uncompressed response body octets for an SSE event boundary: a blank
+// line, i.e. two consecutive line terminators. State is kept across calls in sse_state
+// so a boundary split between TCP segments is still recognized. Returns true at the
+// first boundary found in this buffer; the remaining octets of the buffer are not
+// scanned and the machine restarts at SSE_START on the next call.
+// NOTE(review): only boundaries completed by '\n' are reported. A blank line terminated
+// by a lone CR (e.g. "data\r\r") never returns true here -- confirm that CR-only SSE
+// line terminators are intentionally excluded.
+bool HttpBodyCutter::has_sse_boundary(const uint8_t* data, uint32_t length)
+{
+    for (uint32_t k = 0; k < length; k++)
+    {
+        switch (sse_state)
+        {
+        case SSE_DISABLED:
+        default:
+            break;
+
+        case SSE_START:
+            // Skip line terminators at the start; any other octet begins a data line
+            if ( (data[k] != '\n') && (data[k] != '\r') )
+                sse_state = SSE_DATA_LINE;
+            break;
+
+        case SSE_DATA_LINE:
+            // A '\n' ends the line outright; a '\r' may be CR or the start of CRLF
+            if (data[k] == '\n')
+                sse_state = SSE_EMPTY_LINE;
+            else if (data[k] == '\r')
+                sse_state = SSE_CR_DATA_LINE;
+            break;
+
+        case SSE_CR_DATA_LINE:
+            // '\n' completes a CRLF line ending; a second '\r' means the line ended
+            // with CR and a potentially empty line has begun
+            if (data[k] == '\n')
+                sse_state = SSE_EMPTY_LINE;
+            else
+                sse_state = (data[k] == '\r') ? SSE_CR_EMPTY_LINE : SSE_DATA_LINE;
+            break;
+
+        case SSE_EMPTY_LINE:
+            // Previous line ended with '\n'; another terminator makes a blank line
+            if (data[k] == '\n')
+            {
+                sse_state = SSE_START;
+                return true; // blank-line boundary recognized
+            }
+            if (data[k] == '\r')
+            {
+                sse_state = SSE_CR_EMPTY_LINE;
+                break;
+            }
+            sse_state = SSE_DATA_LINE;
+            break;
+
+        case SSE_CR_EMPTY_LINE:
+            // Pending '\r' on an otherwise empty line; '\n' completes the boundary
+            if (data[k] == '\n')
+            {
+                sse_state = SSE_START;
+                return true; // blank-line boundary recognized
+            }
+
+            sse_state = (data[k] == '\r') ? SSE_CR_EMPTY_LINE : SSE_DATA_LINE;
+            break;
+        }
+    }
+
+    return false;
+}
+
bool HttpBodyCutter::find_partial(const uint8_t* input_buf, uint32_t input_length, bool end)
{
for (uint32_t k = 0; k < input_length; k++)
const auto [decompressed, consumed] = decompress(data, length, infractions, events);
if ( !decompressed )
- return { true, consumed };
+ return { consumed, FlushContext::ACCELERATE };
auto [input_buf, input_length] = *decompressed;
if ( input_length > string_length )
{
if ( partial_match and find_partial(input_buf, input_length, true) )
- return { true, consumed };
+ return { consumed, FlushContext::ACCELERATE };
if ( finder->search(input_buf, input_length) >= 0 )
- return { true, consumed };
+ return { consumed, FlushContext::ACCELERATE };
uint32_t delta = input_length - string_length + 1;
input_buf += delta;
}
if ( find_partial(input_buf, input_length, false) )
- return { true, consumed };
+ return { consumed, FlushContext::ACCELERATE };
- return { false, consumed };
+ return { consumed, FlushContext::NONE };
}
uint8_t HttpZeroNineCutter::match[] = { 'H', 'T', 'T', 'P', '/' };
private:
+    // Returned by dangerous()/analyze_body(): how much of the input was processed and
+    // whether the caller should take early action on this data.
    struct FlushContext
    {
-        bool accelerate = false;
+        // Early-action decision for the scanned octets
+        enum Decision : uint8_t
+        {
+            ACCELERATE,  // flush early and run detection (dangerous content matched)
+            PUBLISH,     // flush early only to publish (SSE event boundary seen)
+            NONE,        // no early action needed
+        };
+
+        // Input octets actually processed; when less than the input length the caller
+        // should flush early (decompressor buffer is full)
        uint32_t consumed = 0;
+        Decision decision = NONE;
+
+        bool is_none() const { return decision == NONE; }
+        bool is_publish() const { return decision == PUBLISH; }
+        bool is_accelerate() const { return decision == ACCELERATE; }
    };
struct DecompressOutput
const bool accelerated_blocking;
private:
+    // Parser state for SSE (Server-Sent Events) blank-line boundary detection,
+    // advanced one octet at a time by has_sse_boundary()
+    enum SseState : uint8_t
+    {
+        SSE_DISABLED,      // this cutter is not watching for SSE boundaries
+        SSE_START,         // at stream start or just after a recognized boundary
+        SSE_DATA_LINE,     // inside a data line
+        SSE_CR_DATA_LINE,  // saw '\r' after data; line may end in CR or CRLF
+        SSE_EMPTY_LINE,    // line ended with '\n'; another terminator => boundary
+        SSE_CR_EMPTY_LINE, // '\r' pending on an empty line; '\n' completes a boundary
+    };
+
FlushContext dangerous(const uint8_t* data, uint32_t length,
HttpInfractions* infractions, HttpEventGen* events);
bool find_partial(const uint8_t*, uint32_t, bool);
+ bool has_sse_boundary(const uint8_t* data, uint32_t length);
uint8_t partial_match = 0;
uint8_t string_length = 0;
+ SseState sse_state = SSE_DISABLED;
ScriptFinder* const finder;
const uint8_t* match_string = nullptr;
const uint8_t* match_string_upper = nullptr;
};
#endif
-
PEG_GET, PEG_HEAD, PEG_POST, PEG_PUT, PEG_DELETE, PEG_CONNECT, PEG_OPTIONS, PEG_TRACE,
PEG_OTHER_METHOD, PEG_REQUEST_BODY, PEG_CHUNKED, PEG_URI_NORM, PEG_URI_PATH, PEG_URI_CODING,
PEG_CONCURRENT_SESSIONS, PEG_MAX_CONCURRENT_SESSIONS, PEG_SCRIPT_DETECTION,
- PEG_PARTIAL_INSPECT, PEG_EXCESS_PARAMS, PEG_PARAMS, PEG_CUTOVERS, PEG_SSL_SEARCH_ABND_EARLY,
+ PEG_PARTIAL_INSPECT, PEG_PARTIAL_PUBLISH, PEG_EXCESS_PARAMS, PEG_PARAMS, PEG_CUTOVERS,
+ PEG_SSL_SEARCH_ABND_EARLY,
PEG_PIPELINED_FLOWS, PEG_PIPELINED_REQUESTS, PEG_TOTAL_BYTES, PEG_JS_INLINE, PEG_JS_EXTERNAL,
PEG_JS_PDF, PEG_SKIP_MIME_ATTACH, PEG_COMPRESSED_GZIP, PEG_COMPRESSED_GZIP_FAILED, PEG_COMPRESSED_DEFLATE,
PEG_INCORRECT_DEFLATE_HEADER, PEG_COMPRESSED_DEFLATE_FAILED, PEG_COMPRESSED_NOT_SUPPORTED,
PEG_COMPRESSED_UNKNOWN, PEG_MAX_PUBLISH_DEPTH_HITS, PEG_COUNT_MAX};
// Result of scanning by splitter
-enum ScanResult { SCAN_NOT_FOUND, SCAN_NOT_FOUND_ACCELERATE, SCAN_FOUND, SCAN_FOUND_PIECE,
+// SCAN_NOT_FOUND_PARTIAL_PUBLISH: section boundary not found yet; do a partial flush so
+// the data can be published to subscribers (detection is skipped for these flushes)
+enum ScanResult { SCAN_NOT_FOUND, SCAN_NOT_FOUND_ACCELERATE,
+    SCAN_NOT_FOUND_PARTIAL_PUBLISH, SCAN_FOUND, SCAN_FOUND_PIECE,
    SCAN_DISCARD, SCAN_DISCARD_PIECE, SCAN_ABORT };
+// Reason a partial flush was requested: PF_NONE = not a partial flush, PF_DETECT =
+// early flush so detection can run (script detection), PF_PUBLISH = early flush for
+// publishing only -- detection is not run on PF_PUBLISH sections
+enum PartialFlushType : uint8_t { PF_NONE, PF_DETECT, PF_PUBLISH };
+
// State machine for chunk parsing
enum ChunkState { CHUNK_NEWLINES, CHUNK_ZEROS, CHUNK_LEADING_WS, CHUNK_NUMBER, CHUNK_TRAILING_WS,
    CHUNK_OPTIONS, CHUNK_HCRLF, CHUNK_DATA, CHUNK_DCRLF1, CHUNK_DCRLF2, CHUNK_BAD };
} // end namespace HttpEnums
#endif
-
assert((hx_body_state[source_id] == HX_BODY_NOT_COMPLETE) ||
(hx_body_state[source_id] == HX_BODY_LAST_SEG));
hx_body_state[source_id] = state;
- partial_flush[source_id] = false;
+ partial_flush[source_id] = HttpEnums::PF_NONE;
if (clear_partial_buffer)
{
// We've already sent all data through detection so no need to reinspect. Just need to
int32_t octets_reassembled[2] = { HttpCommon::STAT_NOT_PRESENT, HttpCommon::STAT_NOT_PRESENT };
int32_t num_head_lines[2] = { HttpCommon::STAT_NOT_PRESENT, HttpCommon::STAT_NOT_PRESENT };
bool tcp_close[2] = { false, false };
- bool partial_flush[2] = { false, false };
+ HttpEnums::PartialFlushType partial_flush[2] = { HttpEnums::PF_NONE, HttpEnums::PF_NONE };
uint64_t last_connect_trans_w_early_traffic = 0;
HttpCompressStream* compress[2] = { nullptr, nullptr };
HttpMsgSection* current_section;
HttpFlowData* session_data = http_get_flow_data(flow);
- if (!session_data->partial_flush[source_id])
+ switch (session_data->partial_flush[source_id])
+ {
+ case PF_NONE:
HttpModule::increment_peg_counts(PEG_INSPECT);
- else
+ break;
+ case PF_DETECT:
HttpModule::increment_peg_counts(PEG_PARTIAL_INSPECT);
+ break;
+ case PF_PUBLISH:
+ HttpModule::increment_peg_counts(PEG_PARTIAL_PUBLISH);
+ break;
+ default:
+ assert(false);
+ HttpModule::increment_peg_counts(PEG_INSPECT);
+ break;
+ }
switch (session_data->section_type[source_id])
{
current_section->analyze();
current_section->gen_events();
- if (!session_data->partial_flush[source_id])
+ if (session_data->partial_flush[source_id] == PF_NONE)
current_section->update_flow();
session_data->section_type[source_id] = SEC__NOT_COMPUTE;
p->set_pdu_section(pdu_section);
}
- if (current_section->run_detection(p))
+ const bool skip_detection = (session_data->partial_flush[source_id] == PF_PUBLISH);
+
+ if (!skip_detection && current_section->run_detection(p))
{
#ifdef REG_TEST
if (HttpTestManager::use_test_output(HttpTestManager::IN_HTTP))
delete[] partial_detect_buffer;
- if (!session_data->partial_flush[source_id])
+ if (session_data->partial_flush[source_id] == PF_NONE)
{
bookkeeping_regular_flush(partial_detect_length, partial_detect_buffer,
partial_js_detect_length, detect_length);
}
}
body_octets += msg_text.length();
- if (!session_data->partial_flush[source_id])
+ if (session_data->partial_flush[source_id] == PF_NONE)
transaction->add_body_len(source_id, detect_data.length());
partial_inspected_octets = session_data->partial_flush[source_id] ? msg_text.length() : 0;
}
int32_t new_depth = http_publish_length_event.get_publish_length();
int32_t should_publish = (int32_t)http_publish_length_event.should_publish_body();
+ // FIXIT-E move STASH_PUBLISH_REQUEST_BODY / STASH_PUBLISH_RESPONSE_BODY to the
+ // transaction object as well; this body-publish setting is negotiated and consumed
+ // entirely within http_inspect and fits per-transaction state better than generic
+ // flow stash.
if (is_request)
flow->set_attr(STASH_PUBLISH_REQUEST_BODY, should_publish);
else
flow->set_attr(STASH_PUBLISH_RESPONSE_BODY, should_publish);
+ if (http_publish_length_event.should_publish_on_sse_event_boundary())
+ transaction->set_publish_on_sse_event_boundary();
+
if (new_depth > session_data->publish_depth_remaining[source_id])
{
session_data->publish_octets[source_id] = 0;
uint8_t* data, unsigned len, uint32_t flags, unsigned& copied) override;
bool finish(snort::Flow* flow) override;
void prep_partial_flush(snort::Flow* flow, uint32_t num_flush) override
- { prep_partial_flush(flow, num_flush, 0, 0); }
+ { prep_partial_flush(flow, num_flush, 0, 0, HttpEnums::PF_DETECT); }
void prep_partial_flush(snort::Flow* flow, uint32_t num_flush, uint32_t num_excess,
- uint32_t num_head_lines);
+ uint32_t num_head_lines, HttpEnums::PartialFlushType partial_flush_type);
bool is_paf() override { return true; }
static StreamSplitter::Status status_value(StreamSplitter::Status ret_val, bool http2 = false);
}
void HttpStreamSplitter::prep_partial_flush(Flow* flow, uint32_t num_flush,
- uint32_t num_excess, uint32_t num_head_lines)
+ uint32_t num_excess, uint32_t num_head_lines, PartialFlushType partial_flush_type)
{
// cppcheck-suppress unreadVariable
Profile profile(HttpModule::get_profile_stats());
session_data->cutter[source_id]->get_is_broken_chunk(),
session_data->cutter[source_id]->get_num_good_chunks(),
session_data->cutter[source_id]->get_octets_seen() - num_flush);
- session_data->partial_flush[source_id] = true;
+ session_data->partial_flush[source_id] = partial_flush_type;
}
{
case SCAN_NOT_FOUND:
case SCAN_NOT_FOUND_ACCELERATE:
+ case SCAN_NOT_FOUND_PARTIAL_PUBLISH:
if (cutter->get_octets_seen() == MAX_OCTETS)
{
*session_data->get_infractions(source_id) += INF_ENDLESS_HEADER;
}
}
- if (cut_result == SCAN_NOT_FOUND_ACCELERATE)
+ if (cut_result == SCAN_NOT_FOUND_ACCELERATE ||
+ cut_result == SCAN_NOT_FOUND_PARTIAL_PUBLISH)
{
- prep_partial_flush(flow, length, cutter->get_num_excess(), cutter->get_num_head_lines());
+ prep_partial_flush(flow, length, cutter->get_num_excess(), cutter->get_num_head_lines(),
+ (cut_result == SCAN_NOT_FOUND_ACCELERATE) ? PF_DETECT : PF_PUBLISH);
#ifdef REG_TEST
if (!HttpTestManager::use_test_input(HttpTestManager::IN_HTTP))
#endif
#endif
SectionType& type = session_data->type_expected[source_id];
- session_data->partial_flush[source_id] = false;
+ session_data->partial_flush[source_id] = PF_NONE;
if (type == SEC_ABORT)
return status_value(StreamSplitter::ABORT);
{ CountType::MAX, "max_concurrent_sessions", "maximum concurrent http sessions" },
{ CountType::SUM, "script_detections", "early inspections of scripts in HTTP responses" },
{ CountType::SUM, "partial_inspections", "early inspections done for script detection" },
+ { CountType::SUM, "partial_publishes", "publish-only partial flushes" },
{ CountType::SUM, "excess_parameters", "repeat parameters exceeding max" },
{ CountType::SUM, "parameters", "HTTP parameters inspected" },
{ CountType::SUM, "connect_tunnel_cutovers", "CONNECT tunnel flow cutovers to wizard" },
void set_one_hundred_response();
bool final_response() const { return !second_response_expected; }
+ bool should_publish_on_sse_event_boundary() const
+ { return publish_on_sse_event_boundary; }
+ void set_publish_on_sse_event_boundary()
+ { publish_on_sse_event_boundary = true; }
void add_body_len(HttpCommon::SourceId source_id, uint64_t len)
{ body_len[source_id] += len; }
HttpMsgSection* archive_hdr_list = nullptr;
HttpInfractions* infractions[2];
+ // FIXIT-E compact these flags into a bitset or packed field if more per-transaction
+ // flags are added; separate bool members are easy to add but waste space.
bool response_seen = false;
bool one_hundred_response = false;
bool second_response_expected = false;
// transaction in the fairly rare case where the request and response are received in
// parallel.
bool shared_ownership = false;
+ bool publish_on_sse_event_boundary = false;
unsigned pub_id;
snort::Flow* const flow;
};
#endif
-
}
bool HttpStreamSplitter::finish(snort::Flow*) { return false; }
-void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t) { }
+void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t,
+ HttpEnums::PartialFlushType) { }
void HttpMsgSection::clear_tmp_buffers() { }
HttpTransaction::~HttpTransaction() = default;
return buf;
}
bool HttpStreamSplitter::finish(snort::Flow*) { return false; }
-void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t) {}
+void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t,
+ HttpEnums::PartialFlushType) {}
// HttpMsgSection stubs (http_msg_section.cc not in SOURCES)
HttpMsgSection::HttpMsgSection(const uint8_t* buffer, const uint16_t buf_size,
return buf;
}
bool HttpStreamSplitter::finish(snort::Flow*) { return false; }
-void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t) { }
+void HttpStreamSplitter::prep_partial_flush(snort::Flow*, uint32_t, uint32_t, uint32_t,
+ HttpEnums::PartialFlushType) { }
void HttpMsgSection::clear_tmp_buffers() { }
THREAD_LOCAL PegCount HttpModule::peg_counts[PEG_COUNT_MAX] = { };
bool partial_flush, uint32_t num_excess)
{
assert(flow_data!=nullptr);
- flow_data->partial_flush[source_id] = partial_flush;
+ flow_data->partial_flush[source_id] = partial_flush ? HttpEnums::PF_DETECT :
+ HttpEnums::PF_NONE;
flow_data->num_excess[source_id] = num_excess;
}
};