From: Russ Combs (rucombs)
Date: Thu, 12 Jan 2017 20:50:58 +0000 (-0500)
Subject: Merge pull request #773 in SNORT/snort3 from nhttp63 to master
X-Git-Tag: 3.0.0-233~111
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=729ef370b493660dc10c5f943c773085041d3352;p=thirdparty%2Fsnort3.git

Merge pull request #773 in SNORT/snort3 from nhttp63 to master

Squashed commit of the following:

commit 1af3bd51f306e045a09ce819e3eff05f868edce1
Author: Tom Peters
Date:   Thu Jan 12 14:54:33 2017 -0500

    code review fixes

commit 93732c6027015abc7f651889cd20e853aac1ce36
Author: Tom Peters
Date:   Tue Jan 3 10:39:06 2017 -0500

    NHI-stream issues
---

diff --git a/src/flow/flow.h b/src/flow/flow.h
index 1fe7d05d6..b567508f0 100644
--- a/src/flow/flow.h
+++ b/src/flow/flow.h
@@ -219,7 +219,7 @@ public:
     { return (ssn_state.session_flags & SSNFLAG_PROXIED) != 0; }

     bool is_stream()
-    { return to_utype(pkt_type) & to_utype(PktType::STREAM); }
+    { return (to_utype(pkt_type) & to_utype(PktType::STREAM)) != 0; }

     void block()
     { ssn_state.session_flags |= SSNFLAG_BLOCK; }
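A quick aside on the flow.h hunk above, as a minimal stand-alone sketch (the PktType values and the to_utype() helper below are stand-ins, not the definitions from the Snort tree): returning a raw bitwise AND from a bool member relies on an implicit integer-to-bool conversion that some compilers warn about, and the added "!= 0" makes the boolean intent explicit.

#include <cstdint>
#include <type_traits>

enum class PktType : std::uint32_t { NONE = 0x0, PDU = 0x1, STREAM = 0x2 };  // made-up values

template <typename E>
constexpr std::underlying_type_t<E> to_utype(E e)
{ return static_cast<std::underlying_type_t<E>>(e); }

// Same shape as Flow::is_stream(): a mask test wrapped in an explicit comparison.
bool is_stream(PktType pkt_type)
{ return (to_utype(pkt_type) & to_utype(PktType::STREAM)) != 0; }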
diff --git a/src/service_inspectors/http_inspect/http_cutter.cc b/src/service_inspectors/http_inspect/http_cutter.cc
index e53202cb4..73f1baad4 100644
--- a/src/service_inspectors/http_inspect/http_cutter.cc
+++ b/src/service_inspectors/http_inspect/http_cutter.cc
@@ -439,12 +439,12 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
             break;
         }
     }
-    octets_seen += length;
     if (discard_mode)
     {
         num_flush = length;
         return SCAN_DISCARD_PIECE;
     }

+    octets_seen += length;
     return SCAN_NOTFOUND;
 }
diff --git a/src/service_inspectors/http_inspect/http_enum.h b/src/service_inspectors/http_inspect/http_enum.h
index 2c1dc5b9b..ebc32519e 100644
--- a/src/service_inspectors/http_inspect/http_enum.h
+++ b/src/service_inspectors/http_inspect/http_enum.h
@@ -24,7 +24,7 @@

 namespace HttpEnums
 {
-static const int MAX_OCTETS = 65535;
+static const int MAX_OCTETS = 63780;
 static const int DATA_BLOCK_SIZE = 16384;
 static const int FINAL_BLOCK_SIZE = 24576;
 static const int GZIP_BLOCK_SIZE = 2048;
diff --git a/src/service_inspectors/http_inspect/http_flow_data.h b/src/service_inspectors/http_inspect/http_flow_data.h
index 2c858f376..1c411af03 100644
--- a/src/service_inspectors/http_inspect/http_flow_data.h
+++ b/src/service_inspectors/http_inspect/http_flow_data.h
@@ -73,14 +73,18 @@ private:

     // *** StreamSplitter internal data - reassemble()
     uint8_t* section_buffer[2] = { nullptr, nullptr };
+    uint32_t section_total[2] = { 0, 0 };
     uint32_t section_offset[2] = { 0, 0 };
     HttpEnums::ChunkState chunk_state[2] = { HttpEnums::CHUNK_NUMBER, HttpEnums::CHUNK_NUMBER };
     uint32_t chunk_expected_length[2] = { 0, 0 };
+    uint32_t running_total[2] = { 0, 0 };

     // *** StreamSplitter internal data - scan() => reassemble()
     uint32_t num_excess[2] = { 0, 0 };
     bool is_broken_chunk[2] = { false, false };
     uint32_t num_good_chunks[2] = { 0, 0 };
+    uint32_t octets_expected[2] = { 0, 0 };
+    bool strict_length[2] = { false, false };

     // *** StreamSplitter => Inspector (facts about the most recent message section)
     HttpEnums::SectionType section_type[2] = { HttpEnums::SEC__NOT_COMPUTE,
diff --git a/src/service_inspectors/http_inspect/http_head_norm.cc b/src/service_inspectors/http_inspect/http_head_norm.cc
index dd99ad286..262f2ae26 100644
--- a/src/service_inspectors/http_inspect/http_head_norm.cc
+++ b/src/service_inspectors/http_inspect/http_head_norm.cc
@@ -73,7 +73,8 @@ void HeaderNormalizer::normalize(const HeaderId head_id, const int count,
     int num_matches = 0;
     int32_t buffer_length = 0;

-    int curr_match;
+    // FIXIT-P initialization that serves no functional purpose to prevent compiler warning
+    int curr_match = -1;
     for (int k=0; k < num_headers; k++)
     {
         if (header_name_id[k] == head_id)
diff --git a/src/service_inspectors/http_inspect/http_stream_splitter.h b/src/service_inspectors/http_inspect/http_stream_splitter.h
index b810f10ff..db4b3f81b 100644
--- a/src/service_inspectors/http_inspect/http_stream_splitter.h
+++ b/src/service_inspectors/http_inspect/http_stream_splitter.h
@@ -47,7 +47,8 @@ public:
 private:
     void prepare_flush(HttpFlowData* session_data, uint32_t* flush_offset,
         HttpEnums::SectionType section_type, uint32_t num_flushed, uint32_t num_excess, int32_t num_head_lines,
-        bool is_broken_chunk, uint32_t num_good_chunks) const;
+        bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen, bool strict_length)
+        const;
     HttpCutter* get_cutter(HttpEnums::SectionType type, const HttpFlowData* session) const;
     void chunk_spray(HttpFlowData* session_data, uint8_t* buffer, const uint8_t* data,
         unsigned length) const;
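The new HttpFlowData members and the widened prepare_flush() declaration above form a small scan-to-reassemble handoff. Below is a rough model of that handoff, not the real classes; the direction indexing (0 and 1 for the two sides of the flow) is an assumption made only for this sketch.

#include <cstdint>

// Per-direction bookkeeping mirroring the fields added to HttpFlowData.
struct SplitterBookkeeping
{
    uint32_t octets_expected[2] = { 0, 0 };  // written by scan() when it flushes a section
    uint32_t running_total[2] = { 0, 0 };    // accumulated by reassemble() as pieces arrive
    bool strict_length[2] = { false, false };
};

// scan()-side recording, using the same assignment the patch adds to prepare_flush():
// octets_expected is set to octets_seen + num_flushed for the section being flushed.
inline void record_flush(SplitterBookkeeping& bk, int source_id,
    uint32_t octets_seen, uint32_t num_flushed, bool strict)
{
    bk.octets_expected[source_id] = octets_seen + num_flushed;
    bk.strict_length[source_id] = strict;
}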
diff --git a/src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc b/src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
index 15846116e..5ab4d75ce 100644
--- a/src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
+++ b/src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
@@ -204,8 +204,6 @@ const StreamBuffer* HttpStreamSplitter::reassemble(Flow* flow, unsigned total, u

     copied = len;

-    assert(total <= MAX_OCTETS);
-
     HttpFlowData* session_data = (HttpFlowData*)flow->get_flow_data(HttpFlowData::http_flow_id);
     assert(session_data != nullptr);

@@ -244,12 +242,19 @@ const StreamBuffer* HttpStreamSplitter::reassemble(Flow* flow, unsigned total, u
     }
 #endif

+    // FIXIT-H Workaround for TP Bug 149662
     if (session_data->section_type[source_id] == SEC__NOT_COMPUTE)
-    { // FIXIT-M In theory this check should not be necessary
+    {
         return nullptr;
     }

-    // FIXIT-P stream should be ehanced to do discarding for us. For now flush-then-discard here
+    assert(session_data->section_type[source_id] != SEC__NOT_COMPUTE);
+    assert(total <= MAX_OCTETS);
+
+    session_data->running_total[source_id] += len;
+    assert(session_data->running_total[source_id] <= total);
+
+    // FIXIT-P stream should be enhanced to do discarding for us. For now flush-then-discard here
     // is how scan() handles things we don't need to examine.
     if (session_data->section_type[source_id] == SEC_DISCARD)
     {
@@ -262,6 +267,12 @@ const StreamBuffer* HttpStreamSplitter::reassemble(Flow* flow, unsigned total, u

 #endif
         if (flags & PKT_PDU_TAIL)
         {
+            assert(session_data->running_total[source_id] == total);
+            assert(
+                (session_data->octets_expected[source_id] == total) ||
+                (!session_data->strict_length[source_id] &&
+                (total <= session_data->octets_expected[source_id])));
+            session_data->running_total[source_id] = 0;
             session_data->section_type[source_id] = SEC__NOT_COMPUTE;
             // When we are skipping through a message body beyond flow depth this is the end of
@@ -295,7 +306,10 @@ const StreamBuffer* HttpStreamSplitter::reassemble(Flow* flow, unsigned total, u
             buffer = new uint8_t[MAX_OCTETS];
         else
             buffer = new uint8_t[total];
-    }
+        session_data->section_total[source_id] = total;
+    }
+    else
+        assert(session_data->section_total[source_id] == total);

     if (session_data->section_type[source_id] != SEC_BODY_CHUNK)
     {
@@ -312,6 +326,20 @@ const StreamBuffer* HttpStreamSplitter::reassemble(Flow* flow, unsigned total, u

     if (flags & PKT_PDU_TAIL)
     {
+        uint32_t& running_total = session_data->running_total[source_id];
+        // FIXIT-H workaround for TP Bug 149980: if we get shorted it must be because the TCP
+        // connection closed
+        if ((running_total < session_data->octets_expected[source_id]) &&
+            !session_data->strict_length[source_id])
+            session_data->tcp_close[source_id] = true;
+        // FIXIT-L workaround for TP Bug 148058: for now number of bytes provided following a
+        // connection close may be slightly less than total
+        assert((running_total == total) || session_data->tcp_close[source_id]);
+        assert(
+            (session_data->octets_expected[source_id] == running_total) ||
+            (!session_data->strict_length[source_id] &&
+            (running_total <= session_data->octets_expected[source_id])));
+        running_total = 0;
         const Field& send_to_detection = my_inspector->process(buffer,
             session_data->section_offset[source_id] - session_data->num_excess[source_id], flow,
             source_id, true);
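The asserts added to reassemble() above all enforce one accounting invariant. Here is a simplified, stand-alone model of that invariant (a plain struct, not the real splitter or Snort's Flow API): every reassembled piece adds to a running total, and at the PDU tail the total must match what scan() promised, with a shortfall tolerated only for non-strict sections, which the FIXIT comments attribute to a TCP connection close.

#include <cassert>
#include <cstdint>

struct SectionLengthCheck
{
    uint32_t octets_expected = 0;  // promised by scan() at flush time
    bool strict_length = false;
    uint32_t running_total = 0;

    // One call per reassembled piece; pieces must never exceed the advertised total.
    void add_piece(uint32_t len, uint32_t total)
    {
        running_total += len;
        assert(running_total <= total);
    }

    // At PKT_PDU_TAIL: totals must line up, except for non-strict sections cut short by a close.
    void at_tail(uint32_t total, bool tcp_close)
    {
        assert((running_total == total) || tcp_close);
        assert((octets_expected == running_total) ||
            (!strict_length && (running_total <= octets_expected)));
        running_total = 0;
    }
};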
diff --git a/src/service_inspectors/http_inspect/http_stream_splitter_scan.cc b/src/service_inspectors/http_inspect/http_stream_splitter_scan.cc
index ca17a7c62..44317353c 100644
--- a/src/service_inspectors/http_inspect/http_stream_splitter_scan.cc
+++ b/src/service_inspectors/http_inspect/http_stream_splitter_scan.cc
@@ -33,13 +33,15 @@ using namespace HttpEnums;
 // Convenience function. All housekeeping that must be done before we can return FLUSH to stream.
 void HttpStreamSplitter::prepare_flush(HttpFlowData* session_data, uint32_t* flush_offset,
     SectionType section_type, uint32_t num_flushed, uint32_t num_excess, int32_t num_head_lines,
-    bool is_broken_chunk, uint32_t num_good_chunks) const
+    bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen, bool strict_length) const
 {
     session_data->section_type[source_id] = section_type;
     session_data->num_excess[source_id] = num_excess;
     session_data->num_head_lines[source_id] = num_head_lines;
     session_data->is_broken_chunk[source_id] = is_broken_chunk;
     session_data->num_good_chunks[source_id] = num_good_chunks;
+    session_data->octets_expected[source_id] = octets_seen + num_flushed;
+    session_data->strict_length[source_id] = strict_length;

 #ifdef REG_TEST
     if (HttpTestManager::use_test_input())
@@ -112,8 +114,9 @@ StreamSplitter::Status HttpStreamSplitter::scan(Flow* flow, const uint8_t* data,
     }
     else if (HttpTestManager::use_test_output())
     {
-        printf("Scan from flow data %" PRIu64 " direction %d length %u\n", session_data->seq_num,
-            source_id, length);
+        printf("Scan from flow data %" PRIu64
+            " direction %d length %u client port %u server port %u\n", session_data->seq_num,
+            source_id, length, flow->client_port, flow->server_port);
         fflush(stdout);
     }
 #endif
@@ -131,9 +134,9 @@ StreamSplitter::Status HttpStreamSplitter::scan(Flow* flow, const uint8_t* data,
             // us to overcome this limitation and reuse the entire HTTP infrastructure.
             type = SEC_BODY_OLD;
             uint32_t not_used;
-            prepare_flush(session_data, &not_used, SEC_STATUS, 14, 0, 0, false, 0);
+            prepare_flush(session_data, &not_used, SEC_STATUS, 14, 0, 0, false, 0, 14, true);
             my_inspector->process((const uint8_t*)"HTTP/0.9 200 .", 14, flow, SRC_SERVER, false);
-            prepare_flush(session_data, &not_used, SEC_HEADER, 0, 0, 0, false, 0);
+            prepare_flush(session_data, &not_used, SEC_HEADER, 0, 0, 0, false, 0, 0, true);
             my_inspector->process((const uint8_t*)"", 0, flow, SRC_SERVER, false);
         }

@@ -152,6 +155,8 @@ StreamSplitter::Status HttpStreamSplitter::scan(Flow* flow, const uint8_t* data,
     if (cutter->get_octets_seen() == MAX_OCTETS)
     {
         session_data->infractions[source_id] += INF_ENDLESS_HEADER;
+        // FIXIT-L the following call seems inappropriate for headers and trailers. Those cases
+        // should be an unconditional EVENT_LOSS_OF_SYNC.
         session_data->events[source_id].generate_misformatted_http(data, length);
         // FIXIT-H need to process this data not just discard it.
         session_data->type_expected[source_id] = SEC_ABORT;
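One small piece of arithmetic behind the SEC_BODY_OLD workaround a few hunks above: the hard-coded 14s correspond to the length of the fabricated status line handed to the inspector, which a one-line sketch can confirm (sizeof counts the trailing NUL, hence the minus one).

static_assert(sizeof("HTTP/0.9 200 .") - 1 == 14,
    "the fabricated HTTP/0.9 status line is exactly 14 octets");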
@@ -175,7 +180,7 @@ StreamSplitter::Status HttpStreamSplitter::scan(Flow* flow, const uint8_t* data,
     case SCAN_DISCARD:
     case SCAN_DISCARD_PIECE:
         prepare_flush(session_data, flush_offset, SEC_DISCARD, cutter->get_num_flush(), 0, 0,
-            false, 0);
+            false, 0, cutter->get_octets_seen(), true);
         if (cut_result == SCAN_DISCARD)
         {
             delete cutter;
@@ -188,7 +193,8 @@ StreamSplitter::Status HttpStreamSplitter::scan(Flow* flow, const uint8_t* data,
         const uint32_t flush_octets = cutter->get_num_flush();
         prepare_flush(session_data, flush_offset, type, flush_octets, cutter->get_num_excess(),
             cutter->get_num_head_lines(), cutter->get_is_broken_chunk(),
-            cutter->get_num_good_chunks());
+            cutter->get_num_good_chunks(), cutter->get_octets_seen(),
+            !((type == SEC_BODY_CL) || (type == SEC_BODY_OLD)));
         if (cut_result == SCAN_FOUND)
         {
             delete cutter;
@@ -234,6 +240,7 @@ bool HttpStreamSplitter::finish(Flow* flow)
         (session_data->type_expected[source_id] == SEC_STATUS))
     {
         session_data->infractions[source_id] += INF_PARTIAL_START;
+        // FIXIT-M why not use generate_misformatted_http()?
         session_data->events[source_id].create_event(EVENT_LOSS_OF_SYNC);
         return false;
     }
@@ -242,7 +249,9 @@ bool HttpStreamSplitter::finish(Flow* flow)
     prepare_flush(session_data, &not_used, session_data->type_expected[source_id], 0, 0,
         session_data->cutter[source_id]->get_num_head_lines() + 1,
         session_data->cutter[source_id]->get_is_broken_chunk(),
-        session_data->cutter[source_id]->get_num_good_chunks());
+        session_data->cutter[source_id]->get_num_good_chunks(),
+        session_data->cutter[source_id]->get_octets_seen(),
+        true);

     return true;
 }
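Finally, a note on how strict_length gets chosen in the scan() changes above, written out as a stand-alone predicate (the SectionType enumerators below are stand-ins for HttpEnums, not the full set): discard sections, status lines, headers, and chunked bodies are flushed with strict_length true, while Content-Length and old-style bodies are the non-strict cases, which lines up with the reassemble() workaround that tolerates those sections being shorted by a connection close.

enum class SectionType
{ SEC_STATUS, SEC_HEADER, SEC_BODY_CL, SEC_BODY_OLD, SEC_BODY_CHUNK, SEC_DISCARD };

// Mirrors the expression passed to prepare_flush() on the normal flush path above.
constexpr bool strict_length_for(SectionType type)
{ return !((type == SectionType::SEC_BODY_CL) || (type == SectionType::SEC_BODY_OLD)); }

static_assert(!strict_length_for(SectionType::SEC_BODY_CL), "CL bodies may come up short");
static_assert(strict_length_for(SectionType::SEC_BODY_CHUNK), "chunked sections must match exactly");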