git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Merge pull request #1930 in SNORT/snort3 from ~THOPETER/snort3:nhttp130 to master
author: Mike Stepanek (mstepane) <mstepane@cisco.com>
Mon, 13 Jan 2020 21:22:26 +0000 (21:22 +0000)
committer: Mike Stepanek (mstepane) <mstepane@cisco.com>
Mon, 13 Jan 2020 21:22:26 +0000 (21:22 +0000)
Squashed commit of the following:

commit 2bb46538d39eb8ffdb2e1a2a0b1e2370972b5ff8
Author: Tom Peters <thopeter@cisco.com>
Date:   Thu Jan 9 13:12:08 2020 -0500

    http_inspect: no duplicate built-in events for a flow

src/service_inspectors/http_inspect/http_flow_data.cc
src/service_inspectors/http_inspect/http_flow_data.h
src/service_inspectors/http_inspect/http_msg_body.cc
src/service_inspectors/http_inspect/http_msg_head_shared.cc
src/service_inspectors/http_inspect/http_msg_request.cc
src/service_inspectors/http_inspect/http_msg_section.cc
src/service_inspectors/http_inspect/http_stream_splitter_finish.cc
src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
src/service_inspectors/http_inspect/http_stream_splitter_scan.cc
src/service_inspectors/http_inspect/http_transaction.cc
src/service_inspectors/http_inspect/http_transaction.h

index acbdd2f653ca28d7573c1caecdc8f1506f9d47f9..6b3a337ee577d1c63ad0063e945e0836362645e4 100644 (file)
@@ -132,8 +132,6 @@ void HttpFlowData::half_reset(SourceId source_id)
     }
     delete infractions[source_id];
     infractions[source_id] = new HttpInfractions;
-    delete events[source_id];
-    events[source_id] = new HttpEventGen;
     section_offset[source_id] = 0;
     chunk_state[source_id] = CHUNK_NEWLINES;
     chunk_expected_length[source_id] = 0;
@@ -237,15 +235,6 @@ HttpInfractions* HttpFlowData::get_infractions(SourceId source_id)
     return transaction[source_id]->get_infractions(source_id);
 }
 
-HttpEventGen* HttpFlowData::get_events(SourceId source_id)
-{
-    if (events[source_id] != nullptr)
-        return events[source_id];
-    assert(transaction[source_id] != nullptr);
-    assert(transaction[source_id]->get_events(source_id) != nullptr);
-    return transaction[source_id]->get_events(source_id);
-}
-
 #ifdef REG_TEST
 void HttpFlowData::show(FILE* out_file) const
 {
index 90759f16f35b6ee5de7bc9dad1828c251acba0e4..9440455b11aee45403dafe84003cb3ab198934b5 100644 (file)
@@ -102,16 +102,15 @@ private:
     bool tcp_close[2] = { false, false };
     bool partial_flush[2] = { false, false };
 
-    // Infractions and events are associated with a specific message and are stored in the
-    // transaction for that message. But StreamSplitter splits the start line before there is
-    // a transaction and needs a place to put the problems it finds. Hence infractions and events
-    // are created before there is a transaction to associate them with and stored here until
-    // attach_my_transaction() takes them away and resets these to nullptr. The accessor methods
-    // hide this from StreamSplitter.
     HttpInfractions* infractions[2] = { new HttpInfractions, new HttpInfractions };
     HttpEventGen* events[2] = { new HttpEventGen, new HttpEventGen };
+
+    // Infractions are associated with a specific message and are stored in the transaction for
+    // that message. But StreamSplitter splits the start line before there is a transaction and
+    // needs a place to put the problems it finds. Hence infractions are created before there is a
+    // transaction to associate them with and stored here until attach_my_transaction() takes them
+    // away and resets these to nullptr. The accessor method hides this from StreamSplitter.
     HttpInfractions* get_infractions(HttpCommon::SourceId source_id);
-    HttpEventGen* get_events(HttpCommon::SourceId source_id);
 
     // *** Inspector => StreamSplitter (facts about the message section that is coming next)
     HttpEnums::SectionType type_expected[2] = { HttpEnums::SEC_REQUEST, HttpEnums::SEC_STATUS };
index 26e8bb88f4f19ff5f7d0b945c6bd7bb9533ec0ec..d36f3e02dab2b54d57ab29012c3c8cabe26e6d11 100644 (file)
@@ -121,7 +121,7 @@ void HttpMsgBody::do_file_decompression(const Field& input, Field& output)
     }
     uint8_t* buffer = new uint8_t[MAX_OCTETS];
     session_data->fd_alert_context.infractions = transaction->get_infractions(source_id);
-    session_data->fd_alert_context.events = transaction->get_events(source_id);
+    session_data->fd_alert_context.events = session_data->events[source_id];
     session_data->fd_state->Next_In = input.start();
     session_data->fd_state->Avail_In = (uint32_t)input.length();
     session_data->fd_state->Next_Out = buffer;
@@ -196,7 +196,7 @@ void HttpMsgBody::do_js_normalization(const Field& input, Field& output)
     }
 
     params->js_norm_param.js_norm->normalize(input, output,
-        transaction->get_infractions(source_id), transaction->get_events(source_id));
+        transaction->get_infractions(source_id), session_data->events[source_id]);
 }
 
 void HttpMsgBody::do_file_processing(const Field& file_data)
index 648eb336dc2d03804e12697d87a05e08f40a2506..ce387112fda221ff6bc98b29118f436503c7d481 100644 (file)
@@ -332,7 +332,7 @@ const Field& HttpMsgHeadShared::get_header_value_norm(HeaderId header_id)
     if (node == nullptr)
         return Field::FIELD_NULL;
     header_norms[header_id]->normalize(header_id, node->count,
-        transaction->get_infractions(source_id), transaction->get_events(source_id),
+        transaction->get_infractions(source_id), session_data->events[source_id],
         header_name_id, header_value, num_headers, node->norm);
     return node->norm;
 }
index a43d3561408c6d1981428d9aa9fce56b1f837e12..d494877fa43f6fea84c16a40553e343c5d298859 100644 (file)
@@ -62,7 +62,7 @@ void HttpMsgRequest::parse_start_line()
         else
         {
             add_infraction(INF_BAD_REQ_LINE);
-            transaction->get_events(source_id)->generate_misformatted_http(start_line.start(),
+            session_data->events[source_id]->generate_misformatted_http(start_line.start(),
                 start_line.length());
             return;
         }
@@ -108,7 +108,7 @@ void HttpMsgRequest::parse_start_line()
     {
         uri = new HttpUri(start_line.start() + first_end + 1, last_begin - first_end - 1,
             method_id, params->uri_param, transaction->get_infractions(source_id),
-            transaction->get_events(source_id));
+            session_data->events[source_id]);
     }
     else
     {
@@ -153,7 +153,7 @@ bool HttpMsgRequest::handle_zero_nine()
                 uri_end--);
             uri = new HttpUri(start_line.start() + uri_begin, uri_end - uri_begin + 1, method_id,
                 params->uri_param, transaction->get_infractions(source_id),
-                transaction->get_events(source_id));
+                session_data->events[source_id]);
         }
         else
         {
index cfe8c8b7c94a7c2718d50e8f70cb81c4b224cb16..7656ee7b0548585986eba397685be8041534c7d8 100644 (file)
@@ -78,7 +78,7 @@ void HttpMsgSection::add_infraction(int infraction)
 
 void HttpMsgSection::create_event(int sid)
 {
-    transaction->get_events(source_id)->create_event(sid);
+    session_data->events[source_id]->create_event(sid);
 }
 
 void HttpMsgSection::update_depth() const
@@ -284,13 +284,13 @@ void HttpMsgSection::print_section_title(FILE* output, const char* title) const
 
 void HttpMsgSection::print_section_wrapup(FILE* output) const
 {
-    fprintf(output, "Infractions: %016" PRIx64 " %016" PRIx64 ", Events: %016" PRIx64 " %016" PRIx64 " %016"
-        PRIx64 ", TCP Close: %s\n\n",
+    fprintf(output, "Infractions: %016" PRIx64 " %016" PRIx64 ", Events: %016" PRIx64 " %016"
+        PRIx64 " %016" PRIx64 ", TCP Close: %s\n\n",
         transaction->get_infractions(source_id)->get_raw2(),
         transaction->get_infractions(source_id)->get_raw(),
-        transaction->get_events(source_id)->get_raw3(),
-        transaction->get_events(source_id)->get_raw2(),
-        transaction->get_events(source_id)->get_raw(),
+        session_data->events[source_id]->get_raw3(),
+        session_data->events[source_id]->get_raw2(),
+        session_data->events[source_id]->get_raw(),
         tcp_close ? "True" : "False");
     if (HttpTestManager::get_show_pegs())
     {
index 5c631bfc852e30613b47d1cba22d394e9acdebf6..007ecccc449fa3a102fba89de11209741c1db268 100644 (file)
@@ -85,7 +85,7 @@ bool HttpStreamSplitter::finish(Flow* flow)
         {
             *session_data->get_infractions(source_id) += INF_PARTIAL_START;
             // FIXIT-M why not use generate_misformatted_http()?
-            session_data->get_events(source_id)->create_event(EVENT_LOSS_OF_SYNC);
+            session_data->events[source_id]->create_event(EVENT_LOSS_OF_SYNC);
             return false;
         }
 
index f69dc1ebb49edf6dba39c19890e0564e4b8ec255..077b1e4ecd3c662a9cbd0c39c4a3959f8da24baf 100644 (file)
@@ -101,7 +101,7 @@ void HttpStreamSplitter::chunk_spray(HttpFlowData* session_data, uint8_t* buffer
             decompress_copy(buffer, session_data->section_offset[source_id], data+k, skip_amount,
                 session_data->compression[source_id], session_data->compress_stream[source_id],
                 at_start, session_data->get_infractions(source_id),
-                session_data->get_events(source_id));
+                session_data->events[source_id]);
             if ((expected -= skip_amount) == 0)
                 curr_state = CHUNK_DCRLF1;
             k += skip_amount-1;
@@ -131,7 +131,7 @@ void HttpStreamSplitter::chunk_spray(HttpFlowData* session_data, uint8_t* buffer
             decompress_copy(buffer, session_data->section_offset[source_id], data+k, skip_amount,
                 session_data->compression[source_id], session_data->compress_stream[source_id],
                 at_start, session_data->get_infractions(source_id),
-                session_data->get_events(source_id));
+                session_data->events[source_id]);
             k += skip_amount-1;
             break;
           }
@@ -397,7 +397,7 @@ const StreamBuffer HttpStreamSplitter::reassemble(Flow* flow, unsigned total,
         decompress_copy(buffer, session_data->section_offset[source_id], data, len,
             session_data->compression[source_id], session_data->compress_stream[source_id],
             at_start, session_data->get_infractions(source_id),
-            session_data->get_events(source_id));
+            session_data->events[source_id]);
     }
     else
     {
index 5f3abef59188731b962498a8c2b0d0b2a8d6743e..187096e62ba45cd5be70eb3476aad2292d60988f 100644 (file)
@@ -214,7 +214,7 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
     }
     const uint32_t max_length = MAX_OCTETS - cutter->get_octets_seen();
     const ScanResult cut_result = cutter->cut(data, (length <= max_length) ? length :
-        max_length, session_data->get_infractions(source_id), session_data->get_events(source_id),
+        max_length, session_data->get_infractions(source_id), session_data->events[source_id],
         session_data->section_size_target[source_id],
         session_data->stretch_section_to_packet[source_id]);
     switch (cut_result)
@@ -226,7 +226,7 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
             *session_data->get_infractions(source_id) += INF_ENDLESS_HEADER;
             // FIXIT-L the following call seems inappropriate for headers and trailers. Those cases
             // should be an unconditional EVENT_LOSS_OF_SYNC.
-            session_data->get_events(source_id)->generate_misformatted_http(data, length);
+            session_data->events[source_id]->generate_misformatted_http(data, length);
             // FIXIT-H need to process this data not just discard it.
             session_data->type_expected[source_id] = SEC_ABORT;
             delete cutter;
index d4b0a833b01b4b2e6502b50d9018e9e0be794bf8..6f49c313886c2b9f2b13431eb12b3f108db7e4a1 100644 (file)
@@ -57,7 +57,6 @@ HttpTransaction::~HttpTransaction()
         delete header[k];
         delete trailer[k];
         delete infractions[k];
-        delete events[k];
     }
     delete_section_list(body_list);
     delete_section_list(discard_list);
@@ -108,17 +107,13 @@ HttpTransaction* HttpTransaction::attach_my_transaction(HttpFlowData* session_da
                 delete_transaction(session_data->transaction[SRC_CLIENT], session_data);
             }
         }
-        session_data->transaction[SRC_CLIENT] = new HttpTransaction;
+        session_data->transaction[SRC_CLIENT] = new HttpTransaction(session_data);
 
-        // The StreamSplitter generates infractions and events related to this transaction while
-        // splitting the request line and keep them in temporary storage in the FlowData. Now we
-        // move them here.
+        // The StreamSplitter generates infractions related to this transaction while splitting the
+        // request line and keeps them in temporary storage in the FlowData. Now we move them here.
         session_data->transaction[SRC_CLIENT]->infractions[SRC_CLIENT] =
             session_data->infractions[SRC_CLIENT];
         session_data->infractions[SRC_CLIENT] = nullptr;
-        session_data->transaction[SRC_CLIENT]->events[SRC_CLIENT] =
-            session_data->events[SRC_CLIENT];
-        session_data->events[SRC_CLIENT] = nullptr;
     }
     // This transaction has more than one response. This is a new response which is replacing the
     // interim response. The two responses cannot coexist so we must clean up the interim response.
@@ -144,7 +139,7 @@ HttpTransaction* HttpTransaction::attach_my_transaction(HttpFlowData* session_da
         if (session_data->pipeline_underflow)
         {
             // A previous underflow separated the two sides forever
-            session_data->transaction[SRC_SERVER] = new HttpTransaction;
+            session_data->transaction[SRC_SERVER] = new HttpTransaction(session_data);
         }
         else if ((session_data->transaction[SRC_SERVER] = session_data->take_from_pipeline()) ==
             nullptr)
@@ -155,7 +150,7 @@ HttpTransaction* HttpTransaction::attach_my_transaction(HttpFlowData* session_da
                 // Either there is no request at all or there is a request but a previous response
                 // already took it. Either way we have more responses than requests.
                 session_data->pipeline_underflow = true;
-                session_data->transaction[SRC_SERVER] = new HttpTransaction;
+                session_data->transaction[SRC_SERVER] = new HttpTransaction(session_data);
             }
 
             else if (session_data->type_expected[SRC_CLIENT] == SEC_REQUEST)
@@ -176,13 +171,10 @@ HttpTransaction* HttpTransaction::attach_my_transaction(HttpFlowData* session_da
         }
         session_data->transaction[SRC_SERVER]->response_seen = true;
 
-        // Move in server infractions and events now that the response is attached here
+        // Move in server infractions now that the response is attached here
         session_data->transaction[SRC_SERVER]->infractions[SRC_SERVER] =
             session_data->infractions[SRC_SERVER];
         session_data->infractions[SRC_SERVER] = nullptr;
-        session_data->transaction[SRC_SERVER]->events[SRC_SERVER] =
-            session_data->events[SRC_SERVER];
-        session_data->events[SRC_SERVER] = nullptr;
     }
 
     assert(session_data->transaction[source_id] != nullptr);
@@ -251,18 +243,13 @@ HttpInfractions* HttpTransaction::get_infractions(SourceId source_id)
     return infractions[source_id];
 }
 
-HttpEventGen* HttpTransaction::get_events(SourceId source_id)
-{
-    return events[source_id];
-}
-
 void HttpTransaction::set_one_hundred_response()
 {
     assert(response_seen);
     if (one_hundred_response)
     {
         *infractions[SRC_SERVER] += INF_MULTIPLE_100_RESPONSES;
-        events[SRC_SERVER]->create_event(EVENT_MULTIPLE_100_RESPONSES);
+        session_data->events[SRC_SERVER]->create_event(EVENT_MULTIPLE_100_RESPONSES);
     }
     one_hundred_response = true;
     second_response_expected = true;
index ddeedcbb6e3a4aba6bf8e7f0ec3075f913cb2866..12835cb3ff7435737a46395c21f27f3727927da1 100644 (file)
@@ -22,6 +22,7 @@
 
 #include "http_common.h"
 #include "http_enum.h"
+#include "http_event.h"
 #include "http_flow_data.h"
 
 class HttpMsgRequest;
@@ -31,9 +32,6 @@ class HttpMsgTrailer;
 class HttpMsgSection;
 class HttpMsgBody;
 class HttpMsgHeadShared;
-class HttpEventGen;
-template <int MAX, int NONE> class Infractions;
-using HttpInfractions = Infractions<HttpEnums::INF__MAX_VALUE, HttpEnums::INF__NONE>;
 
 class HttpTransaction
 {
@@ -60,7 +58,6 @@ public:
     void set_body(HttpMsgBody* latest_body);
 
     HttpInfractions* get_infractions(HttpCommon::SourceId source_id);
-    HttpEventGen* get_events(HttpCommon::SourceId source_id);
 
     void set_one_hundred_response();
     bool final_response() const { return !second_response_expected; }
@@ -78,9 +75,15 @@ public:
         { return file_processing_id[source_id]; }
 
 private:
-    HttpTransaction() = default;
+    HttpTransaction(HttpFlowData* session_data_) : session_data(session_data_)
+    {
+        infractions[0] = nullptr;
+        infractions[1] = nullptr;
+    }
     void discard_section(HttpMsgSection* section);
 
+    HttpFlowData* const session_data;
+
     uint64_t active_sections = 0;
 
     HttpMsgRequest* request = nullptr;
@@ -89,8 +92,7 @@ private:
     HttpMsgTrailer* trailer[2] = { nullptr, nullptr };
     HttpMsgBody* body_list = nullptr;
     HttpMsgSection* discard_list = nullptr;
-    HttpInfractions* infractions[2] = { nullptr, nullptr };
-    HttpEventGen* events[2] = { nullptr, nullptr };
+    HttpInfractions* infractions[2];
 
     uint64_t file_processing_id[2] = { 0, 0 };