git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Merge pull request #1738 in SNORT/snort3 from ~THOPETER/snort3:nhttp125 to master
author: Mike Stepanek (mstepane) <mstepane@cisco.com>
Mon, 9 Sep 2019 19:53:14 +0000 (15:53 -0400)
committer: Mike Stepanek (mstepane) <mstepane@cisco.com>
Mon, 9 Sep 2019 19:53:14 +0000 (15:53 -0400)
Squashed commit of the following:

commit 66eaee24b9d6e8f3b8073ecd88e4ba9799c80fc3
Author: Tom Peters <thopeter@cisco.com>
Date:   Mon Jul 8 12:17:04 2019 -0400

    http_inspect: accelerated blocking for chunked message bodies

src/service_inspectors/http_inspect/http_cutter.cc
src/service_inspectors/http_inspect/http_flow_data.h
src/service_inspectors/http_inspect/http_inspect.cc
src/service_inspectors/http_inspect/http_msg_body.cc
src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
src/stream/libtcp/tcp_stream_tracker.cc

index 7e48de7ca95561a8a53a18b0950153bc5503060b..fe99162ddb7c5da0b5e715b0b1ae91a810fe6220 100644 (file)
@@ -371,6 +371,8 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
 
     const uint32_t adjusted_target = stretch ? MAX_SECTION_STRETCH + flow_target : flow_target;
 
+    bool detain_this_packet = false;
+
     for (int32_t k=0; k < static_cast<int32_t>(length); k++)
     {
         switch (curr_state)
@@ -548,6 +550,10 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
             { // Do not exceed requested section size (including stretching)
                 skip_amount = adjusted_target-data_seen;
             }
+
+            if (!detain_this_packet)
+                detain_this_packet = need_accelerated_blocking(buffer+k, skip_amount);
+
             k += skip_amount - 1;
             if ((expected -= skip_amount) == 0)
             {
@@ -620,6 +626,8 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
             uint32_t skip_amount = length-k;
             skip_amount = (skip_amount <= adjusted_target-data_seen) ? skip_amount :
                 adjusted_target-data_seen;
+            if (!detain_this_packet)
+                detain_this_packet = need_accelerated_blocking(buffer+k, skip_amount);
             k += skip_amount - 1;
             if ((data_seen += skip_amount) == adjusted_target)
             {
@@ -645,7 +653,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
     }
 
     octets_seen += length;
-    return SCAN_NOT_FOUND;
+    return detain_this_packet ? SCAN_NOT_FOUND_DETAIN : SCAN_NOT_FOUND;
 }
 
 // This method searches the input stream looking for the beginning of a script or other dangerous
index c1113c5b224d0810aea0ea7cc467fe838b1c9f16..f8ffef62f442a4f1bce9ac01a312d2d98e3b93cd 100644 (file)
@@ -83,6 +83,7 @@ private:
     uint32_t running_total[2] = { 0, 0 };
     HttpEnums::ChunkState chunk_state[2] = { HttpEnums::CHUNK_NEWLINES,
         HttpEnums::CHUNK_NEWLINES };
+    uint32_t partial_raw_bytes[2] = { 0, 0 };
     uint8_t* partial_buffer[2] = { nullptr, nullptr };
     uint32_t partial_buffer_length[2] = { 0, 0 };
 
index b40ca895ca5a55b149c12e5b318bd0c72cb5d19c..712be4d13f285cdc6230e29c86c2c6f8cab5b769 100644 (file)
@@ -298,8 +298,8 @@ void HttpInspect::eval(Packet* p)
         p->set_detect_limit(session_data->detect_depth_remaining[source_id]);
     }
 
-    const bool partial_flush = session_data->partial_flush[source_id];
-    if (!process(p->data, p->dsize, p->flow, source_id, !partial_flush))
+    const bool buf_owner = !session_data->partial_flush[source_id];
+    if (!process(p->data, p->dsize, p->flow, source_id, buf_owner))
     {
         DetectionEngine::disable_content(p);
     }
index 18edcb2e9943df537bf09c379d0f9a095646831c..9613bd5bb760b4766cb61d424d6fc1c4578f9f90 100644 (file)
@@ -58,7 +58,8 @@ void HttpMsgBody::analyze()
             (js_norm_body.length() <= session_data->detect_depth_remaining[source_id]) ?
             js_norm_body.length() : session_data->detect_depth_remaining[source_id];
         detect_data.set(detect_length, js_norm_body.start());
-        session_data->detect_depth_remaining[source_id] -= detect_length;
+        if (!session_data->partial_flush[source_id])
+            session_data->detect_depth_remaining[source_id] -= detect_length;
         snort::set_file_data(const_cast<uint8_t*>(detect_data.start()),
             (unsigned)detect_data.length());
     }
index 6a3d13fdd329613fa052b01b2cd58f17a0e980d4..01baa31dc88a5462322af03d1045eb80b944a6d0 100644 (file)
@@ -291,12 +291,13 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
     assert(session_data->section_type[source_id] != SEC__NOT_COMPUTE);
     uint8_t*& partial_buffer = session_data->partial_buffer[source_id];
     uint32_t& partial_buffer_length = session_data->partial_buffer_length[source_id];
-    assert(partial_buffer_length + total <= MAX_OCTETS);
+    uint32_t& partial_raw_bytes = session_data->partial_raw_bytes[source_id];
+    assert(partial_raw_bytes + total <= MAX_OCTETS);
 
     // FIXIT-H this is a precaution/workaround for stream issues. When they are fixed replace this
     // block with an assert.
     if ((session_data->section_offset[source_id] == 0) &&
-        (session_data->octets_expected[source_id] != (total + partial_buffer_length)))
+        (session_data->octets_expected[source_id] != partial_raw_bytes + total))
     {
         if (session_data->octets_expected[source_id] == 0)
         {
@@ -371,25 +372,25 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
             buffer = new uint8_t[MAX_OCTETS];
         else
         {
-            const uint32_t buffer_size = ((partial_buffer_length + total) > 0) ?
-                (partial_buffer_length + total) : 1;
+            const uint32_t buffer_size = (total > 0) ? total : 1;
             buffer = new uint8_t[buffer_size];
         }
     }
 
-    // FIXIT-H there is no support here for partial flush with either chunking or compression
+    // FIXIT-H there is no support for partial flush with compression
+    assert((partial_buffer_length == 0) || (session_data->compression[source_id] == CMP_NONE));
+    if (partial_buffer_length > 0)
+    {
+        assert(session_data->section_offset[source_id] == 0);
+        memcpy(buffer, partial_buffer, partial_buffer_length);
+        session_data->section_offset[source_id] = partial_buffer_length;
+        partial_buffer_length = 0;
+        delete[] partial_buffer;
+        partial_buffer = nullptr;
+    }
+
     if (session_data->section_type[source_id] != SEC_BODY_CHUNK)
     {
-        assert((partial_buffer_length == 0) || (session_data->compression[source_id] == CMP_NONE));
-        if (partial_buffer_length > 0)
-        {
-            assert(session_data->section_offset[source_id] == 0);
-            memcpy(buffer, partial_buffer, partial_buffer_length);
-            session_data->section_offset[source_id] = partial_buffer_length;
-            partial_buffer_length = 0;
-            delete[] partial_buffer;
-            partial_buffer = nullptr;
-        }
         const bool at_start = (session_data->body_octets[source_id] == 0) &&
              (session_data->section_offset[source_id] == 0);
         decompress_copy(buffer, session_data->section_offset[source_id], data, len,
@@ -399,7 +400,6 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
     }
     else
     {
-        assert(partial_buffer_length == 0);
         chunk_spray(session_data, buffer, data, len);
     }
 
@@ -416,7 +416,10 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
             // Store the data from a partial flush for reuse
             partial_buffer = buffer;
             partial_buffer_length = buf_size;
+            partial_raw_bytes += total;
         }
+        else
+            partial_raw_bytes = 0;
 
         http_buf.data = buffer;
         http_buf.length = buf_size;
index 8e852b37e070b9b8be3adfda2347a8152068766c..1dad641fccf05d7dec8fd9f40dfc197024ed7f1a 100644 (file)
@@ -659,7 +659,7 @@ bool TcpStreamTracker::set_held_packet(snort::Packet* p)
     //           such as # of DAQ Msg buffers, # of threads, etc... for now we use small number like 10
     if ( held_packet )
         return false;
-    if ( tcpStats.current_packets_held >= 10 )
+    if ( tcpStats.current_packets_held >= 50 )
     {
         tcpStats.held_packet_limit_exceeded++;
         return false;