git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Merge pull request #1214 in SNORT/snort3 from nhttp102 to master
authorTom Peters (thopeter) <thopeter@cisco.com>
Thu, 3 May 2018 17:53:13 +0000 (13:53 -0400)
committerTom Peters (thopeter) <thopeter@cisco.com>
Thu, 3 May 2018 17:53:13 +0000 (13:53 -0400)
Squashed commit of the following:

commit 1c2f2fb934bf4dcd3005cda1321db866b1ce2c8f
Author: Tom Peters <thopeter@cisco.com>
Date:   Mon Apr 16 16:15:12 2018 -0400

    http_inspect: performance improvements

doc/tutorial.txt
src/service_inspectors/http_inspect/http_cutter.cc
src/service_inspectors/http_inspect/http_msg_section.cc

index c43009177cb8f6991d6f7d38360dae140500a3e8..dfff778c7169ef08e67ba86b63f452fde42358be 100644 (file)
@@ -263,19 +263,19 @@ include::errors.txt[]
 
 * A nil key in a table will not be caught.  Neither will a nil value in a
   table.  Neither of the following will cause errors, nor will they
-  actually set http_server.post_depth:
+  actually set http_inspect.request_depth:
 
-    http_server = { post_depth }
-    http_server = { post_depth = undefined_symbol }
+    http_inspect = { request_depth }
+    http_inspect = { request_depth = undefined_symbol }
 
 * It is not an error to set a value multiple times.  The actual value
   applied may not be the last in the table either.  It is best to avoid
   such cases.
 
-    http_server =
+    http_inspect =
     {
-        post_depth = 1234,
-        post_depth = 4321
+        request_depth = 1234,
+        request_depth = 4321
     }
 
 * Snort can't tell you the exact filename or line number of a semantic
index 7eace0b28b68def253255cd7b19925dbe025e970..68bf4b0ef5ec278e348545284ba589e8eda4bdc4 100644 (file)
@@ -345,6 +345,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 {
                     events->create_event(EVENT_BROKEN_CHUNK);
                     curr_state = CHUNK_BAD;
+                    k--;
                 }
                 break;
             }
@@ -397,6 +398,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 *infractions += INF_CHUNK_BAD_CHAR;
                 events->create_event(EVENT_BROKEN_CHUNK);
                 curr_state = CHUNK_BAD;
+                k--;
             }
             else
             {
@@ -407,6 +409,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                     *infractions += INF_CHUNK_TOO_LARGE;
                     events->create_event(EVENT_BROKEN_CHUNK);
                     curr_state = CHUNK_BAD;
+                    k--;
                 }
             }
             break;
@@ -435,6 +438,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 *infractions += INF_CHUNK_BAD_CHAR;
                 events->create_event(EVENT_BROKEN_CHUNK);
                 curr_state = CHUNK_BAD;
+                k--;
             }
             break;
         case CHUNK_OPTIONS:
@@ -462,6 +466,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 *infractions += INF_CHUNK_LONE_CR;
                 events->create_event(EVENT_BROKEN_CHUNK);
                 curr_state = CHUNK_BAD;
+                k--;
                 break;
             }
             if (expected > 0)
@@ -480,6 +485,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 *infractions += INF_CHUNK_NO_LENGTH;
                 events->create_event(EVENT_BROKEN_CHUNK);
                 curr_state = CHUNK_BAD;
+                k--;
             }
             break;
         case CHUNK_DATA:
@@ -497,7 +503,6 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
             }
             if ((data_seen += skip_amount) == flow_target)
             {
-                // FIXIT-M need to randomize slice point
                 data_seen = 0;
                 num_flush = k+1;
                 new_section = true;
@@ -523,6 +528,7 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 *infractions += INF_CHUNK_BAD_END;
                 events->create_event(EVENT_BROKEN_CHUNK);
                 curr_state = CHUNK_BAD;
+                k--;
             }
             break;
         case CHUNK_DCRLF2:
@@ -553,13 +559,19 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
                 num_flush = length;
                 return SCAN_DISCARD_PIECE;
             }
+
+            // When chunk parsing breaks down and we first enter CHUNK_BAD state, it may happen
+            // that there were chunk header bytes between the last good chunk and the point where
+            // the failure occurred. These will not have been counted in data_seen because we
+            // planned to delete them during reassembly. Because they are not part of a valid chunk
+            // they will be reassembled after all. This will overrun the flow_target making the
+            // message section a little bigger than planned. It's not important.
             uint32_t skip_amount = length-k;
             skip_amount = (skip_amount <= flow_target-data_seen) ? skip_amount :
                 flow_target-data_seen;
             k += skip_amount - 1;
             if ((data_seen += skip_amount) == flow_target)
             {
-                // FIXIT-M need to randomize slice point
                 data_seen = 0;
                 num_flush = k+1;
                 new_section = true;
index f8dda87a7e5ee59b7b4987ebb99146cbe3d20b5f..fb0cc73b20ad9672de0bd2f76cb8d3dd7e713b16 100644 (file)
@@ -70,14 +70,15 @@ void HttpMsgSection::create_event(int sid)
 
 void HttpMsgSection::update_depth() const
 {
-    if ((session_data->detect_depth_remaining[source_id] <= 0) &&
-        (session_data->detection_status[source_id] == DET_ON))
+    const int64_t& ddr = session_data->detect_depth_remaining[source_id];
+    const int64_t& fdr = session_data->file_depth_remaining[source_id];
+
+    if ((ddr <= 0) && (session_data->detection_status[source_id] == DET_ON))
     {
         session_data->detection_status[source_id] = DET_DEACTIVATING;
     }
 
-    if ((session_data->file_depth_remaining[source_id] <= 0) &&
-        (session_data->detect_depth_remaining[source_id] <= 0))
+    if ((ddr <= 0) && (fdr <= 0))
     {
         // Don't need any more of the body
         session_data->section_size_target[source_id] = 0;
@@ -85,25 +86,28 @@ void HttpMsgSection::update_depth() const
         return;
     }
 
-    const int random_increment = FlushBucket::get_size() - 192;
-    assert((random_increment >= -64) && (random_increment <= 63));
+    const unsigned max_pdu = snort::SnortConfig::get_conf()->max_pdu;
+    const unsigned target_size = (session_data->compression[source_id] == CMP_NONE) ?
+        max_pdu : GZIP_BLOCK_SIZE;
+    const unsigned max_size = (session_data->compression[source_id] == CMP_NONE) ?
+        max_pdu + (max_pdu >> 1) : FINAL_GZIP_BLOCK_SIZE;
 
-    switch (session_data->compression[source_id])
+    if (ddr <= 0)
     {
-    case CMP_NONE:
-      {
-        unsigned max_pdu = snort::SnortConfig::get_conf()->max_pdu;
-        session_data->section_size_target[source_id] = max_pdu + random_increment;
-        session_data->section_size_max[source_id] = max_pdu + (max_pdu >> 1);
-        break;
-      }
-    case CMP_GZIP:
-    case CMP_DEFLATE:
-        session_data->section_size_target[source_id] = GZIP_BLOCK_SIZE + random_increment;
-        session_data->section_size_max[source_id] = FINAL_GZIP_BLOCK_SIZE;
-        break;
-    default:
-        assert(false);
+        session_data->section_size_target[source_id] = target_size;
+        session_data->section_size_max[source_id] = max_size;
+    }
+    else if (ddr <= max_size)
+    {
+        session_data->section_size_target[source_id] = ddr + 200;
+        session_data->section_size_max[source_id] = ddr + 200;
+    }
+    else
+    {
+        const int random_increment = FlushBucket::get_size() - 192;
+        assert((random_increment >= -64) && (random_increment <= 63));
+        session_data->section_size_target[source_id] = target_size + random_increment;
+        session_data->section_size_max[source_id] = max_size;
     }
 }