]> git.ipfire.org Git - thirdparty/snort3.git/commitdiff
Merge pull request #1621 in SNORT/snort3 from ~THOPETER/snort3:merge4 to master
authorMike Stepanek (mstepane) <mstepane@cisco.com>
Tue, 4 Jun 2019 14:31:39 +0000 (10:31 -0400)
committerMike Stepanek (mstepane) <mstepane@cisco.com>
Tue, 4 Jun 2019 14:31:39 +0000 (10:31 -0400)
Squashed commit of the following:

commit 67ff9e50695a75b8fe2e9505620b091f624aef16
Author: Tom Peters <thopeter@cisco.com>
Date:   Mon May 13 16:28:57 2019 -0400

    http_inspect/stream: accelerated blocking

43 files changed:
doc/http_inspect.txt
src/flow/session.h
src/main/analyzer.cc
src/main/analyzer.h
src/packet_io/active.cc
src/packet_io/active.h
src/pub_sub/active_events.h [new file with mode: 0644]
src/service_inspectors/http_inspect/dev_notes.txt
src/service_inspectors/http_inspect/http_cutter.cc
src/service_inspectors/http_inspect/http_cutter.h
src/service_inspectors/http_inspect/http_enum.h
src/service_inspectors/http_inspect/http_flow_data.cc
src/service_inspectors/http_inspect/http_flow_data.h
src/service_inspectors/http_inspect/http_inspect.cc
src/service_inspectors/http_inspect/http_module.cc
src/service_inspectors/http_inspect/http_module.h
src/service_inspectors/http_inspect/http_msg_body.cc
src/service_inspectors/http_inspect/http_msg_body_chunk.cc
src/service_inspectors/http_inspect/http_msg_body_cl.cc
src/service_inspectors/http_inspect/http_msg_body_old.cc
src/service_inspectors/http_inspect/http_msg_header.cc
src/service_inspectors/http_inspect/http_msg_request.cc
src/service_inspectors/http_inspect/http_msg_status.cc
src/service_inspectors/http_inspect/http_msg_trailer.cc
src/service_inspectors/http_inspect/http_stream_splitter.h
src/service_inspectors/http_inspect/http_stream_splitter_finish.cc
src/service_inspectors/http_inspect/http_stream_splitter_reassemble.cc
src/service_inspectors/http_inspect/http_stream_splitter_scan.cc
src/service_inspectors/http_inspect/http_tables.cc
src/service_inspectors/http_inspect/http_test_input.cc
src/service_inspectors/http_inspect/http_test_input.h
src/stream/libtcp/tcp_stream_session.cc
src/stream/libtcp/tcp_stream_session.h
src/stream/libtcp/tcp_stream_tracker.cc
src/stream/libtcp/tcp_stream_tracker.h
src/stream/stream.cc
src/stream/stream.h
src/stream/stream_splitter.h
src/stream/tcp/tcp_module.cc
src/stream/tcp/tcp_module.h
src/stream/tcp/tcp_reassembler.cc
src/stream/tcp/tcp_session.cc
tools/u2spewfoo/u2spewfoo.cc

index af967afa135df7fc74c43f340937a71def84803c..ea995f710e859bf9589d72766954d7ef1b81d664 100644 (file)
@@ -99,6 +99,20 @@ depth parameter entirely because that is the default.
 These limits have no effect on how much data is forwarded to file
 processing.
 
+===== accelerated_blocking
+
+Accelerated blocking is an experimental feature currently under
+development. It enables Snort to more quickly detect and block response
+messages containing malicious JavaScript. As this feature involves
+actively blocking traffic it is designed for use with inline mode
+operation (-Q).
+
+This feature only functions with response_depth = -1 (unlimited). This
+limitation will be removed in a future version.
+
+This feature is off by default; set accelerated_blocking = true to
+activate it.
+
 ===== gzip
 
 http_inspect by default decompresses deflate and gzip message bodies
index 6bc47d1a84de91883c2b54f14c02fd7eb32e38d3..25979ebf013d7588dc28c026083917f319c3c079 100644 (file)
@@ -70,6 +70,7 @@ public:
 
     virtual uint8_t get_reassembly_direction() { return SSN_DIR_NONE; }
     virtual uint8_t missing_in_reassembled(uint8_t /*dir*/) { return SSN_MISSING_NONE; }
+    virtual bool set_packet_action_to_hold(snort::Packet*) { return false; }
 
 protected:
     Session(snort::Flow* f) { flow = f; }
index 0a915adca94dcb4809440b976a5d5b9609886fe9..2271472d30a20f71e16e46060979cda31e27b7a4 100644 (file)
@@ -294,7 +294,7 @@ void Analyzer::post_process_daq_pkt_msg(Packet* p)
 
     if (verdict == DAQ_VERDICT_RETRY)
         retry_queue->put(p->daq_msg);
-    else
+    else if ( !p->active->is_packet_held() )
         p->daq_instance->finalize_message(p->daq_msg, verdict);
 }
 
@@ -396,6 +396,11 @@ void Analyzer::post_process_packet(Packet* p)
     switcher->stop();
 }
 
+void Analyzer::finalize_daq_message(DAQ_Msg_h msg, DAQ_Verdict verdict)
+{
+    daq_instance->finalize_message(msg, verdict);
+}
+
 //-------------------------------------------------------------------------
 // Utility
 //-------------------------------------------------------------------------
index b8a4c66708267fb64cc1a51a3e11c0437c887850..2e0b6e667861004df0ccec62bc01aeb075daa15f 100644 (file)
@@ -83,6 +83,7 @@ public:
     void post_process_packet(snort::Packet*);
     bool process_rebuilt_packet(snort::Packet*, const DAQ_PktHdr_t*, const uint8_t* pkt, uint32_t pktlen);
     bool inspect_rebuilt(snort::Packet*);
+    void finalize_daq_message(DAQ_Msg_h, DAQ_Verdict);
 
     // Functions called by analyzer commands
     void start();
index 1270f7265b8d02e4c56238901276a12da1ada41c..f1821afa736e25f5ac36aa794c2f77028867ab35 100644 (file)
@@ -29,6 +29,7 @@
 #include "main/snort_config.h"
 #include "managers/action_manager.h"
 #include "protocols/tcp.h"
+#include "pub_sub/active_events.h"
 #include "stream/stream.h"
 #include "utils/dnet_header.h"
 
@@ -41,6 +42,7 @@ using namespace snort;
 const char* Active::act_str[Active::ACT_MAX][Active::AST_MAX] =
 {
     { "allow", "error", "error", "error" },
+    { "hold", "error", "error", "error" },
     { "retry", "error", "error", "error" },
     { "drop", "cant_drop", "would_drop", "force_drop" },
     { "block", "cant_block", "would_block", "force_block" },
@@ -437,6 +439,17 @@ bool Active::daq_retry_packet(const Packet* p)
     return retry_queued;
 }
 
+bool Active::hold_packet(const Packet*)
+{
+    if ( active_action < ACT_HOLD )
+    {
+        active_action = ACT_HOLD;
+        return true;
+    }
+
+    return false;
+}
+
 void Active::allow_session(Packet* p)
 {
     active_action = ACT_PASS;
index f20842ae1d4e731d8460da773a662a532971c03f..097924fe146156bb2fc3a29078b882c2d2cdf917 100644 (file)
@@ -45,7 +45,7 @@ public:
     { AST_ALLOW, AST_CANT, AST_WOULD, AST_FORCE, AST_MAX };
 
     enum ActiveAction : uint8_t
-    { ACT_PASS, ACT_RETRY, ACT_DROP, ACT_BLOCK, ACT_RESET, ACT_MAX };
+    { ACT_PASS, ACT_HOLD, ACT_RETRY, ACT_DROP, ACT_BLOCK, ACT_RESET, ACT_MAX };
 
 public:
     static void init(SnortConfig*);
@@ -86,6 +86,7 @@ public:
     void drop_packet(const Packet*, bool force = false);
     void daq_drop_packet(const Packet*);
     bool daq_retry_packet(const Packet*);
+    bool hold_packet(const Packet*);
 
     void allow_session(Packet*);
     void block_session(Packet*, bool force = false);
@@ -112,6 +113,9 @@ public:
     bool packet_force_dropped()
     { return active_status == AST_FORCE; }
 
+    bool is_packet_held()
+    { return active_action == ACT_HOLD; }
+
     void set_tunnel_bypass()
     { active_tunnel_bypass++; }
 
diff --git a/src/pub_sub/active_events.h b/src/pub_sub/active_events.h
new file mode 100644 (file)
index 0000000..b5f600c
--- /dev/null
@@ -0,0 +1,57 @@
+//--------------------------------------------------------------------------
+// Copyright (C) 2016-2019 Cisco and/or its affiliates. All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License Version 2 as published
+// by the Free Software Foundation.  You may not use, modify or distribute
+// this program under any other version of the GNU General Public License.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+//--------------------------------------------------------------------------
+
+// active_events.h author davis mcpherson <davmcphe@cisco.com>
+// Active action events published to notify clients of state change in
+// the active action for the packet
+
+#ifndef ACTIVE_EVENTS_H
+#define ACTIVE_EVENTS_H
+
+#include <framework/data_bus.h>
+#include <packet_io/active.h>
+
+namespace snort
+{
+struct Packet;
+
+class SO_PUBLIC ActiveEvent : public DataEvent
+{
+public:
+    ActiveEvent
+        (const Active::ActiveAction current, const Active::ActiveAction previous, const Packet* p)
+        : current_action(current), previous_action(previous), pkt(p)
+    { }
+
+    Active::ActiveAction get_current_action() const
+    { return current_action; }
+
+    Active::ActiveAction get_previous_action() const
+    { return previous_action; }
+
+    const Packet* get_pkt() const
+    { return pkt; }
+
+private:
+    const Active::ActiveAction current_action;
+    const Active::ActiveAction previous_action;
+    const Packet* pkt;
+};
+}
+#endif
+
index 096d30acf516f5981f5d158b639d47aa65e7955d..2cd87e624bf2b5ea9308575342434025515b31ca 100644 (file)
@@ -2,10 +2,27 @@ The new Snort HTTP inspector (HI) is divided into two major parts. The HttpStrea
 (splitter) accepts TCP payload data from Stream and subdivides it into message sections.
 HttpInspect (inspector) processes individual message sections.
 
-Splitter finish() is called by the framework when the TCP connection closes (including pruning).
+Splitter finish() is called by Stream when the TCP connection closes (including pruning).
 It serves several specialized purposes in cases where the HTTP message is truncated (ends
 unexpectedly).
 
+The nature of splitting allows packets to be forwarded before they are aggregated into a message
+section and inspected. Accelerated blocking is a feature that allows the splitter to designate
+to Stream the packets that are too risky to forward without being inspected. These packets are detained
+until such time as inspection is completed. The design is based on the principle that detaining
+one packet in a TCP stream effectively blocks all subsequent packets from being reassembled and
+delivered to a target application. Once a packet is detained no attempt is made to detain
+additional packets.
+
+Splitter init_partial_flush() is called by Stream when a previously detained packet must be dropped
+or released immediately. It sets up reassembly and inspection of a partial message section
+containing all available data. Once inspection of this partial message section is complete, for
+most of HI it is as if it never happened. scan() continues to split the original message section
+with all the original data. The inspector will perform a completely new inspection of the full
+message section. Only reassemble() knows that something different is happening. Stream only
+delivers message data for reassembly once. reassemble() stores data received for a partial
+inspection and prepends it to the buffer for the next inspection.
+
 HttpFlowData is a data class representing all HI information relating to a flow. It serves as
 persistent memory between invocations of HI by the framework. It also glues together the inspector,
 the client-to-server splitter, and the server-to-client splitter which pass information through the
@@ -166,7 +183,8 @@ Commands:
   @request and @response set the message direction. Applies to subsequent paragraphs until changed.
      The initial direction is always request and the break command resets the direction to request.
   @fill <decimal number> create a paragraph consisting of <number> octets of auto-fill data
-     ABCDEFGHIJABC ... Primarily useful for chunk testing.
+     ABCDEFGHIJABC ...
+  @partial causes a partial flush, simulating a retransmission of a detained packet
   @fileset <pathname> specifies a file from which the tool will read data into the message buffer.
      This may be used to include a zipped or other binary file into a message body. Data is read
      beginning at the start of the file.
@@ -194,19 +212,6 @@ Escape sequences begin with '\'. They may be used within a paragraph or to begin
 Data is separated into segments for presentation to the splitter whenever a paragraph ends (blank
 line).
 
-Whenever a segment contains insufficient data to make up a body, fill data will be provided to
-make up the difference based on the Content-Length field. Specifically, flushing more data than in
-the current segment will trigger filling. The user should include at least one character of body
-data either as part of the previous header segment or at the beginning of a new segment following
-the headers. Fill data will be read from the currently included file. If no included file is
-currently set the fill data will be generated in the pattern ABC...XYZABC...
-
-The current chunk reassembly algorithm does not flush data beyond the current segment. The fill
-feature does not work with chunked bodies but the fill command can be used.
-
-Fill data will not be provided for a paragraph that is preceded by tcpclose. The body will
-terminate at the end of the paragraph.
-
 This test tool does not implement the feature of being hardened against bad input. If you write a
 badly formatted or improper test case the program may assert or crash. The responsibility is on the
 developer to get it right. Currently that is the best use of resources.
index 885f50ab509f01d2f644ba4729850607bbc3d85d..7e48de7ca95561a8a53a18b0950153bc5503060b 100644 (file)
@@ -250,7 +250,7 @@ ScanResult HttpHeaderCutter::cut(const uint8_t* buffer, uint32_t length,
     return SCAN_NOT_FOUND;
 }
 
-ScanResult HttpBodyClCutter::cut(const uint8_t*, uint32_t length, HttpInfractions*,
+ScanResult HttpBodyClCutter::cut(const uint8_t* buffer, uint32_t length, HttpInfractions*,
     HttpEventGen*, uint32_t flow_target, bool stretch)
 {
     assert(remaining > octets_seen);
@@ -279,25 +279,23 @@ ScanResult HttpBodyClCutter::cut(const uint8_t*, uint32_t length, HttpInfraction
         stretch = false;
     }
 
+    if (octets_seen + length < flow_target)
+    {
+        octets_seen += length;
+        return need_accelerated_blocking(buffer, length) ? SCAN_NOT_FOUND_DETAIN : SCAN_NOT_FOUND;
+    }
+
     if (!stretch)
     {
-        num_flush = flow_target;
-        if (num_flush < remaining)
+        remaining -= flow_target;
+        num_flush = flow_target - octets_seen;
+        if (remaining > 0)
         {
-            remaining -= num_flush;
+            need_accelerated_blocking(buffer, num_flush);
             return SCAN_FOUND_PIECE;
         }
         else
-        {
-            remaining = 0;
             return SCAN_FOUND;
-        }
-    }
-
-    if (octets_seen + length < flow_target)
-    {
-        octets_seen += length;
-        return SCAN_NOT_FOUND;
     }
 
     if (octets_seen + length < remaining)
@@ -309,6 +307,7 @@ ScanResult HttpBodyClCutter::cut(const uint8_t*, uint32_t length, HttpInfraction
         else
             num_flush = flow_target - octets_seen;
         remaining -= octets_seen + num_flush;
+        need_accelerated_blocking(buffer, num_flush);
         return SCAN_FOUND_PIECE;
     }
 
@@ -323,11 +322,12 @@ ScanResult HttpBodyClCutter::cut(const uint8_t*, uint32_t length, HttpInfraction
     // Cannot stretch to the end of the message body. Cut at the original target.
     num_flush = flow_target - octets_seen;
     remaining -= flow_target;
+    need_accelerated_blocking(buffer, num_flush);
     return SCAN_FOUND_PIECE;
 }
 
-ScanResult HttpBodyOldCutter::cut(const uint8_t*, uint32_t length, HttpInfractions*, HttpEventGen*,
-    uint32_t flow_target, bool stretch)
+ScanResult HttpBodyOldCutter::cut(const uint8_t* buffer, uint32_t length, HttpInfractions*,
+    HttpEventGen*, uint32_t flow_target, bool stretch)
 {
     if (flow_target == 0)
     {
@@ -344,12 +344,13 @@ ScanResult HttpBodyOldCutter::cut(const uint8_t*, uint32_t length, HttpInfractio
     {
         // Not enough data yet to create a message section
         octets_seen += length;
-        return SCAN_NOT_FOUND;
+        return need_accelerated_blocking(buffer, length) ? SCAN_NOT_FOUND_DETAIN : SCAN_NOT_FOUND;
     }
     else if (stretch && (octets_seen + length <= flow_target + MAX_SECTION_STRETCH))
     {
         // Cut the section at the end of this TCP segment to avoid splitting a packet
         num_flush = length;
+        need_accelerated_blocking(buffer, num_flush);
         return SCAN_FOUND_PIECE;
     }
     else
@@ -357,6 +358,7 @@ ScanResult HttpBodyOldCutter::cut(const uint8_t*, uint32_t length, HttpInfractio
         // Cut the section at the target length. Either stretching is not allowed or the end of
         // the segment is too far away.
         num_flush = flow_target - octets_seen;
+        need_accelerated_blocking(buffer, num_flush);
         return SCAN_FOUND_PIECE;
     }
 }
@@ -646,3 +648,48 @@ ScanResult HttpBodyChunkCutter::cut(const uint8_t* buffer, uint32_t length,
     return SCAN_NOT_FOUND;
 }
 
+// This method searches the input stream looking for the beginning of a script or other dangerous
+// content that requires accelerated blocking. Exactly what we are looking for is encapsulated in
+// dangerous().
+//
+// Return value true indicates a match and enables the packet that completes the matching sequence
+// to be detained.
+//
+// Once accelerated blocking is activated on a message body it never goes away. The first packet
+// of every subsequent message section must be detained (detention_required). Supporting this
+// requirement requires that the calling routine submit all data including buffers that are about
+// to be flushed.
+bool HttpBodyCutter::need_accelerated_blocking(const uint8_t* data, uint32_t length)
+{
+    if (!accelerated_blocking || packet_detained)
+        return false;
+    if (detention_required || dangerous(data, length))
+    {
+        packet_detained = true;
+        detention_required = true;
+        return true;
+    }
+    return false;
+}
+
+// Currently we do accelerated blocking when we see a javascript starting
+bool HttpBodyCutter::dangerous(const uint8_t* data, uint32_t length)
+{
+    static const uint8_t match_string[] = { '<', 's', 'c', 'r', 'i', 'p', 't' };
+    static const uint8_t string_length = sizeof(match_string);
+    for (uint32_t k = 0; k < length; k++)
+    {
+        // partial_match is persistent, enabling matches that cross data boundaries
+        if (data[k] == match_string[partial_match])
+        {
+            if (++partial_match == string_length)
+                return true;
+        }
+        else
+        {
+            partial_match = 0;
+        }
+    }
+    return false;
+}
+
index ad7593b44f2452878e5542dcb61185d6733cbbce..6f4313299c9bcf8090bdf8653807753f8758017e 100644 (file)
@@ -94,36 +94,57 @@ private:
     int32_t num_head_lines = 0;
 };
 
-class HttpBodyClCutter : public HttpCutter
+class HttpBodyCutter : public HttpCutter
 {
 public:
-    explicit HttpBodyClCutter(int64_t expected_length) : remaining(expected_length)
+    HttpBodyCutter(bool accelerated_blocking_) : accelerated_blocking(accelerated_blocking_) {}
+    void soft_reset() override { octets_seen = 0; packet_detained = false; }
+    void detain_ended() { packet_detained = false; }
+
+protected:
+    bool need_accelerated_blocking(const uint8_t* data, uint32_t length);
+
+private:
+    bool dangerous(const uint8_t* data, uint32_t length);
+
+    const bool accelerated_blocking;
+    bool packet_detained = false;
+    uint8_t partial_match = 0;
+    bool detention_required = false;
+};
+
+class HttpBodyClCutter : public HttpBodyCutter
+{
+public:
+    HttpBodyClCutter(int64_t expected_length, bool accelerated_blocking) :
+        HttpBodyCutter(accelerated_blocking), remaining(expected_length)
         { assert(remaining > 0); }
     HttpEnums::ScanResult cut(const uint8_t*, uint32_t length, HttpInfractions*, HttpEventGen*,
         uint32_t flow_target, bool stretch) override;
-    void soft_reset() override { octets_seen = 0; }
 
 private:
     int64_t remaining;
 };
 
-class HttpBodyOldCutter : public HttpCutter
+class HttpBodyOldCutter : public HttpBodyCutter
 {
 public:
+    explicit HttpBodyOldCutter(bool accelerated_blocking) : HttpBodyCutter(accelerated_blocking) {}
     HttpEnums::ScanResult cut(const uint8_t*, uint32_t, HttpInfractions*, HttpEventGen*,
         uint32_t flow_target, bool stretch) override;
-    void soft_reset() override { octets_seen = 0; }
 };
 
-class HttpBodyChunkCutter : public HttpCutter
+class HttpBodyChunkCutter : public HttpBodyCutter
 {
 public:
+    explicit HttpBodyChunkCutter(bool accelerated_blocking) : HttpBodyCutter(accelerated_blocking)
+        {}
     HttpEnums::ScanResult cut(const uint8_t* buffer, uint32_t length,
         HttpInfractions* infractions, HttpEventGen* events, uint32_t flow_target, bool stretch)
         override;
     bool get_is_broken_chunk() const override { return curr_state == HttpEnums::CHUNK_BAD; }
     uint32_t get_num_good_chunks() const override { return num_good_chunks; }
-    void soft_reset() override { octets_seen = 0; num_good_chunks = 0; }
+    void soft_reset() override { num_good_chunks = 0; HttpBodyCutter::soft_reset(); }
 
 private:
     uint32_t data_seen = 0;
index d4b18683346d07992c7b3fdf4d939072cb4d3194..94763c1a642ead864ebf04f848a593a8a9ca4c19 100644 (file)
@@ -64,11 +64,12 @@ enum HTTP_BUFFER { HTTP_BUFFER_CLIENT_BODY = 1, HTTP_BUFFER_COOKIE, HTTP_BUFFER_
 enum PEG_COUNT { PEG_FLOW = 0, PEG_SCAN, PEG_REASSEMBLE, PEG_INSPECT, PEG_REQUEST, PEG_RESPONSE,
     PEG_GET, PEG_HEAD, PEG_POST, PEG_PUT, PEG_DELETE, PEG_CONNECT, PEG_OPTIONS, PEG_TRACE,
     PEG_OTHER_METHOD, PEG_REQUEST_BODY, PEG_CHUNKED, PEG_URI_NORM, PEG_URI_PATH, PEG_URI_CODING,
-    PEG_CONCURRENT_SESSIONS, PEG_MAX_CONCURRENT_SESSIONS, PEG_COUNT_MAX };
+    PEG_CONCURRENT_SESSIONS, PEG_MAX_CONCURRENT_SESSIONS, PEG_DETAINED, PEG_PARTIAL_INSPECT,
+    PEG_COUNT_MAX };
 
 // Result of scanning by splitter
-enum ScanResult { SCAN_NOT_FOUND, SCAN_FOUND, SCAN_FOUND_PIECE, SCAN_DISCARD, SCAN_DISCARD_PIECE,
-    SCAN_ABORT };
+enum ScanResult { SCAN_NOT_FOUND, SCAN_NOT_FOUND_DETAIN, SCAN_FOUND, SCAN_FOUND_PIECE,
+    SCAN_DISCARD, SCAN_DISCARD_PIECE, SCAN_ABORT };
 
 // State machine for chunk parsing
 enum ChunkState { CHUNK_NEWLINES, CHUNK_ZEROS, CHUNK_LEADING_WS, CHUNK_NUMBER, CHUNK_TRAILING_WS,
index 262a5783efa09c127d8da64751272ce32e59d3aa..4eed269c588a613cb69739c6cfd9af34d06cf51a 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "decompress/file_decomp.h"
 
+#include "http_cutter.h"
 #include "http_module.h"
 #include "http_test_manager.h"
 #include "http_transaction.h"
@@ -41,14 +42,11 @@ uint64_t HttpFlowData::instance_count = 0;
 HttpFlowData::HttpFlowData() : FlowData(inspector_id)
 {
 #ifdef REG_TEST
-    if (HttpTestManager::use_test_output())
+    seq_num = ++instance_count;
+    if (HttpTestManager::use_test_output() && !HttpTestManager::use_test_input())
     {
-        seq_num = ++instance_count;
-        if (!HttpTestManager::use_test_input())
-        {
-            printf("Flow Data construct %" PRIu64 "\n", seq_num);
-            fflush(nullptr);
-        }
+        printf("Flow Data construct %" PRIu64 "\n", seq_num);
+        fflush(nullptr);
     }
 #endif
     HttpModule::increment_peg_counts(PEG_CONCURRENT_SESSIONS);
@@ -74,6 +72,7 @@ HttpFlowData::~HttpFlowData()
         delete infractions[k];
         delete events[k];
         delete[] section_buffer[k];
+        delete[] partial_buffer[k];
         HttpTransaction::delete_transaction(transaction[k], nullptr);
         delete cutter[k];
         if (compress_stream[k] != nullptr)
@@ -109,6 +108,7 @@ void HttpFlowData::half_reset(SourceId source_id)
     body_octets[source_id] = STAT_NOT_PRESENT;
     section_size_target[source_id] = 0;
     stretch_section_to_packet[source_id] = false;
+    accelerated_blocking[source_id] = false;
     file_depth_remaining[source_id] = STAT_NOT_PRESENT;
     detect_depth_remaining[source_id] = STAT_NOT_PRESENT;
     detection_status[source_id] = DET_REACTIVATING;
index 73ebac51602779fb938572d53e2285413f18877a..dea0c1ee7913e8fcca3ebf0ad18c6b4c8d157bd3 100644 (file)
 #include "utils/util_utf.h"
 #include "decompress/file_decomp.h"
 
-#include "http_cutter.h"
 #include "http_infractions.h"
 #include "http_event_gen.h"
 
 class HttpTransaction;
 class HttpJsNorm;
 class HttpMsgSection;
+class HttpCutter;
 
 class HttpFlowData : public snort::FlowData
 {
@@ -44,6 +44,7 @@ public:
     ~HttpFlowData() override;
     static unsigned inspector_id;
     static void init() { inspector_id = snort::FlowData::create_flow_data_id(); }
+    size_t size_of() override { return sizeof(*this); }
 
     friend class HttpInspect;
     friend class HttpMsgSection;
@@ -63,9 +64,6 @@ public:
     friend class HttpUnitTestSetup;
 #endif
 
-    size_t size_of() override
-    { return sizeof(*this); }
-
 private:
     // Convenience routines
     void half_reset(HttpEnums::SourceId source_id);
@@ -79,19 +77,19 @@ private:
 
     // *** StreamSplitter internal data - reassemble()
     uint8_t* section_buffer[2] = { nullptr, nullptr };
-    uint32_t section_total[2] = { 0, 0 };
     uint32_t section_offset[2] = { 0, 0 };
     uint32_t chunk_expected_length[2] = { 0, 0 };
     uint32_t running_total[2] = { 0, 0 };
     HttpEnums::ChunkState chunk_state[2] = { HttpEnums::CHUNK_NEWLINES,
         HttpEnums::CHUNK_NEWLINES };
+    uint8_t* partial_buffer[2] = { nullptr, nullptr };
+    uint32_t partial_buffer_length[2] = { 0, 0 };
 
     // *** StreamSplitter internal data - scan() => reassemble()
     uint32_t num_excess[2] = { 0, 0 };
     uint32_t num_good_chunks[2] = { 0, 0 };
     uint32_t octets_expected[2] = { 0, 0 };
     bool is_broken_chunk[2] = { false, false };
-    bool strict_length[2] = { false, false };
 
     // *** StreamSplitter => Inspector (facts about the most recent message section)
     HttpEnums::SectionType section_type[2] = { HttpEnums::SEC__NOT_COMPUTE,
@@ -99,6 +97,7 @@ private:
     int32_t num_head_lines[2] = { HttpEnums::STAT_NOT_PRESENT, HttpEnums::STAT_NOT_PRESENT };
     bool tcp_close[2] = { false, false };
     bool zero_byte_workaround[2];
+    bool partial_flush[2] = { false, false };
 
     // Infractions and events are associated with a specific message and are stored in the
     // transaction for that message. But StreamSplitter splits the start line before there is
@@ -121,6 +120,7 @@ private:
     HttpEnums::CompressId compression[2] = { HttpEnums::CMP_NONE, HttpEnums::CMP_NONE };
     HttpEnums::DetectionStatus detection_status[2] = { HttpEnums::DET_ON, HttpEnums::DET_ON };
     bool stretch_section_to_packet[2] = { false, false };
+    bool accelerated_blocking[2] = { false, false };
 
     // *** Inspector's internal data about the current message
     struct FdCallbackContext
index 1e263028f1094504060a5d55fd7d5e412ff455b4..0d1006ec2b76bf86d8776124f47ee1a753030a86 100644 (file)
@@ -274,9 +274,8 @@ void HttpInspect::eval(Packet* p)
     }
 
     const int remove_workaround = session_data->zero_byte_workaround[source_id] ? 1 : 0;
-
-    if (!process(p->data, p->dsize - remove_workaround, p->flow, source_id,
-        (p->data != p->context->buf)))
+    const bool partial_flush = session_data->partial_flush[source_id];
+    if (!process(p->data, p->dsize - remove_workaround, p->flow, source_id, !partial_flush))
     {
         DetectionEngine::disable_content(p);
     }
@@ -317,7 +316,10 @@ bool HttpInspect::process(const uint8_t* data, const uint16_t dsize, Flow* const
     HttpFlowData* session_data = (HttpFlowData*)flow->get_flow_data(HttpFlowData::inspector_id);
     assert(session_data != nullptr);
 
-    HttpModule::increment_peg_counts(PEG_INSPECT);
+    if (!session_data->partial_flush[source_id])
+        HttpModule::increment_peg_counts(PEG_INSPECT);
+    else
+        HttpModule::increment_peg_counts(PEG_PARTIAL_INSPECT);
 
     switch (session_data->section_type[source_id])
     {
@@ -360,7 +362,10 @@ bool HttpInspect::process(const uint8_t* data, const uint16_t dsize, Flow* const
 
     current_section->analyze();
     current_section->gen_events();
-    current_section->update_flow();
+    if (!session_data->partial_flush[source_id])
+        current_section->update_flow();
+    session_data->partial_flush[source_id] = false;
+    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 
 #ifdef REG_TEST
     if (HttpTestManager::use_test_output())
index 4f3c902073a233dd8a1e25e0191a56060ff6be6b..20783702a70e7e3160f3e1095c86d06e1fc2ecaf 100644 (file)
@@ -54,11 +54,14 @@ const Parameter HttpModule::http_params[] =
     { "decompress_zip", Parameter::PT_BOOL, nullptr, "false",
       "decompress zip files in response bodies" },
 
+    { "accelerated_blocking", Parameter::PT_BOOL, nullptr, "false",
+      "inspect JavaScript in response messages as soon as possible" },
+
     { "normalize_javascript", Parameter::PT_BOOL, nullptr, "false",
-      "normalize javascript in response bodies" },
+      "normalize JavaScript in response bodies" },
 
     { "max_javascript_whitespaces", Parameter::PT_INT, "1:65535", "200",
-      "maximum consecutive whitespaces allowed within the Javascript obfuscated data" },
+      "maximum consecutive whitespaces allowed within the JavaScript obfuscated data" },
 
     { "bad_characters", Parameter::PT_BIT_LIST, "255", nullptr,
       "alert when any of specified bytes are present in URI after percent decoding" },
@@ -167,6 +170,10 @@ bool HttpModule::set(const char*, Value& val, SnortConfig*)
     {
         params->decompress_zip = val.get_bool();
     }
+    else if (val.is("accelerated_blocking"))
+    {
+        params->accelerated_blocking = val.get_bool();
+    }
     else if (val.is("normalize_javascript"))
     {
         params->js_norm_param.normalize_javascript = val.get_bool();
index cfec45b2353ac6f91a3143358fb177e6e9332f10..e95e112602d02af8a4ae6d985020a20114ece41f 100644 (file)
@@ -42,6 +42,7 @@ public:
     bool decompress_pdf = false;
     bool decompress_swf = false;
     bool decompress_zip = false;
+    bool accelerated_blocking = false;
 
     struct JsNormParam
     {
index 7f5429ccf1e41d2c166762da337cffc0f0efdb0d..f54582461ed112a3e6fd906bcd9c79ffc5a2258c 100644 (file)
@@ -60,7 +60,9 @@ void HttpMsgBody::analyze()
             (unsigned)detect_data.length());
     }
 
-    if (session_data->file_depth_remaining[source_id] > 0)
+    // Only give data to file processing once, when we inspect the entire message section.
+    if (!session_data->partial_flush[source_id] &&
+        (session_data->file_depth_remaining[source_id] > 0))
     {
         do_file_processing(decoded_body);
     }
index 92a74d02a5b22d6dd0f5ce5a7d75f329d339be44..b7870be416e837a94e5a714ca4d661fc0292270a 100644 (file)
@@ -49,7 +49,6 @@ void HttpMsgBodyChunk::update_flow()
     {
         update_depth();
     }
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 }
 
 #ifdef REG_TEST
index d9a5a8b31be99546c07daa993959b9327c2e31d9..6f03c53b9a0b5f2f9577788918f588e27ade15f2 100644 (file)
@@ -38,7 +38,6 @@ void HttpMsgBodyCl::update_flow()
         // End of message
         session_data->half_reset(source_id);
     }
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 }
 
 #ifdef REG_TEST
index e8f956a80eedee02e2701959fc0e726c3ace959c..e2c7448af59ff3c082645b38cd4ce9199389f8a3 100644 (file)
@@ -30,7 +30,6 @@ void HttpMsgBodyOld::update_flow()
     // Always more body expected
     session_data->body_octets[source_id] = body_octets;
     update_depth();
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 }
 
 #ifdef REG_TEST
index 3e86c9f81d16e2e29a1aa30e5e282ec85d8f6fd3..150357cadd939ac028679fceb840341a48c4fbb1 100644 (file)
@@ -141,8 +141,6 @@ void HttpMsgHeader::gen_events()
 
 void HttpMsgHeader::update_flow()
 {
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
-
     // The following logic to determine body type is by no means the last word on this topic.
     if (tcp_close)
     {
@@ -306,6 +304,10 @@ void HttpMsgHeader::prepare_body()
     setup_utf_decoding();
     setup_file_decompression();
     update_depth();
+    // Limitations on accelerated blocking will be lifted as the feature is built out
+    session_data->accelerated_blocking[source_id] = params->accelerated_blocking &&
+        (source_id == SRC_SERVER) && (session_data->compression[source_id] == CMP_NONE) &&
+        (params->request_depth == -1);
     if (source_id == SRC_CLIENT)
     {
         HttpModule::increment_peg_counts(PEG_REQUEST_BODY);
index e7b0cece7c228975258aeca1e22747fe732edcc0..459f75d9773ac5f46be7c3c4e9d67afa61e3a9e5 100644 (file)
@@ -246,8 +246,10 @@ void HttpMsgRequest::update_flow()
     {
         session_data->half_reset(source_id);
         session_data->type_expected[source_id] = SEC_ABORT;
+        return;
     }
-    else if (*transaction->get_infractions(source_id) & INF_ZERO_NINE_REQ)
+
+    if (*transaction->get_infractions(source_id) & INF_ZERO_NINE_REQ)
     {
         session_data->half_reset(source_id);
         // There can only be one 0.9 response per connection because it ends the S2C connection. Do
@@ -259,14 +261,12 @@ void HttpMsgRequest::update_flow()
             // line and headers.
             session_data->zero_nine_expected = trans_num;
         }
+        return;
     }
-    else
-    {
-        session_data->type_expected[source_id] = SEC_HEADER;
-        session_data->version_id[source_id] = version_id;
-        session_data->method_id = method_id;
-    }
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
+
+    session_data->type_expected[source_id] = SEC_HEADER;
+    session_data->version_id[source_id] = version_id;
+    session_data->method_id = method_id;
 }
 
 #ifdef REG_TEST
index 3875682d7323ad03e9004e03a7c710005f3030e3..47df606081499223e198765f8f760acf5ad6f40c 100644 (file)
@@ -178,29 +178,26 @@ void HttpMsgStatus::update_flow()
     {
         session_data->half_reset(source_id);
         session_data->type_expected[source_id] = SEC_ABORT;
+        return;
     }
-    else
+    session_data->type_expected[source_id] = SEC_HEADER;
+    session_data->version_id[source_id] = version_id;
+    session_data->status_code_num = status_code_num;
+    // 100 response means the next response message will be added to this transaction instead
+    // of being part of another transaction. As implemented it is possible for multiple 100
+    // responses to all be included in the same transaction. It's not obvious whether that is
+    // the best way to handle what should be a highly abnormal situation.
+    if (status_code_num == 100)
     {
-        session_data->type_expected[source_id] = SEC_HEADER;
-        session_data->version_id[source_id] = version_id;
-        session_data->status_code_num = status_code_num;
-        // 100 response means the next response message will be added to this transaction instead
-        // of being part of another transaction. As implemented it is possible for multiple 100
-        // responses to all be included in the same transaction. It's not obvious whether that is
-        // the best way to handle what should be a highly abnormal situation.
-        if (status_code_num == 100)
+        // Were we "Expect"-ing this?
+        if ((header[SRC_CLIENT] != nullptr) &&
+            (header[SRC_CLIENT]->get_header_count(HEAD_EXPECT) == 0))
         {
-            // Were we "Expect"-ing this?
-            if ((header[SRC_CLIENT] != nullptr) &&
-                (header[SRC_CLIENT]->get_header_count(HEAD_EXPECT) == 0))
-            {
-                add_infraction(INF_UNEXPECTED_100_RESPONSE);
-                create_event(EVENT_UNEXPECTED_100_RESPONSE);
-            }
-            transaction->set_one_hundred_response();
+            add_infraction(INF_UNEXPECTED_100_RESPONSE);
+            create_event(EVENT_UNEXPECTED_100_RESPONSE);
         }
+        transaction->set_one_hundred_response();
     }
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 }
 
 #ifdef REG_TEST
index f9f3d10e40bd0478f9aa3ba382c79ea6f6537994..224ea36e71461c41ac2f0520e8504ca397aba673 100644 (file)
@@ -84,7 +84,6 @@ void HttpMsgTrailer::gen_events()
 void HttpMsgTrailer::update_flow()
 {
     session_data->half_reset(source_id);
-    session_data->section_type[source_id] = SEC__NOT_COMPUTE;
 }
 
 #ifdef REG_TEST
index 05c55407be0c4610f20c6261982e9258cc78fce8..d7e78d1a3ac3d5559bb4a26483c1afc06e2150aa 100644 (file)
@@ -41,6 +41,7 @@ public:
     const snort::StreamBuffer reassemble(snort::Flow* flow, unsigned total, unsigned, const
         uint8_t* data, unsigned len, uint32_t flags, unsigned& copied) override;
     bool finish(snort::Flow* flow) override;
+    bool init_partial_flush(snort::Flow* flow) override;
     bool is_paf() override { return true; }
 
     // FIXIT-M should return actual packet buffer size
@@ -49,7 +50,7 @@ public:
 private:
     void prepare_flush(HttpFlowData* session_data, uint32_t* flush_offset, HttpEnums::SectionType
         section_type, uint32_t num_flushed, uint32_t num_excess, int32_t num_head_lines,
-        bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen, bool strict_length)
+        bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen)
         const;
     HttpCutter* get_cutter(HttpEnums::SectionType type, const HttpFlowData* session) const;
     void chunk_spray(HttpFlowData* session_data, uint8_t* buffer, const uint8_t* data,
@@ -57,6 +58,7 @@ private:
     static void decompress_copy(uint8_t* buffer, uint32_t& offset, const uint8_t* data,
         uint32_t length, HttpEnums::CompressId& compression, z_stream*& compress_stream,
         bool at_start, HttpInfractions* infractions, HttpEventGen* events);
+    static void detain_packet(snort::Packet* pkt);
 
     HttpInspect* const my_inspector;
     const HttpEnums::SourceId source_id;
index 642c3c955b7b47e2f4804460d0171505db20e580..f7dc0eb928a5d5c6bbb91799013d85b3bb9d6762 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "file_api/file_flows.h"
 
+#include "http_cutter.h"
 #include "http_module.h"
 #include "http_msg_request.h"
 #include "http_stream_splitter.h"
@@ -33,7 +34,6 @@ using namespace HttpEnums;
 bool HttpStreamSplitter::finish(snort::Flow* flow)
 {
     snort::Profile profile(HttpModule::get_profile_stats());
-    snort::Packet* p = snort::DetectionEngine::get_current_packet();
 
     HttpFlowData* session_data = (HttpFlowData*)flow->get_flow_data(HttpFlowData::inspector_id);
     // FIXIT-M - this assert has been changed to check for null session data and return false if so
@@ -89,8 +89,7 @@ bool HttpStreamSplitter::finish(snort::Flow* flow)
             session_data->cutter[source_id]->get_num_head_lines(),
             session_data->cutter[source_id]->get_is_broken_chunk(),
             session_data->cutter[source_id]->get_num_good_chunks(),
-            session_data->cutter[source_id]->get_octets_seen(),
-            true);
+            session_data->cutter[source_id]->get_octets_seen());
         delete session_data->cutter[source_id];
         session_data->cutter[source_id] = nullptr;
 
@@ -111,7 +110,7 @@ bool HttpStreamSplitter::finish(snort::Flow* flow)
         // Set up to process empty message section
         uint32_t not_used;
         prepare_flush(session_data, &not_used, session_data->type_expected[source_id], 0, 0, 0,
-            false, 0, 0, true);
+            false, 0, 0);
         return true;
     }
 
@@ -121,6 +120,7 @@ bool HttpStreamSplitter::finish(snort::Flow* flow)
         (session_data->cutter[source_id] != nullptr)               &&
         (session_data->cutter[source_id]->get_octets_seen() == 0))
     {
+        snort::Packet* packet = snort::DetectionEngine::get_current_packet();
         if (!session_data->mime_state[source_id])
         {
             snort::FileFlows* file_flows = snort::FileFlows::get_file_flows(flow);
@@ -137,11 +137,11 @@ bool HttpStreamSplitter::finish(snort::Flow* flow)
                 }
             }
 
-            file_flows->file_process(p, nullptr, 0, SNORT_FILE_END, !download, file_index);
+            file_flows->file_process(packet, nullptr, 0, SNORT_FILE_END, !download, file_index);
         }
         else
         {
-            session_data->mime_state[source_id]->process_mime_data(p, nullptr, 0, true,
+            session_data->mime_state[source_id]->process_mime_data(packet, nullptr, 0, true,
                 SNORT_FILE_POSITION_UNKNOWN);
             delete session_data->mime_state[source_id];
             session_data->mime_state[source_id] = nullptr;
@@ -152,3 +152,42 @@ bool HttpStreamSplitter::finish(snort::Flow* flow)
     return session_data->section_type[source_id] != SEC__NOT_COMPUTE;
 }
 
+bool HttpStreamSplitter::init_partial_flush(snort::Flow* flow)
+{
+    snort::Profile profile(HttpModule::get_profile_stats());
+
+    if (source_id != SRC_SERVER)
+    {
+        assert(false);
+        return false;
+    }
+
+    HttpFlowData* session_data = (HttpFlowData*)flow->get_flow_data(HttpFlowData::inspector_id);
+    assert(session_data != nullptr);
+    if ((session_data->type_expected[source_id] != SEC_BODY_CL)      &&
+        (session_data->type_expected[source_id] != SEC_BODY_OLD)     &&
+        (session_data->type_expected[source_id] != SEC_BODY_CHUNK))
+    {
+        assert(false);
+        return false;
+    }
+
+#ifdef REG_TEST
+    if (HttpTestManager::use_test_output() && !HttpTestManager::use_test_input())
+    {
+        printf("Partial flush from flow data %" PRIu64 "\n", session_data->seq_num);
+        fflush(stdout);
+    }
+#endif
+
+    // Set up to process partial message section
+    uint32_t not_used;
+    prepare_flush(session_data, &not_used, session_data->type_expected[source_id], 0, 0, 0,
+        session_data->cutter[source_id]->get_is_broken_chunk(),
+        session_data->cutter[source_id]->get_num_good_chunks(),
+        session_data->cutter[source_id]->get_octets_seen());
+    (static_cast<HttpBodyCutter*>(session_data->cutter[source_id]))->detain_ended();
+    session_data->partial_flush[source_id] = true;
+    return true;
+}
+
index e32c50df7781f2337fd65b1ff699dbd41bc4fe3c..259e075c54aeea6bcbb9947022e385f437669f4b 100644 (file)
@@ -221,8 +221,8 @@ void HttpStreamSplitter::decompress_copy(uint8_t* buffer, uint32_t& offset, cons
     offset += length;
 }
 
-const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsigned total, unsigned,
-    const uint8_t* data, unsigned len, uint32_t flags, unsigned& copied)
+const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsigned total,
+    unsigned, const uint8_t* data, unsigned len, uint32_t flags, unsigned& copied)
 {
     snort::Profile profile(HttpModule::get_profile_stats());
 
@@ -243,17 +243,22 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
                 return http_buf;
             }
             bool tcp_close;
+            bool partial_flush;
             uint8_t* test_buffer;
             HttpTestManager::get_test_input_source()->reassemble(&test_buffer, len, source_id,
-                tcp_close);
+                tcp_close, partial_flush);
             if (tcp_close)
             {
                 finish(flow);
             }
+            if (partial_flush)
+            {
+                init_partial_flush(flow);
+            }
             if (test_buffer == nullptr)
             {
-                // Source ID does not match test data, no test data was flushed, or there is no
-                // more test data
+                // Source ID does not match test data, no test data was flushed, preparing for a
+                // partial flush, or there is no more test data
                 return http_buf;
             }
             data = test_buffer;
@@ -261,8 +266,9 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
         }
         else
         {
-            printf("Reassemble from flow data %" PRIu64 " direction %d total %u length %u\n",
-                session_data->seq_num, source_id, total, len);
+            printf("Reassemble from flow data %" PRIu64
+                " direction %d total %u length %u partial %d\n", session_data->seq_num, source_id,
+                total, len, session_data->partial_flush[source_id]);
             fflush(stdout);
         }
     }
@@ -282,13 +288,14 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
     }
 
     assert(session_data->section_type[source_id] != SEC__NOT_COMPUTE);
-    assert(total <= MAX_OCTETS);
+    uint8_t*& partial_buffer = session_data->partial_buffer[source_id];
+    uint32_t& partial_buffer_length = session_data->partial_buffer_length[source_id];
+    assert(partial_buffer_length + total <= MAX_OCTETS);
 
     // FIXIT-H this is a precaution/workaround for stream issues. When they are fixed replace this
     // block with an assert.
-    if ( !((session_data->octets_expected[source_id] == total) ||
-        (!session_data->strict_length[source_id] &&
-        (total <= session_data->octets_expected[source_id]))) )
+    if ((session_data->section_offset[source_id] == 0) &&
+        (session_data->octets_expected[source_id] != (total + partial_buffer_length)))
     {
         if (session_data->octets_expected[source_id] == 0)
         {
@@ -325,6 +332,7 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
             fflush(HttpTestManager::get_output_file());
         }
 #endif
+        assert(partial_buffer == nullptr);
         if (flags & PKT_PDU_TAIL)
         {
             assert(session_data->running_total[source_id] == total);
@@ -361,14 +369,26 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
         if (is_body)
             buffer = new uint8_t[MAX_OCTETS];
         else
-            buffer = new uint8_t[(total > 0) ? total : 1];
-        session_data->section_total[source_id] = total;
+        {
+            const uint32_t buffer_size = ((partial_buffer_length + total) > 0) ?
+                (partial_buffer_length + total) : 1;
+            buffer = new uint8_t[buffer_size];
+        }
     }
-    else
-        assert(session_data->section_total[source_id] == total);
 
+    // FIXIT-H there is no support here for partial flush with either chunking or compression
     if (session_data->section_type[source_id] != SEC_BODY_CHUNK)
     {
+        assert((partial_buffer_length == 0) || (session_data->compression[source_id] == CMP_NONE));
+        if (partial_buffer_length > 0)
+        {
+            assert(session_data->section_offset[source_id] == 0);
+            memcpy(buffer, partial_buffer, partial_buffer_length);
+            session_data->section_offset[source_id] = partial_buffer_length;
+            partial_buffer_length = 0;
+            delete[] partial_buffer;
+            partial_buffer = nullptr;
+        }
         const bool at_start = (session_data->body_octets[source_id] == 0) &&
              (session_data->section_offset[source_id] == 0);
         decompress_copy(buffer, session_data->section_offset[source_id], data, len,
@@ -378,6 +398,7 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
     }
     else
     {
+        assert(partial_buffer_length == 0);
         chunk_spray(session_data, buffer, data, len);
     }
 
@@ -389,6 +410,13 @@ const snort::StreamBuffer HttpStreamSplitter::reassemble(snort::Flow* flow, unsi
         const uint16_t buf_size =
             session_data->section_offset[source_id] - session_data->num_excess[source_id];
 
+        if (session_data->partial_flush[source_id])
+        {
+            // Store the data from a partial flush for reuse
+            partial_buffer = buffer;
+            partial_buffer_length = buf_size;
+        }
+
         // FIXIT-M kludge until we work out issues with returning an empty buffer
         http_buf.data = buffer;
         if (buf_size > 0)
index 14f60c981638b504ff9c6a4ae2696e2977da0f14..7b610d9186a46e3b5e51f10ace177d016a5d9f67 100644 (file)
 #include "config.h"
 #endif
 
+#include "http_cutter.h"
 #include "http_inspect.h"
 #include "http_module.h"
 #include "http_stream_splitter.h"
 #include "http_test_input.h"
+#include "stream/stream.h"
 
 using namespace snort;
 using namespace HttpEnums;
@@ -32,7 +34,7 @@ using namespace HttpEnums;
 // Convenience function. All housekeeping that must be done before we can return FLUSH to stream.
 void HttpStreamSplitter::prepare_flush(HttpFlowData* session_data, uint32_t* flush_offset,
     SectionType section_type, uint32_t num_flushed, uint32_t num_excess, int32_t num_head_lines,
-    bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen, bool strict_length) const
+    bool is_broken_chunk, uint32_t num_good_chunks, uint32_t octets_seen) const
 {
     session_data->section_type[source_id] = section_type;
     session_data->num_excess[source_id] = num_excess;
@@ -40,7 +42,6 @@ void HttpStreamSplitter::prepare_flush(HttpFlowData* session_data, uint32_t* flu
     session_data->is_broken_chunk[source_id] = is_broken_chunk;
     session_data->num_good_chunks[source_id] = num_good_chunks;
     session_data->octets_expected[source_id] = octets_seen + num_flushed;
-    session_data->strict_length[source_id] = strict_length;
 
     if (flush_offset != nullptr)
     {
@@ -68,17 +69,51 @@ HttpCutter* HttpStreamSplitter::get_cutter(SectionType type,
     case SEC_TRAILER:
         return (HttpCutter*)new HttpHeaderCutter;
     case SEC_BODY_CL:
-        return (HttpCutter*)new HttpBodyClCutter(session_data->data_length[source_id]);
+        return (HttpCutter*)new HttpBodyClCutter(session_data->data_length[source_id],
+            session_data->accelerated_blocking[source_id]);
     case SEC_BODY_CHUNK:
-        return (HttpCutter*)new HttpBodyChunkCutter;
+        return (HttpCutter*)new HttpBodyChunkCutter(session_data->accelerated_blocking[source_id]);
     case SEC_BODY_OLD:
-        return (HttpCutter*)new HttpBodyOldCutter;
+        return (HttpCutter*)new HttpBodyOldCutter(session_data->accelerated_blocking[source_id]);
     default:
         assert(false);
         return nullptr;
     }
 }
 
+static StreamSplitter::Status status_value(StreamSplitter::Status ret_val)
+{
+#ifdef REG_TEST
+    if (HttpTestManager::use_test_output())
+    {
+        fprintf(HttpTestManager::get_output_file(), "scan() returning status %d\n", ret_val);
+        fflush(HttpTestManager::get_output_file());
+    }
+    if (HttpTestManager::use_test_input())
+    {
+        if (ret_val == StreamSplitter::ABORT)
+            return StreamSplitter::ABORT;
+        return StreamSplitter::FLUSH;
+    }
+    else
+#endif
+    return ret_val;
+}
+
+void HttpStreamSplitter::detain_packet(Packet* pkt)
+{
+#ifdef REG_TEST
+    if (HttpTestManager::use_test_output())
+    {
+        fprintf(HttpTestManager::get_output_file(), "Packet detain request\n");
+        fflush(HttpTestManager::get_output_file());
+    }
+    if (!HttpTestManager::use_test_input())
+#endif
+    Stream::set_packet_action_to_hold(pkt);
+    HttpModule::increment_peg_counts(PEG_DETAINED);
+}
+
 StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data, uint32_t length,
     uint32_t, uint32_t* flush_offset)
 {
@@ -102,7 +137,7 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
     SectionType type = session_data->type_expected[source_id];
 
     if (type == SEC_ABORT)
-        return StreamSplitter::ABORT;
+        return status_value(StreamSplitter::ABORT);
 
 #ifdef REG_TEST
     if (HttpTestManager::use_test_input())
@@ -155,10 +190,10 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
         // not support no headers. Processing this imaginary status line and empty headers allows
         // us to overcome this limitation and reuse the entire HTTP infrastructure.
         type = SEC_BODY_OLD;
-        prepare_flush(session_data, nullptr, SEC_STATUS, 14, 0, 0, false, 0, 14, true);
+        prepare_flush(session_data, nullptr, SEC_STATUS, 14, 0, 0, false, 0, 14);
         my_inspector->process((const uint8_t*)"HTTP/0.9 200 .", 14, flow, SRC_SERVER, false);
         session_data->transaction[SRC_SERVER]->clear_section();
-        prepare_flush(session_data, nullptr, SEC_HEADER, 0, 0, 0, false, 0, 0, true);
+        prepare_flush(session_data, nullptr, SEC_HEADER, 0, 0, 0, false, 0, 0);
         my_inspector->process((const uint8_t*)"", 0, flow, SRC_SERVER, false);
         session_data->transaction[SRC_SERVER]->clear_section();
     }
@@ -176,6 +211,7 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
     switch (cut_result)
     {
     case SCAN_NOT_FOUND:
+    case SCAN_NOT_FOUND_DETAIN:
         if (cutter->get_octets_seen() == MAX_OCTETS)
         {
             *session_data->get_infractions(source_id) += INF_ENDLESS_HEADER;
@@ -186,24 +222,21 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
             session_data->type_expected[source_id] = SEC_ABORT;
             delete cutter;
             cutter = nullptr;
-            return StreamSplitter::ABORT;
+            return status_value(StreamSplitter::ABORT);
         }
-        // Incomplete headers wait patiently for more data
-#ifdef REG_TEST
-        if (HttpTestManager::use_test_input())
-            return StreamSplitter::FLUSH;
-        else
-#endif
-        return StreamSplitter::SEARCH;
+        if (cut_result == SCAN_NOT_FOUND_DETAIN)
+            detain_packet(pkt);
+        // Wait patiently for more data
+        return status_value(StreamSplitter::SEARCH);
     case SCAN_ABORT:
         session_data->type_expected[source_id] = SEC_ABORT;
         delete cutter;
         cutter = nullptr;
-        return StreamSplitter::ABORT;
+        return status_value(StreamSplitter::ABORT);
     case SCAN_DISCARD:
     case SCAN_DISCARD_PIECE:
         prepare_flush(session_data, flush_offset, SEC_DISCARD, cutter->get_num_flush(), 0, 0,
-            false, 0, cutter->get_octets_seen(), true);
+            false, 0, cutter->get_octets_seen());
         if (cut_result == SCAN_DISCARD)
         {
             delete cutter;
@@ -211,15 +244,14 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
         }
         else
             cutter->soft_reset();
-        return StreamSplitter::FLUSH;
+        return status_value(StreamSplitter::FLUSH);
     case SCAN_FOUND:
     case SCAN_FOUND_PIECE:
       {
         const uint32_t flush_octets = cutter->get_num_flush();
         prepare_flush(session_data, flush_offset, type, flush_octets, cutter->get_num_excess(),
             cutter->get_num_head_lines(), cutter->get_is_broken_chunk(),
-            cutter->get_num_good_chunks(), cutter->get_octets_seen(),
-            !((type == SEC_BODY_CL) || (type == SEC_BODY_OLD)));
+            cutter->get_num_good_chunks(), cutter->get_octets_seen());
         if (cut_result == SCAN_FOUND)
         {
             delete cutter;
@@ -227,11 +259,11 @@ StreamSplitter::Status HttpStreamSplitter::scan(Packet* pkt, const uint8_t* data
         }
         else
             cutter->soft_reset();
-        return StreamSplitter::FLUSH;
+        return status_value(StreamSplitter::FLUSH);
       }
     default:
         assert(false);
-        return StreamSplitter::ABORT;
+        return status_value(StreamSplitter::ABORT);
     }
 }
 
index fb5f9ea30a07ed093a4e3458f4020cbde0b1f6fb..7c3d4c9c62b0755752b54cae571948869b5352d5 100644 (file)
@@ -404,6 +404,8 @@ const PegInfo HttpModule::peg_names[PEG_COUNT_MAX+1] =
     { CountType::SUM, "uri_coding", "URIs with character coding problems" },
     { CountType::NOW, "concurrent_sessions", "total concurrent http sessions" },
     { CountType::MAX, "max_concurrent_sessions", "maximum concurrent http sessions" },
+    { CountType::SUM, "detained_packets", "TCP packets delayed by accelerated blocking" },
+    { CountType::SUM, "partial_inspections", "pre-inspections for accelerated blocking" },
     { CountType::END, nullptr, nullptr }
 };
 
index d50a1446ddd28a4ccdd6aed72482e90794d05cc8..867845102c585ebcf49072827a42a8477931c135 100644 (file)
@@ -238,6 +238,13 @@ void HttpTestInput::scan(uint8_t*& data, uint32_t& length, SourceId source_id, u
                         return;
                     }
                 }
+                else if ((command_length == strlen("partial")) && !memcmp(command_value,
+                    "partial", strlen("partial")))
+                {
+                    partial = true;
+                    length = 0;
+                    return;
+                }
                 else if ((command_length > strlen("fileset")) && !memcmp(command_value, "fileset",
                     strlen("fileset")))
                 {
@@ -433,20 +440,30 @@ void HttpTestInput::scan(uint8_t*& data, uint32_t& length, SourceId source_id, u
 void HttpTestInput::flush(uint32_t num_octets)
 {
     flush_octets = previous_offset[last_source_id] + num_octets;
+    assert(flush_octets <= end_offset[last_source_id]);
     assert(flush_octets <= MAX_OCTETS);
     flushed = true;
 }
 
 void HttpTestInput::reassemble(uint8_t** buffer, unsigned& length, SourceId source_id,
-    bool& tcp_close)
+    bool& tcp_close, bool& partial_flush)
 {
     *buffer = nullptr;
     tcp_close = false;
+    partial_flush = false;
 
     // Only piggyback on data moving in the same direction.
     // Need flushed data unless the connection is closing.
-    if ((source_id != last_source_id) || (!flushed && !tcp_closed))
+    if ((source_id != last_source_id) || (!flushed && !tcp_closed && !partial))
+    {
+        return;
+    }
+
+    if (partial)
     {
+        // Give the caller a chance to set up for a partial flush before giving him the data
+        partial_flush = true;
+        partial = false;
         return;
     }
 
@@ -454,9 +471,8 @@ void HttpTestInput::reassemble(uint8_t** buffer, unsigned& length, SourceId sour
     // buffer.
     // 1. less than whole buffer - not the final flush, ignore pending close
     // 2. exactly equal - process data now and signal the close next time around
-    // 3. more than whole buffer - signal the close now and truncate and send next time around
-    // 4. there was no flush - signal the close now and send the leftovers next time around
-    if (tcp_closed && (!flushed || (flush_octets >= end_offset[last_source_id])))
+    // 3. there was no flush - signal the close now and send the leftovers next time around
+    if (tcp_closed && (!flushed || (flush_octets == end_offset[last_source_id])))
     {
         if (close_pending)
         {
@@ -479,7 +495,7 @@ void HttpTestInput::reassemble(uint8_t** buffer, unsigned& length, SourceId sour
             close_notified = true;
             finish_expected = true;
         }
-        else if (flush_octets == end_offset[last_source_id])
+        else
         {
             // The flush point is the end of the paragraph. Supply the data now and if necessary
             // notify the caller about close next time or otherwise just clean up.
@@ -497,38 +513,12 @@ void HttpTestInput::reassemble(uint8_t** buffer, unsigned& length, SourceId sour
                 close_pending = true;
             }
         }
-        else
-        {
-            // Flushed more body data than is actually available. Truncate the size of the flush,
-            // notify caller about close, and supply the data next time.
-            flush_octets = end_offset[last_source_id];
-            tcp_close = true;
-            close_notified = true;
-            finish_expected = true;
-        }
         return;
     }
 
     // Normal case with no TCP close or at least not yet
     *buffer = msg_buf[last_source_id];
     length = flush_octets;
-    if (flush_octets > end_offset[last_source_id])
-    {
-        // We need to generate additional data to fill out the body or chunk section.
-        for (uint32_t k = end_offset[last_source_id]; k < flush_octets; k++)
-        {
-            if (include_file[last_source_id] == nullptr)
-            {
-                msg_buf[last_source_id][k] = 'A' + k % 26;
-            }
-            else
-            {
-                int new_octet = getc(include_file[last_source_id]);
-                assert(new_octet != EOF);
-                msg_buf[last_source_id][k] = new_octet;
-            }
-        }
-    }
     just_flushed = true;
     flushed = false;
 }
index b674d55a42c676774019bd5d901e73e34fee3334..3b3cb20268251347b3d33d1c1fb1b4c67493f34c 100644 (file)
@@ -35,7 +35,7 @@ public:
     void scan(uint8_t*& data, uint32_t& length, HttpEnums::SourceId source_id, uint64_t seq_num);
     void flush(uint32_t num_octets);
     void reassemble(uint8_t** buffer, unsigned& length, HttpEnums::SourceId source_id,
-        bool& tcp_close);
+        bool& tcp_close, bool& partial_flush);
     bool finish();
 
 private:
@@ -62,6 +62,9 @@ private:
     // TCP connection directional close at end of current paragraph
     bool tcp_closed = false;
 
+    // partial flush requested, useful for testing accelerated blocking
+    bool partial = false;
+
     // number of octets that have been flushed and must be sent by reassemble
     uint32_t flush_octets = 0;
 
index d8e1316785a3d8c286bb3ac59da14e637576218c..1668a85fa4f3fdc762ddeda0fa978d93553b3eb0 100644 (file)
@@ -310,6 +310,11 @@ int TcpStreamSession::update_alert(Packet* p, uint32_t gid, uint32_t sid,
     return -1;
 }
 
+bool TcpStreamSession::set_packet_action_to_hold(snort::Packet* p)
+{
+    return listener->set_held_packet(p);
+}
+
 void TcpStreamSession::SetPacketHeaderFoo(const Packet* p)
 {
     if ( daq_flags & DAQ_PKT_FLAG_NOT_FORWARDING )
@@ -414,6 +419,16 @@ void TcpStreamSession::cleanup(Packet* p)
     clear_session(true, true, false, p);
     client.normalizer.reset();
     server.reassembler.reset();
+    if ( p )
+    {
+        client.finalize_held_packet(p);
+        server.finalize_held_packet(p);
+    }
+    else
+    {
+        client.finalize_held_packet(flow);
+        server.finalize_held_packet(flow);
+    }
 }
 
 void TcpStreamSession::clear()
@@ -462,7 +477,6 @@ void TcpStreamSession::start_proxy()
 //-------------------------------------------------------------------------
 // tcp module stuff
 //-------------------------------------------------------------------------
-
 void TcpStreamSession::print()
 {
     char buf[64];
@@ -481,3 +495,4 @@ void TcpStreamSession::print()
     server.print();
 }
 
+
index 27d08672d99e83d73a8ae3742e22482eac73cfba..c195a938f0a06b3164de8ea614f6f3cdb2bddf59 100644 (file)
@@ -55,6 +55,8 @@ public:
     bool check_alerted(snort::Packet*, uint32_t gid, uint32_t sid) override;
     int update_alert(snort::Packet*, uint32_t /*gid*/, uint32_t /*sid*/,
         uint32_t /*event_id*/, uint32_t /*event_second*/) override;
+    bool set_packet_action_to_hold(snort::Packet*) override;
+
     uint16_t get_mss(bool to_server) const;
     uint8_t get_tcp_options_len(bool to_server) const;
 
index 86eb7ece45b6c0d8b9f1e3a0f8d17b0c586a6b47..b58cf98e2fb796a86554e6f34fd19a3beb928050 100644 (file)
@@ -26,6 +26,9 @@
 #include "tcp_stream_tracker.h"
 
 #include "log/messages.h"
+#include "main/analyzer.h"
+#include "main/snort.h"
+#include "packet_io/active.h"
 #include "profiler/profiler_defs.h"
 #include "protocols/eth.h"
 #include "stream/stream.h"
@@ -59,9 +62,7 @@ TcpStreamTracker::TcpStreamTracker(bool client) :
 { }
 
 TcpStreamTracker::~TcpStreamTracker()
-{
-    delete splitter;
-}
+{ delete splitter; }
 
 TcpStreamTracker::TcpEvent TcpStreamTracker::set_tcp_event(TcpSegmentDescriptor& tsd)
 {
@@ -199,6 +200,8 @@ void TcpStreamTracker::init_tcp_state()
     fin_seq_set = false;
     rst_pkt_sent = false;
     order = 0;
+    held_packet = nullptr;
+    held_seq_num = 0;
 }
 
 //-------------------------------------------------------------------------
@@ -650,6 +653,88 @@ bool TcpStreamTracker::is_segment_seq_valid(TcpSegmentDescriptor& tsd)
     return valid_seq;
 }
 
+bool TcpStreamTracker::set_held_packet(snort::Packet* p)
+{
+    // FIXIT-M - limit of packets held per packet thread should be determined based on runtime criteria
+    //           such as # of DAQ Msg buffers, # of threads, etc... for now we use small number like 10
+    if ( held_packet )
+        return false;
+    if ( tcpStats.current_packets_held >= 10 )
+    {
+        tcpStats.held_packet_limit_exceeded++;
+        return false;
+    }
+
+    if ( p->active->hold_packet(p) )
+    {
+        held_packet = p->daq_msg;
+        held_seq_num = p->ptrs.tcph->seq();
+        tcpStats.total_packets_held++;
+        if ( ++tcpStats.current_packets_held > tcpStats.max_packets_held )
+            tcpStats.max_packets_held = tcpStats.current_packets_held;
+        return true;
+    }
+
+    return false;
+}
+
+bool TcpStreamTracker::is_retransmit_of_held_packet(snort::Packet* cp)
+{
+    if ( !held_packet or ( cp->daq_msg == held_packet ) )
+        return false;
+
+    uint32_t next_send_seq = cp->ptrs.tcph->seq() + (uint32_t)cp->dsize;
+    if ( SEQ_LEQ(cp->ptrs.tcph->seq(), held_seq_num) and SEQ_GT(next_send_seq, held_seq_num) )
+    {
+        tcpStats.held_packet_rexmits++;
+        return true;
+    }
+
+    return false;
+}
+
+void TcpStreamTracker::finalize_held_packet(snort::Packet* cp)
+{
+    if ( held_packet )
+    {
+        if ( cp->active->packet_was_dropped() )
+        {
+            Analyzer::get_local_analyzer()->finalize_daq_message(held_packet, DAQ_VERDICT_BLOCK);
+            tcpStats.held_packets_dropped++;
+        }
+        else
+        {
+            Analyzer::get_local_analyzer()->finalize_daq_message(held_packet, DAQ_VERDICT_PASS);
+            tcpStats.held_packets_passed++;
+        }
+
+        held_packet = nullptr;
+        held_seq_num = 0;
+        tcpStats.current_packets_held--;
+    }
+}
+
+void TcpStreamTracker::finalize_held_packet(snort::Flow* flow)
+{
+    if ( held_packet )
+    {
+        if ( flow->ssn_state.session_flags & SSNFLAG_BLOCK )
+        {
+            Analyzer::get_local_analyzer()->finalize_daq_message(held_packet, DAQ_VERDICT_BLOCK);
+            tcpStats.held_packets_dropped++;
+        }
+        else
+        {
+            Analyzer::get_local_analyzer()->finalize_daq_message(held_packet, DAQ_VERDICT_PASS);
+            tcpStats.held_packets_passed++;
+        }
+
+        held_packet = nullptr;
+        held_seq_num = 0;
+        tcpStats.current_packets_held--;
+    }
+}
+
 void TcpStreamTracker::print()
 {
     LogMessage(" + TcpTracker +\n");
@@ -664,4 +749,3 @@ void TcpStreamTracker::print()
     LogMessage("    rcv_nxt:            %X\n", rcv_nxt);
     LogMessage("    r_win_base:         %X\n", r_win_base);
 }
-
index 920ee7d505948a904ec34c9a78e0e343adecb4fd..945b9276b606ed657c9f9e50a8a7b1ca4fb424ab 100644 (file)
@@ -22,6 +22,8 @@
 #ifndef TCP_STREAM_TRACKER_H
 #define TCP_STREAM_TRACKER_H
 
+#include <daq_common.h>
+
 #include "stream/paf.h"
 #include "stream/tcp/segment_overlap_editor.h"
 #include "stream/tcp/tcp_defs.h"
@@ -45,6 +47,11 @@ struct StreamAlertInfo
 extern const char* tcp_state_names[];
 extern const char* tcp_event_names[];
 
+namespace snort
+{
+struct Packet;
+}
+
 class TcpReassembler;
 class TcpSession;
 
@@ -276,6 +283,10 @@ public:
     virtual bool update_on_fin_sent(TcpSegmentDescriptor&);
     virtual bool is_segment_seq_valid(TcpSegmentDescriptor&);
     virtual void flush_data_on_fin_recv(TcpSegmentDescriptor&);
+    bool set_held_packet(snort::Packet*);
+    bool is_retransmit_of_held_packet(snort::Packet*);
+    void finalize_held_packet(snort::Packet*);
+    void finalize_held_packet(snort::Flow*);
 
 public:
     uint32_t snd_una = 0; // SND.UNA - send unacknowledged
@@ -339,6 +350,8 @@ protected:
 
     uint8_t mac_addr[6] = { };
     uint8_t tcp_options_len = 0;
+    DAQ_Msg_h held_packet = nullptr;
+    uint32_t held_seq_num = 0;
 
     bool mac_addr_valid = false;
     bool fin_seq_set = false;  // FIXIT-M should be obviated by tcp state
index 91968f5acfa6f4b17483ac4b1ec7202db3ad3797..5f5b47293b0f0c5e4350eb47547073f66fe95348 100644 (file)
@@ -805,6 +805,10 @@ uint8_t Stream::get_tcp_options_len(Flow* flow, bool to_server)
     return tcp_session->get_tcp_options_len(to_server);
 }
 
+bool Stream::set_packet_action_to_hold(Packet* p)
+{
+    return p->flow->session->set_packet_action_to_hold(p);
+}
 
 #ifdef UNIT_TEST
 
index d154afc2737af58a047ad2ee546c01b14e3fadd5..36d0db3990f6cd052223dabad263778438f9d88f 100644 (file)
@@ -232,6 +232,8 @@ public:
     static uint16_t get_mss(Flow*, bool to_server);
     static uint8_t get_tcp_options_len(Flow*, bool to_server);
 
+    static bool set_packet_action_to_hold(Packet*);
+
 private:
     static void set_ip_protocol(Flow*);
 };
index b187f68e61c47900513e5025b681e39cab6ebb55..a6007e99da4d8a183091d4d6051e274e0a106e64 100644 (file)
@@ -65,6 +65,7 @@ public:
     // finish indicates end of scanning
     // return false to discard any unflushed data
     virtual bool finish(Flow*) { return true; }
+    virtual bool init_partial_flush(Flow*) { return false; }
 
     // the last call to reassemble() will be made with len == 0 if
     // finish() returned true as an opportunity for a final flush
index 6698937fbd41e3962d88ec3592f768e97db34f82..c8520cf7179e2eaf186b2602ccb1b5cba1b5db69 100644 (file)
@@ -83,7 +83,16 @@ const PegInfo tcp_pegs[] =
     { CountType::SUM, "syns", "number of syn packets" },
     { CountType::SUM, "syn_acks", "number of syn-ack packets" },
     { CountType::SUM, "resets", "number of reset packets" },
-    { CountType::SUM, "fins", "number of fin packets"},
+    { CountType::SUM, "fins", "number of fin packets" },
+    { CountType::SUM, "packets_held", "number of packets held" },
+    { CountType::SUM, "held_packet_rexmits", "number of retransmits of held packets" },
+    { CountType::SUM, "held_packets_dropped", "number of held packets dropped" },
+    { CountType::SUM, "held_packets_passed", "number of held packets passed" },
+    { CountType::NOW, "cur_packets_held", "number of packets currently held" },
+    { CountType::MAX, "max_packets_held", "maximum number of packets held simultaneously" },
+    { CountType::SUM, "held_packet_limit_exceeded", "number of times limit of max held packets exceeded" },
+    { CountType::SUM, "partial_flushes", "number of partial flushes initiated" },
+    { CountType::SUM, "partial_flush_bytes", "partial flush total bytes" },
     { CountType::END, nullptr, nullptr }
 };
 
index 9a0c5ce5e3ae2be009e39a25c05b8f1e9a790eb8..8aaf85fa91616aff331d162ec608387e43e285b7 100644 (file)
@@ -98,6 +98,15 @@ struct TcpStats
     PegCount syn_acks;
     PegCount resets;
     PegCount fins;
+    PegCount total_packets_held;
+    PegCount held_packet_rexmits;
+    PegCount held_packets_dropped;
+    PegCount held_packets_passed;
+    PegCount current_packets_held;
+    PegCount max_packets_held;
+    PegCount held_packet_limit_exceeded;
+    PegCount partial_flushes;
+    PegCount partial_flush_bytes;
 };
 
 extern THREAD_LOCAL struct TcpStats tcpStats;
index b59dc062429a60e996565b6ff2b1d342da0c0fe3..6c78b2e7713ac08cb03a3d995a20c20b2006bf27 100644 (file)
@@ -29,6 +29,7 @@
 #include "log/log.h"
 #include "main/analyzer.h"
 #include "memory/memory_cap.h"
+#include "packet_io/active.h"
 #include "profiler/profiler.h"
 #include "protocols/packet_manager.h"
 #include "time/packet_time.h"
@@ -588,6 +589,8 @@ int TcpReassembler::_flush_to_seq(
                 last_pdu = pdu;
             else
                 last_pdu = nullptr;
+
+            trs.tracker->finalize_held_packet(p);
         }
         else
         {
@@ -1083,8 +1086,24 @@ int TcpReassembler::flush_on_data_policy(TcpReassemblerState& trs, Packet* p)
             return flush_on_data_policy(trs, p);
         }
     }
-    break;
+        break;
     }
+
+    if ( trs.tracker->is_retransmit_of_held_packet(p) )
+    {
+        if ( trs.tracker->splitter->init_partial_flush(p->flow) )
+        {
+            flushed += flush_stream(trs, p, trs.packet_dir, false);
+            tcpStats.partial_flushes++;
+            tcpStats.partial_flush_bytes += flushed;
+            if ( trs.sos.seg_count )
+            {
+                purge_to_seq(trs, trs.sos.seglist.head->i_seq + flushed);
+                trs.tracker->r_win_base = trs.sos.seglist_base_seq;
+            }
+        }
+    }
+
     // FIXIT-H a drop rule will yoink the seglist out from under us
     // because apply_delayed_action is only deferred to end of context
     if ( flushed and trs.sos.seg_count and
index afded4843d1bfbe017350960f3335ba804c437b2..ede24a9433b49dcb369c346c8162eb958a371a9d 100644 (file)
@@ -165,6 +165,7 @@ void TcpSession::clear_session(bool free_flow_data, bool flush_segments, bool re
         client.reassembler.flush_queued_segments(flow, true, p);
         server.reassembler.flush_queued_segments(flow, true, p);
     }
+
     client.reassembler.purge_segment_list();
     server.reassembler.purge_segment_list();
 
@@ -755,7 +756,7 @@ void TcpSession::handle_data_segment(TcpSegmentDescriptor& tsd)
             }
         }
 
-        // dunno if this is RFC but fragroute testing expects it  for the record,
+        // dunno if this is RFC but fragroute testing expects it for the record,
         // I've seen FTP data sessions that send data packets with no tcp flags set
         if ((tsd.get_tcph()->th_flags != 0) or (config->policy == StreamPolicy::OS_LINUX)
                 or (config->policy == StreamPolicy::OS_PROXY))
index 56605e6ca3201a67447a8ab64be5ff5bb7c8d79a..bd658864a5a8f4fb290eb48e181bcb098cf6ca00 100644 (file)
@@ -299,7 +299,7 @@ static const char* get_status(uint8_t stat)
 
 static const char* get_action(uint8_t act)
 {
-    const char* acts[] = { "pass", "retry", "drop", "block", "reset" };
+    const char* acts[] = { "pass", "hold", "retry", "drop", "block", "reset" };
     return lookup(acts, sizeof(acts)/sizeof(acts[0]), act);
 }