]> git.ipfire.org Git - thirdparty/suricata.git/commitdiff
eve/http: add request/response http headers 3641/head
author Maurizio Abba <mabba@lastline.com>
Fri, 3 Aug 2018 13:27:05 +0000 (14:27 +0100)
committer Victor Julien <victor@inliniac.net>
Fri, 8 Feb 2019 09:36:42 +0000 (10:36 +0100)
Add a keyword configuration dump-all-headers, with allowed values
{both, request, response}, dumping all HTTP headers in the eve-log http
object. Each header is a single object in the list request_headers
(response_headers) with the following notation:

{
    "name": <header name>,
    "value": <header value>
}

To avoid forged malicious headers, the header name size is capped at 256
bytes, the header value size at 2048.

By default, dump-all-headers is disabled.

doc/userguide/output/eve/eve-json-format.rst
src/output-json-http.c
suricata.yaml.in

index 1a0cf09dc19fdf1ac352a8b0c4cffa7ce17c1771..d744083d38e02407953443ec39cde11af73836d9 100644 (file)
@@ -165,6 +165,10 @@ In addition to the extended logging fields one can also choose to enable/add fro
 
 The benefits here of using the extended logging is to see if this action for example was a POST or perhaps if a download of an executable actually returned any bytes.
 
+It is also possible to dump every HTTP header for requests, responses, or both via the keyword ``dump-all-headers``.
+
+
+
 Examples
 ~~~~~~~~
 
@@ -212,6 +216,39 @@ Event with extended logging:
       "length":310
   }
 
+Event with ``dump-all-headers`` set to "both":
+
+::
+
+  "http": {
+      "hostname": "test.co.uk",
+      "url":"\/test\/file.json",
+      "http_user_agent": "<User-Agent>",
+      "http_content_type": "application\/json",
+      "http_refer": "http:\/\/www.test.com\/",
+      "http_method": "GET",
+      "protocol": "HTTP\/1.1",
+      "status":"200",
+      "length":310,
+      "request_headers": [
+          {
+              "name": "User-Agent",
+              "value": "Wget/1.13.4 (linux-gnu)"
+          },
+          {
+              "name": "Accept",
+              "value": "*/*"
+          }
+      ],
+      "response_headers": [
+          {
+              "name": "Date",
+              "value": "Wed, 25 Mar 2015 15:40:41 GMT"
+          }
+      ]
+  }
+
+
 Event type: DNS
 ---------------
 
index b4898ab6565f5153765b681fbe7304001b38f1de..417a9571b2d021bf5005fedf0ebdddc1b5b8c168 100644 (file)
@@ -72,11 +72,15 @@ typedef struct JsonHttpLogThread_ {
     MemBuffer *buffer;
 } JsonHttpLogThread;
 
+#define MAX_SIZE_HEADER_NAME 256
+#define MAX_SIZE_HEADER_VALUE 2048
 
 #define LOG_HTTP_DEFAULT 0
 #define LOG_HTTP_EXTENDED 1
 #define LOG_HTTP_REQUEST 2 /* request field */
 #define LOG_HTTP_ARRAY 4 /* require array handling */
+#define LOG_HTTP_REQ_HEADERS 8
+#define LOG_HTTP_RES_HEADERS 16
 
 typedef enum {
     HTTP_FIELD_ACCEPT = 0,
@@ -368,6 +372,42 @@ static void JsonHttpLogJSONExtended(json_t *js, htp_tx_t *tx)
     json_object_set_new(js, "length", json_integer(tx->response_message_len));
 }
 
+/**
+ * \brief Dump all headers of one direction of an HTTP transaction into the
+ *        eve-log "http" object as a JSON array of {"name", "value"} objects.
+ *
+ * \param js        parent json object; gains a "request_headers" or
+ *                  "response_headers" key (array) on success
+ * \param direction LOG_HTTP_REQ_HEADERS to dump request headers, otherwise
+ *                  response headers are dumped
+ * \param tx        the libhtp transaction to read headers from
+ *
+ * Header names are capped at MAX_SIZE_HEADER_NAME - 1 bytes and values at
+ * MAX_SIZE_HEADER_VALUE - 1 bytes to bound log size against forged
+ * oversized headers.
+ */
+static void JsonHttpLogJSONHeaders(json_t *js, uint32_t direction, htp_tx_t *tx)
+{
+    htp_table_t *headers = direction & LOG_HTTP_REQ_HEADERS ?
+        tx->request_headers : tx->response_headers;
+    /* incomplete or malformed transactions may not have a header table */
+    if (headers == NULL) {
+        return;
+    }
+    char name[MAX_SIZE_HEADER_NAME] = {0};
+    char value[MAX_SIZE_HEADER_VALUE] = {0};
+    size_t n = htp_table_size(headers);
+    json_t *arr = json_array();
+    if (arr == NULL) {
+        return;
+    }
+    for (size_t i = 0; i < n; i++) {
+        htp_header_t *h = htp_table_get_index(headers, i, NULL);
+        /* skip entries missing a name or value rather than dereferencing
+         * a NULL bstr */
+        if (h == NULL || h->name == NULL || h->value == NULL) {
+            continue;
+        }
+        json_t *obj = json_object();
+        if (obj == NULL) {
+            continue;
+        }
+        /* copy truncated to the cap; buffer is always NUL-terminated */
+        size_t size_name = bstr_len(h->name) < MAX_SIZE_HEADER_NAME - 1 ?
+            bstr_len(h->name) : MAX_SIZE_HEADER_NAME - 1;
+        memcpy(name, bstr_ptr(h->name), size_name);
+        name[size_name] = '\0';
+        json_object_set_new(obj, "name", SCJsonString(name));
+        size_t size_value = bstr_len(h->value) < MAX_SIZE_HEADER_VALUE - 1 ?
+            bstr_len(h->value) : MAX_SIZE_HEADER_VALUE - 1;
+        memcpy(value, bstr_ptr(h->value), size_value);
+        value[size_value] = '\0';
+        json_object_set_new(obj, "value", SCJsonString(value));
+        /* append_new steals the reference to obj */
+        json_array_append_new(arr, obj);
+    }
+    json_object_set_new(js, direction & LOG_HTTP_REQ_HEADERS ?
+            "request_headers" : "response_headers", arr);
+}
+
 static void BodyPrintableBuffer(json_t *js, HtpBody *body, const char *key)
 {
     if (body->sb != NULL && body->sb->buf != NULL) {
@@ -456,6 +496,10 @@ static void JsonHttpLogJSON(JsonHttpLogThread *aft, json_t *js, htp_tx_t *tx, ui
         JsonHttpLogJSONCustom(http_ctx, hjs, tx);
     if (http_ctx->flags & LOG_HTTP_EXTENDED)
         JsonHttpLogJSONExtended(hjs, tx);
+    if (http_ctx->flags & LOG_HTTP_REQ_HEADERS)
+        JsonHttpLogJSONHeaders(hjs, LOG_HTTP_REQ_HEADERS, tx);
+    if (http_ctx->flags & LOG_HTTP_RES_HEADERS)
+        JsonHttpLogJSONHeaders(hjs, LOG_HTTP_RES_HEADERS, tx);
 
     json_object_set_new(js, "http", hjs);
 }
@@ -583,6 +627,18 @@ static OutputInitResult OutputHttpLogInit(ConfNode *conf)
                 http_ctx->flags = LOG_HTTP_EXTENDED;
             }
         }
+        const char *all_headers = ConfNodeLookupChildValue(
+                conf, "dump-all-headers");
+        if (all_headers != NULL) {
+            if (strcmp(all_headers, "both") == 0) {
+                http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
+                http_ctx->flags |= LOG_HTTP_RES_HEADERS;
+            } else if (strcmp(all_headers, "request") == 0) {
+                http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
+            } else if (strcmp(all_headers, "response") == 0) {
+                http_ctx->flags |= LOG_HTTP_RES_HEADERS;
+            }
+        }
     }
     http_ctx->xff_cfg = SCCalloc(1, sizeof(HttpXFFCfg));
     if (http_ctx->xff_cfg != NULL) {
@@ -661,6 +717,18 @@ static OutputInitResult OutputHttpLogInitSub(ConfNode *conf, OutputCtx *parent_c
                 }
             }
         }
+        const char *all_headers = ConfNodeLookupChildValue(
+                conf, "dump-all-headers");
+        if (all_headers != NULL) {
+            /* exact comparison with strcmp: the previous strncmp prefix
+             * match silently accepted invalid values such as "requests"
+             * or "responsex"; this also matches the handling in
+             * OutputHttpLogInit */
+            if (strcmp(all_headers, "both") == 0) {
+                http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
+                http_ctx->flags |= LOG_HTTP_RES_HEADERS;
+            } else if (strcmp(all_headers, "request") == 0) {
+                http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
+            } else if (strcmp(all_headers, "response") == 0) {
+                http_ctx->flags |= LOG_HTTP_RES_HEADERS;
+            }
+        }
     }
 
     if (conf != NULL && ConfNodeLookupChild(conf, "xff") != NULL) {
index 61cc16898a2dd6eb07a7cc18e6c18237e9b9651f..483050b420ec7fbcb2742ce3c8de8e665bcf0526 100644 (file)
@@ -159,6 +159,9 @@ outputs:
             # custom allows additional http fields to be included in eve-log
             # the example below adds three additional fields when uncommented
             #custom: [Accept-Encoding, Accept-Language, Authorization]
+            # set this value to one of {both, request, response} to dump all
+            # http headers for every http request and/or response
+            # dump-all-headers: both
         - dns:
             # This configuration uses the new DNS logging format,
             # the old configuration is still available: