alert http any any -> any any (msg:"SURICATA HTTP too many warnings"; flow:established; app-layer-event:http.too_many_warnings; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221050; rev:1;)
-# next sid 2221051
+alert http any any -> any any (msg:"SURICATA HTTP invalid Range header value"; flow:established; app-layer-event:http.range_invalid; flowint:http.anomaly.count,+,1; classtype:protocol-command-decode; sid:2221051; rev:1;)
+
+# next sid 2221052
app-layer-htp.h \
app-layer-htp-libhtp.h \
app-layer-htp-mem.h \
+ app-layer-htp-range.h \
app-layer-htp-xff.h \
app-layer-http2.h \
app-layer-ike.h \
app-layer-htp-file.c \
app-layer-htp-libhtp.c \
app-layer-htp-mem.c \
+ app-layer-htp-range.c \
app-layer-htp-xff.c \
app-layer-http2.c \
app-layer-ike.c \
#include "suricata.h"
#include "suricata-common.h"
#include "debug.h"
+#include "util-validate.h"
#include "decode.h"
#include "threads.h"
sbcfg = &s->cfg->response.sbcfg;
+ // we shall not open a new file if there is a current one
+ DEBUG_VALIDATE_BUG_ON(s->file_range != NULL);
} else {
if (s->files_ts == NULL) {
s->files_ts = FileContainerAlloc();
return 0;
}
+/**
+ * Parses and validates an HTTP Content-Range header value.
+ *
+ * On a parse failure a RANGE_INVALID decoder event is raised on the
+ * transaction so rules can match the anomaly.
+ *
+ * @param[in]  rawvalue raw Content-Range header value
+ * @param[out] range    parsed start/end/size on success
+ * @param[in]  s        HTTP state, its event counter is incremented on failure
+ * @param[in]  htud     per-transaction user data receiving the decoder event
+ *
+ * @return 0 when the range is complete and usable;
+ *         the non-zero parser result when parsing failed;
+ *         -2 when the range lacks end or total size information;
+ *         -3 when the range actually spans the whole file.
+ */
+static int HTPParseAndCheckContentRange(
+        bstr *rawvalue, HtpContentRange *range, HtpState *s, HtpTxUserData *htud)
+{
+    int r = HTPParseContentRange(rawvalue, range);
+    if (r != 0) {
+        // flag the malformed header so http.range_invalid rules can fire
+        AppLayerDecoderEventsSetEventRaw(&htud->decoder_events, HTTP_DECODER_EVENT_RANGE_INVALID);
+        s->events++;
+        SCLogDebug("parsing range failed, going back to normal file");
+        return r;
+    }
+    /* crparsed.end <= 0 means a range with only size
+     * this is the answer to an unsatisfied range with the whole file
+     * crparsed.size <= 0 means an unknown size, so we do not know
+     * when to close it...
+     */
+    if (range->end <= 0 || range->size <= 0) {
+        SCLogDebug("range without all information");
+        return -2;
+    } else if (range->end == range->size - 1 && range->start == 0) {
+        // whole file in one range: no reassembly needed
+        // NOTE(review): debug message duplicates the branch above; consider distinguishing
+        SCLogDebug("range without all information");
+        return -3;
+    }
+    return r;
+}
+
/**
 * \brief Open a file in the response, reassembling it across multiple
 *        range responses via the Content-Range header when possible.
 *
 * Falls back to a plain HTPFileOpen when the range information cannot
 * be used: parse error, incomplete range, whole-file range, missing
 * host information, or range-container memcap reached.
 *
 * \retval 0 ok
 * \retval -1 error
 */
-int HTPFileSetRange(HtpState *s, bstr *rawvalue)
+int HTPFileOpenWithRange(HtpState *s, HtpTxUserData *txud, const uint8_t *filename,
+        uint16_t filename_len, const uint8_t *data, uint32_t data_len, uint64_t txid,
+        bstr *rawvalue, HtpTxUserData *htud)
{
    SCEnter();
+    uint16_t flags;
    if (s == NULL) {
        SCReturnInt(-1);
    }
+    // This function is only called STREAM_TOCLIENT from HtpResponseBodyHandle
+    HtpContentRange crparsed;
+    if (HTPParseAndCheckContentRange(rawvalue, &crparsed, s, htud) != 0) {
+        // unusable range: process the data as a regular file
+        return HTPFileOpen(
+                s, txud, filename, (uint32_t)filename_len, data, data_len, txid, STREAM_TOCLIENT);
+    }
+    // honour per-state / per-tx file-store settings for this direction
+    flags = FileFlowToFlags(s->f, STREAM_TOCLIENT);
+    if ((s->flags & HTP_FLAG_STORE_FILES_TS) ||
+            ((s->flags & HTP_FLAG_STORE_FILES_TX_TS) && txid == s->store_tx_id)) {
+        flags |= FILE_STORE;
+        flags &= ~FILE_NOSTORE;
+    } else if (!(flags & FILE_STORE) && (s->f->file_flags & FLOWFILE_NO_STORE_TC)) {
+        flags |= FILE_NOSTORE;
+    }
+
    FileContainer * files = s->files_tc;
    if (files == NULL) {
-        SCLogDebug("no files in state");
+        s->files_tc = FileContainerAlloc();
+        if (s->files_tc == NULL) {
+            SCReturnInt(-1);
+        }
+        files = s->files_tc;
+    }
+
+    // also open the file in this flow's own container, as for a normal file
+    if (FileOpenFileWithId(files, &s->cfg->response.sbcfg, s->file_track_id++, filename,
+                filename_len, data, data_len, flags) != 0) {
        SCReturnInt(-1);
    }
+    FileSetTx(files->tail, txid);
-    HtpContentRange crparsed;
-    if (HTPParseContentRange(rawvalue, &crparsed) != 0) {
-        SCLogDebug("parsing range failed");
-        SCReturnInt(-2);
+    if (FileSetRange(files, crparsed.start, crparsed.end) < 0) {
+        SCLogDebug("set range failed");
    }
-    if (crparsed.end <= 0) {
-        SCLogDebug("negative end in range");
-        SCReturnInt(-3);
+    htp_tx_t *tx = htp_list_get(s->conn->transactions, txid);
+    if (!tx) {
+        SCReturnInt(-1);
    }
-    int retval = FileSetRange(files, crparsed.start, crparsed.end);
-    if (retval == -1) {
-        SCLogDebug("set range failed");
+    // lookup key for the shared container : host + filename
+    uint8_t *keyurl;
+    size_t keylen;
+    if (tx->request_hostname != NULL) {
+        keylen = bstr_len(tx->request_hostname) + filename_len + 1;
+        keyurl = SCMalloc(keylen);
+        if (keyurl == NULL) {
+            SCReturnInt(-1);
+        }
+        memcpy(keyurl, bstr_ptr(tx->request_hostname), bstr_len(tx->request_hostname));
+        memcpy(keyurl + bstr_len(tx->request_hostname), filename, filename_len);
+        keyurl[keylen - 1] = 0;
+    } else {
+        // do not reassemble file without host info
+        return HTPFileOpen(
+                s, txud, filename, (uint32_t)filename_len, data, data_len, txid, STREAM_TOCLIENT);
    }
-    SCReturnInt(retval);
+    HttpRangeContainerFile *file_range_container =
+            HttpRangeContainerUrlGet(keyurl, keylen, &s->f->lastts);
+    SCFree(keyurl);
+    if (file_range_container == NULL) {
+        // probably reached memcap
+        return HTPFileOpen(
+                s, txud, filename, (uint32_t)filename_len, data, data_len, txid, STREAM_TOCLIENT);
+    }
+    s->file_range = ContainerUrlRangeOpenFile(file_range_container, crparsed.start, crparsed.end,
+            crparsed.size, &s->cfg->response.sbcfg, filename, filename_len, flags, data, data_len);
+    if (s->file_range == NULL) {
+        // probably reached memcap
+        return HTPFileOpen(
+            s, txud, filename, (uint32_t)filename_len, data, data_len, txid, STREAM_TOCLIENT);
+    }
+
+    SCReturnInt(0);
}
/**
goto end;
}
+ if (s->file_range != NULL) {
+ if (ContainerUrlRangeAppendData(s->file_range, data, data_len) < 0) {
+ SCLogDebug("Failed to append data");
+ }
+ }
result = FileAppendData(files, data, data_len);
if (result == -1) {
SCLogDebug("appending data failed");
} else if (result == -2) {
retval = -2;
}
+ if (s->file_range != NULL) {
+ if (ContainerUrlRangeAppendData(s->file_range, data, data_len) < 0) {
+ SCLogDebug("Failed to append data");
+ }
+ File *ranged = ContainerUrlRangeClose(s->file_range, flags);
+ if (ranged) {
+ FileContainerAdd(files, ranged);
+ }
+ SCFree(s->file_range);
+ s->file_range = NULL;
+ }
end:
SCReturnInt(retval);
int HTPFileOpen(HtpState *, HtpTxUserData *, const uint8_t *, uint16_t, const uint8_t *, uint32_t,
uint64_t, uint8_t);
int HTPParseContentRange(bstr * rawvalue, HtpContentRange *range);
-int HTPFileSetRange(HtpState *, bstr *rawvalue);
+int HTPFileOpenWithRange(HtpState *, HtpTxUserData *, const uint8_t *, uint16_t, const uint8_t *,
+ uint32_t, uint64_t, bstr *rawvalue, HtpTxUserData *htud);
int HTPFileStoreChunk(HtpState *, const uint8_t *, uint32_t, uint8_t);
int HTPFileClose(HtpState *, const uint8_t *, uint32_t, uint8_t, uint8_t);
--- /dev/null
+/* Copyright (C) 2021 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/**
+ * \file
+ *
+ * \author Philippe Antoine <p.antoine@catenacyber.fr>
+ */
+
+#include "suricata-common.h"
+#include "app-layer-htp-range.h"
+#include "util-misc.h" //ParseSizeStringU64
+#include "util-thash.h" //HashTable
+#include "util-memcmp.h" //SCBufferCmp
+#include "util-hash-string.h" //StringHashDjb2
+#include "util-validate.h" //DEBUG_VALIDATE_BUG_ON
+#include "util-byte.h" //StringParseUint32
+
+// Global hash table holding the per-URL range containers, together
+// with the configured timeout for idle entries.
+typedef struct ContainerTHashTable {
+    THashTableContext *ht;
+    uint32_t timeout; // seconds before an unreferenced entry may expire
+} ContainerTHashTable;
+
+// globals
+ContainerTHashTable ContainerUrlRangeList;
+
+#define CONTAINER_URLRANGE_HASH_SIZE 256
+
+/**
+ * \brief Total order on range buffers: by start, then buflen, then offset.
+ *
+ * Used by the red-black tree to keep out-of-order ranges sorted.
+ */
+int HttpRangeContainerBufferCompare(HttpRangeContainerBuffer *a, HttpRangeContainerBuffer *b)
+{
+    // lexical order : start, buflen, offset
+    if (a->start > b->start)
+        return 1;
+    if (a->start < b->start)
+        return -1;
+    if (a->buflen > b->buflen)
+        return 1;
+    if (a->buflen < b->buflen)
+        return -1;
+    if (a->offset > b->offset)
+        return 1;
+    if (a->offset < b->offset)
+        return -1;
+    return 0;
+}
+
+RB_GENERATE(HTTP_RANGES, HttpRangeContainerBuffer, rb, HttpRangeContainerBufferCompare);
+
+// THash "set" callback : initializes a freshly inserted hash entry
+// from the lookup structure, which only carries the key.
+static int ContainerUrlRangeSet(void *dst, void *src)
+{
+    HttpRangeContainerFile *src_s = src;
+    HttpRangeContainerFile *dst_s = dst;
+    dst_s->len = src_s->len;
+    dst_s->key = SCMalloc(dst_s->len);
+    BUG_ON(dst_s->key == NULL); // abort on allocation failure
+    memcpy(dst_s->key, src_s->key, dst_s->len);
+    dst_s->files = FileContainerAlloc();
+    BUG_ON(dst_s->files == NULL);
+    RB_INIT(&dst_s->fragment_tree);
+    dst_s->flags = 0;
+    dst_s->totalsize = 0;
+    SCMutexInit(&dst_s->mutex, NULL);
+    dst_s->hdata = NULL;
+
+    return 0;
+}
+
+// THash compare callback : two entries are equal when their keys match.
+static bool ContainerUrlRangeCompare(void *a, void *b)
+{
+    const HttpRangeContainerFile *as = a;
+    const HttpRangeContainerFile *bs = b;
+    if (SCBufferCmp(as->key, as->len, bs->key, bs->len) == 0) {
+        return true;
+    }
+    return false;
+}
+
+// THash hash callback : djb2 hash of the entry's key.
+static uint32_t ContainerUrlRangeHash(void *s)
+{
+    HttpRangeContainerFile *cur = s;
+    uint32_t h = StringHashDjb2(cur->key, cur->len);
+    return h;
+}
+
+// base data stays in hash
+static void ContainerUrlRangeFree(void *s)
+{
+ HttpRangeContainerBuffer *range, *tmp;
+
+ HttpRangeContainerFile *cu = s;
+ SCFree(cu->key);
+ cu->key = NULL;
+ FileContainerFree(cu->files);
+ cu->files = NULL;
+ RB_FOREACH_SAFE (range, HTTP_RANGES, &cu->fragment_tree, tmp) {
+ RB_REMOVE(HTTP_RANGES, &cu->fragment_tree, range);
+ SCFree(range->buffer);
+ (void)SC_ATOMIC_SUB(ContainerUrlRangeList.ht->memuse, range->buflen);
+ SCFree(range);
+ }
+ SCMutexDestroy(&cu->mutex);
+}
+
+// Returns true when the entry has expired AND no flow still references it.
+static bool ContainerValueRangeTimeout(HttpRangeContainerFile *cu, struct timeval *ts)
+{
+    // we only timeout if we have no flow referencing us
+    SCMutexLock(&cu->mutex);
+    bool r = ((uint32_t)ts->tv_sec > cu->expire && SC_ATOMIC_GET(cu->hdata->use_cnt) == 0);
+    SCMutexUnlock(&cu->mutex);
+    return r;
+}
+
+// Pushes back the entry's expiration time (epoch seconds).
+static void ContainerUrlRangeUpdate(HttpRangeContainerFile *cu, uint32_t expire)
+{
+    cu->expire = expire;
+}
+
+#define HTTP_RANGE_DEFAULT_TIMEOUT 60
+#define HTTP_RANGE_DEFAULT_MEMCAP 100 * 1024 * 1024
+
+/**
+ * \brief Initialize the global HTTP range containers hash table.
+ *
+ * Reads memcap and timeout from the configuration; invalid values
+ * fall back to the built-in defaults, as the warning messages state.
+ */
+void HttpRangeContainersInit(void)
+{
+    SCLogDebug("containers start");
+    const char *str = NULL;
+    uint64_t memcap = HTTP_RANGE_DEFAULT_MEMCAP;
+    uint32_t timeout = HTTP_RANGE_DEFAULT_TIMEOUT;
+    if (ConfGetValue("app-layer.protocols.http.urlrange.memcap", &str) == 1) {
+        if (ParseSizeStringU64(str, &memcap) < 0) {
+            SCLogWarning(SC_ERR_INVALID_VALUE,
+                    "memcap value cannot be deduced: %s,"
+                    " resetting to default",
+                    str);
+            // reset to the default as announced (0 would disable the memcap)
+            memcap = HTTP_RANGE_DEFAULT_MEMCAP;
+        }
+    }
+    if (ConfGetValue("app-layer.protocols.http.urlrange.timeout", &str) == 1) {
+        if (StringParseUint32(&timeout, 10, strlen(str), str) <= 0) {
+            SCLogWarning(SC_ERR_INVALID_VALUE,
+                    "timeout value cannot be deduced: %s,"
+                    " resetting to default",
+                    str);
+            // reset to the default as announced; the parser may have
+            // partially modified timeout on failure
+            timeout = HTTP_RANGE_DEFAULT_TIMEOUT;
+        }
+    }
+
+    ContainerUrlRangeList.ht =
+            THashInit("app-layer.protocols.http.urlrange", sizeof(HttpRangeContainerFile),
+                    ContainerUrlRangeSet, ContainerUrlRangeFree, ContainerUrlRangeHash,
+                    ContainerUrlRangeCompare, false, memcap, CONTAINER_URLRANGE_HASH_SIZE);
+    ContainerUrlRangeList.timeout = timeout;
+
+    SCLogDebug("containers started");
+}
+
+// Tears down the global range containers hash table at shutdown.
+void HttpRangeContainersDestroy(void)
+{
+    THashShutdown(ContainerUrlRangeList.ht);
+}
+
+/**
+ * \brief Expire unused range container entries.
+ *
+ * \param ts current time
+ * \return number of entries that timed out and were removed
+ */
+uint32_t HttpRangeContainersTimeoutHash(struct timeval *ts)
+{
+    uint32_t cnt = 0;
+
+    for (size_t i = 0; i < ContainerUrlRangeList.ht->config.hash_size; i++) {
+        THashHashRow *hb = &ContainerUrlRangeList.ht->array[i];
+
+        // best effort: skip buckets that are busy
+        if (HRLOCK_TRYLOCK(hb) != 0)
+            continue;
+        /* hash bucket is now locked */
+        THashData *h = hb->head;
+        while (h) {
+            THashData *n = h->next;
+            if (ContainerValueRangeTimeout(h->data, ts)) {
+                /* remove from the hash */
+                if (h->prev != NULL)
+                    h->prev->next = h->next;
+                if (h->next != NULL)
+                    h->next->prev = h->prev;
+                if (hb->head == h)
+                    hb->head = h->next;
+                if (hb->tail == h)
+                    hb->tail = h->prev;
+                h->next = NULL;
+                h->prev = NULL;
+                // we should log the timed out file somehow...
+                // but it does not belong to any flow...
+                ContainerUrlRangeFree(h->data);
+                THashDataMoveToSpare(ContainerUrlRangeList.ht, h);
+                // count the removal so callers get a meaningful total
+                cnt++;
+            }
+            h = n;
+        }
+        HRLOCK_UNLOCK(hb);
+    }
+
+    return cnt;
+}
+
+/**
+ * \brief Get or create the range container for a key (host + filename).
+ *
+ * Refreshes the entry's expiration time. The entry's use count stays
+ * incremented for the caller — presumably the per-flow block keeps this
+ * reference until it is closed; confirm against THashGetFromHash semantics.
+ *
+ * \return the HttpRangeContainerFile, or NULL (e.g. memcap reached)
+ */
+void *HttpRangeContainerUrlGet(const uint8_t *key, size_t keylen, struct timeval *ts)
+{
+    HttpRangeContainerFile lookup;
+    // cast so as not to have const in the structure
+    lookup.key = (uint8_t *)key;
+    lookup.len = keylen;
+    struct THashDataGetResult res = THashGetFromHash(ContainerUrlRangeList.ht, &lookup);
+    if (res.data) {
+        // nothing more to do if (res.is_new)
+        ContainerUrlRangeUpdate(res.data->data, ts->tv_sec + ContainerUrlRangeList.timeout);
+        HttpRangeContainerFile *c = res.data->data;
+        c->hdata = res.data;
+        THashDataUnlock(res.data);
+        return res.data->data;
+    }
+    return NULL;
+}
+
+/**
+ * \brief Create the per-flow block tracking one range request.
+ *
+ * Depending on how [start, end] relates to the bytes already written
+ * to the shared file, the block will: append directly, skip a pure
+ * overlap, skip then append, or buffer into a new out-of-order range.
+ *
+ * \return the new block, or NULL on failure
+ */
+static HttpRangeContainerBlock *ContainerUrlRangeOpenFileAux(HttpRangeContainerFile *c,
+        uint64_t start, uint64_t end, uint64_t total, const StreamingBufferConfig *sbcfg,
+        const uint8_t *name, uint16_t name_len, uint16_t flags)
+{
+    SCMutexLock(&c->mutex);
+    if (c->files->tail == NULL) {
+        // first range seen for this file : open it in the shared container
+        if (FileOpenFileWithId(c->files, sbcfg, 0, name, name_len, NULL, 0, flags) != 0) {
+            SCLogDebug("open file for range failed");
+            THashDecrUsecnt(c->hdata);
+            SCMutexUnlock(&c->mutex);
+            return NULL;
+        }
+    }
+    HttpRangeContainerBlock *curf = SCCalloc(1, sizeof(HttpRangeContainerBlock));
+    if (curf == NULL) {
+        THashDecrUsecnt(c->hdata);
+        SCMutexUnlock(&c->mutex);
+        return NULL;
+    }
+    if (total > c->totalsize) {
+        // TODOask add checks about totalsize remaining the same
+        c->totalsize = total;
+    }
+    uint64_t buflen = end - start + 1;
+    if (start == c->files->tail->size && !c->appending) {
+        // easy case : append to current file
+        curf->container = c;
+        c->appending = true;
+        SCMutexUnlock(&c->mutex);
+        return curf;
+    } else if (start < c->files->tail->size && c->files->tail->size - start >= buflen) {
+        // only overlap
+        THashDecrUsecnt(c->hdata);
+        // redundant to be explicit that this block is independent
+        curf->container = NULL;
+        curf->toskip = buflen;
+        SCMutexUnlock(&c->mutex);
+        return curf;
+    } else if (start < c->files->tail->size && c->files->tail->size - start < buflen &&
+               !c->appending) {
+        // skip first overlap, then append
+        curf->toskip = c->files->tail->size - start;
+        c->appending = true;
+        curf->container = c;
+        SCMutexUnlock(&c->mutex);
+        return curf;
+    }
+    // else {
+    // block/range to be inserted in ordered linked list
+    if (!(THASH_CHECK_MEMCAP(ContainerUrlRangeList.ht, buflen))) {
+        // TODOask release memory for other ranges cf RangeContainerFree(c);
+        // skips this range
+        curf->toskip = buflen;
+        curf->container = NULL;
+        THashDecrUsecnt(c->hdata);
+        SCMutexUnlock(&c->mutex);
+        return curf;
+    }
+    curf->container = c;
+    (void)SC_ATOMIC_ADD(ContainerUrlRangeList.ht->memuse, buflen);
+    HttpRangeContainerBuffer *range = SCCalloc(1, sizeof(HttpRangeContainerBuffer));
+    BUG_ON(range == NULL); // abort on allocation failure
+    range->buffer = SCMalloc(buflen);
+    BUG_ON(range->buffer == NULL);
+    range->buflen = buflen;
+    range->start = start;
+
+    curf->current = range;
+    SCMutexUnlock(&c->mutex);
+    return curf;
+}
+
+/**
+ * \brief Open a per-flow range block and append its first chunk of data.
+ *
+ * \return the new block, or NULL on failure (allocation failure, file
+ *         open failure in the shared container)
+ */
+HttpRangeContainerBlock *ContainerUrlRangeOpenFile(HttpRangeContainerFile *c, uint64_t start,
+        uint64_t end, uint64_t total, const StreamingBufferConfig *sbcfg, const uint8_t *name,
+        uint16_t name_len, uint16_t flags, const uint8_t *data, size_t len)
+{
+    HttpRangeContainerBlock *r =
+            ContainerUrlRangeOpenFileAux(c, start, end, total, sbcfg, name, name_len, flags);
+    // the aux call can return NULL: do not dereference it in that case
+    if (r != NULL && ContainerUrlRangeAppendData(r, data, len) < 0) {
+        SCLogDebug("Failed to append data while openeing");
+    }
+    return r;
+}
+
+/**
+ * \brief Feed data to a per-flow range block.
+ *
+ * Buffers into the block's out-of-order range, skips overlapping
+ * bytes, or appends directly to the shared file. data == NULL with
+ * len > 0 represents a stream gap.
+ *
+ * \return 0 on success, FileAppendData's result otherwise
+ */
+int ContainerUrlRangeAppendData(HttpRangeContainerBlock *c, const uint8_t *data, size_t len)
+{
+    if (len == 0) {
+        return 0;
+    }
+    // first check if we have a current allocated buffer to copy to
+    // in the case of an unordered range being handled
+    if (c->current) {
+        if (data == NULL) {
+            // just feed the gap in the current position, instead of its right one
+            return FileAppendData(c->container->files, data, len);
+        } else if (c->current->offset + len <= c->current->buflen) {
+            memcpy(c->current->buffer + c->current->offset, data, len);
+            c->current->offset += len;
+        } else {
+            // more data than this range announced: truncate to capacity
+            memcpy(c->current->buffer + c->current->offset, data,
+                    c->current->buflen - c->current->offset);
+            c->current->offset = c->current->buflen;
+        }
+        return 0;
+        // then check if we are skipping
+    } else if (c->toskip > 0) {
+        if (c->toskip >= len) {
+            c->toskip -= len;
+            return 0;
+        } // else
+        if (c->container == NULL) {
+            // independent (skip-only) block: no shared file to append to
+            c->toskip = 0;
+            return 0;
+        }
+        DEBUG_VALIDATE_BUG_ON(c->container->files == NULL);
+        int r;
+        if (data == NULL) {
+            // gap overlaping already known data
+            r = FileAppendData(c->container->files, NULL, len - c->toskip);
+        } else {
+            r = FileAppendData(c->container->files, data + c->toskip, len - c->toskip);
+        }
+        c->toskip = 0;
+        return r;
+    } // else {
+    // last we are ordered, simply append
+    if (c->container == NULL) {
+        // independent block with nothing left to skip: nothing to do
+        return 0;
+    }
+    DEBUG_VALIDATE_BUG_ON(c->container->files == NULL);
+    return FileAppendData(c->container->files, data, len);
+}
+
+// Closes the container's file, moving ownership of it to the caller,
+// and drops the hash entry once no flow references it anymore.
+static void ContainerUrlRangeFileClose(HttpRangeContainerFile *c, uint16_t flags)
+{
+    DEBUG_VALIDATE_BUG_ON(SC_ATOMIC_GET(c->hdata->use_cnt) == 0);
+    THashDecrUsecnt(c->hdata);
+    // move ownership of file c->files->head to caller
+    FileCloseFile(c->files, NULL, 0, c->flags | flags);
+    c->files->head = NULL;
+    c->files->tail = NULL;
+    if (SC_ATOMIC_GET(c->hdata->use_cnt) == 0) {
+        THashRemoveFromHash(ContainerUrlRangeList.ht, c);
+    }
+    // otherwise, the hash entry will be used for another read of the file
+}
+
+/**
+ * \brief Close a per-flow range block.
+ *
+ * Out-of-order blocks are stored (or dropped when obsolete); in-order
+ * blocks merge any buffered ranges that now connect and close the file
+ * once the expected total size has been reached.
+ *
+ * \return the completed File (ownership moved to caller), or NULL
+ */
+File *ContainerUrlRangeClose(HttpRangeContainerBlock *c, uint16_t flags)
+{
+    if (c->container == NULL) {
+        // everything was just skipped : nothing to do
+        return NULL;
+    }
+
+    SCMutexLock(&c->container->mutex);
+
+    if (c->current) {
+        // some out-or-order range is finished
+        if (c->container->files->tail &&
+                c->container->files->tail->size >= c->current->start + c->current->offset) {
+            // if the range has become obsolete because we received the data already
+            // we just free it
+            (void)SC_ATOMIC_SUB(ContainerUrlRangeList.ht->memuse, c->current->buflen);
+            SCFree(c->current->buffer);
+            SCFree(c->current);
+        } else {
+            // otherwise insert in red and black tree
+            HTTP_RANGES_RB_INSERT(&c->container->fragment_tree, c->current);
+        }
+        THashDecrUsecnt(c->container->hdata);
+        SCMutexUnlock(&c->container->mutex);
+        return NULL;
+    }
+
+    // else {
+    if (c->toskip > 0) {
+        // was only an overlapping range, truncated before new bytes
+        THashDecrUsecnt(c->container->hdata);
+        SCMutexUnlock(&c->container->mutex);
+        return NULL;
+    }
+
+    // else {
+    // we just finished an in-order block
+    c->container->appending = false;
+    DEBUG_VALIDATE_BUG_ON(c->container->files->tail == NULL);
+    File *f = c->container->files->tail;
+
+    // have we reached a saved range ?
+    HttpRangeContainerBuffer *range, *safe = NULL;
+    // SAFE iteration: nodes are removed from the tree while walking,
+    // which a plain RB_FOREACH does not support
+    RB_FOREACH_SAFE (range, HTTP_RANGES, &c->container->fragment_tree, safe) {
+        if (f->size < range->start) {
+            break;
+        }
+        if (f->size == range->start) {
+            // a new range just begins where we ended, append it
+            if (FileAppendData(c->container->files, range->buffer, range->offset) != 0) {
+                ContainerUrlRangeFileClose(c->container, flags);
+                SCMutexUnlock(&c->container->mutex);
+                return f;
+            }
+        } else {
+            // the range starts before where we ended
+            uint64_t overlap = f->size - range->start;
+            if (overlap < range->offset) {
+                // And the range ends beyond where we ended
+                // in this case of overlap, only add the extra data
+                if (FileAppendData(c->container->files, range->buffer + overlap,
+                            range->offset - overlap) != 0) {
+                    ContainerUrlRangeFileClose(c->container, flags);
+                    SCMutexUnlock(&c->container->mutex);
+                    return f;
+                }
+            }
+        }
+        // anyways, remove this range from the tree as we are now beyond it,
+        // and release its buffer and memcap accounting (it was consumed or obsolete)
+        RB_REMOVE(HTTP_RANGES, &c->container->fragment_tree, range);
+        (void)SC_ATOMIC_SUB(ContainerUrlRangeList.ht->memuse, range->buflen);
+        SCFree(range->buffer);
+        SCFree(range);
+    }
+
+    if (f->size >= c->container->totalsize) {
+        // we finished the whole file
+        ContainerUrlRangeFileClose(c->container, flags);
+    } else {
+        // we are expecting more ranges
+        THashDecrUsecnt(c->container->hdata);
+        f = NULL;
+    }
+    SCMutexUnlock(&c->container->mutex);
+    return f;
+}
--- /dev/null
+/* Copyright (C) 2021 Open Information Security Foundation
+ *
+ * You can copy, redistribute or modify this Program under the terms of
+ * the GNU General Public License version 2 as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __APP_LAYER_HTP_RANGE_H__
+#define __APP_LAYER_HTP_RANGE_H__
+
+#include "util-thash.h"
+
+void HttpRangeContainersInit(void);
+void HttpRangeContainersDestroy(void);
+uint32_t HttpRangeContainersTimeoutHash(struct timeval *ts);
+
+void *HttpRangeContainerUrlGet(const uint8_t *key, size_t keylen, struct timeval *ts);
+
+// an out-of-order range buffered until earlier data arrives
+typedef struct HttpRangeContainerBuffer {
+    /** red and black tree */
+    RB_ENTRY(HttpRangeContainerBuffer) rb;
+    /** allocated buffer */
+    uint8_t *buffer;
+    /** length of buffer */
+    uint64_t buflen;
+    /** the start of the range (offset relative to the absolute beginning of the file) */
+    uint64_t start;
+    /** offset of bytes written in buffer (relative to the start of the range),
+     *  i.e. how much of the range has been received so far */
+    uint64_t offset;
+} HttpRangeContainerBuffer;
+
+int HttpRangeContainerBufferCompare(HttpRangeContainerBuffer *a, HttpRangeContainerBuffer *b);
+
+RB_HEAD(HTTP_RANGES, HttpRangeContainerBuffer);
+RB_PROTOTYPE(HTTP_RANGES, HttpRangeContainerBuffer, rb, HttpRangeContainerBufferCompare);
+
+/** Item in hash table for a file in multiple ranges
+ * Thread-safety is ensured by the thread-safe hash table
+ * The number of use is increased for each flow opening a new HttpRangeContainerBlock
+ * until it closes this HttpRangeContainerBlock
+ */
+typedef struct HttpRangeContainerFile {
+    /** key for hashtable */
+    uint8_t *key;
+    /** key length */
+    uint32_t len;
+    /** pointer to hashtable data, for use count */
+    THashData *hdata;
+    /** expire time in epoch */
+    uint32_t expire;
+    /** total expected size of the file in ranges */
+    uint64_t totalsize;
+    /** file flags */
+    uint16_t flags;
+    /** file container, with only one file */
+    FileContainer *files;
+    /** red and black tree list of ranges which came out of order */
+    struct HTTP_RANGES fragment_tree;
+    /** whether a range file is currently appending */
+    bool appending;
+    /** mutex */
+    SCMutex mutex;
+} HttpRangeContainerFile;
+
+/** A structure representing a single range request :
+ * either skipping, buffering, or appending
+ * As this belongs to a flow, appending data to it is ensured to be thread-safe
+ * Only one block per file has the pointer to the container
+ */
+typedef struct HttpRangeContainerBlock {
+    /** state where we skip content */
+    uint64_t toskip;
+    /** current out of order range to write into */
+    HttpRangeContainerBuffer *current;
+    /** pointer to the main file container, where to directly append data;
+     *  NULL for an independent (skip-only) block */
+    HttpRangeContainerFile *container;
+} HttpRangeContainerBlock;
+
+int ContainerUrlRangeAppendData(HttpRangeContainerBlock *c, const uint8_t *data, size_t len);
+File *ContainerUrlRangeClose(HttpRangeContainerBlock *c, uint16_t flags);
+
+HttpRangeContainerBlock *ContainerUrlRangeOpenFile(HttpRangeContainerFile *c, uint64_t start,
+ uint64_t end, uint64_t total, const StreamingBufferConfig *sbcfg, const uint8_t *name,
+ uint16_t name_len, uint16_t flags, const uint8_t *data, size_t len);
+
+#endif /* __APP_LAYER_HTP_RANGE_H__ */
static uint64_t htp_state_memcnt = 0;
#endif
-SCEnumCharMap http_decoder_event_table[ ] = {
- { "UNKNOWN_ERROR",
- HTTP_DECODER_EVENT_UNKNOWN_ERROR},
- { "GZIP_DECOMPRESSION_FAILED",
- HTTP_DECODER_EVENT_GZIP_DECOMPRESSION_FAILED},
- { "REQUEST_FIELD_MISSING_COLON",
- HTTP_DECODER_EVENT_REQUEST_FIELD_MISSING_COLON},
- { "RESPONSE_FIELD_MISSING_COLON",
- HTTP_DECODER_EVENT_RESPONSE_FIELD_MISSING_COLON},
- { "INVALID_REQUEST_CHUNK_LEN",
- HTTP_DECODER_EVENT_INVALID_REQUEST_CHUNK_LEN},
- { "INVALID_RESPONSE_CHUNK_LEN",
- HTTP_DECODER_EVENT_INVALID_RESPONSE_CHUNK_LEN},
+SCEnumCharMap http_decoder_event_table[] = {
+ { "UNKNOWN_ERROR", HTTP_DECODER_EVENT_UNKNOWN_ERROR },
+ { "GZIP_DECOMPRESSION_FAILED", HTTP_DECODER_EVENT_GZIP_DECOMPRESSION_FAILED },
+ { "REQUEST_FIELD_MISSING_COLON", HTTP_DECODER_EVENT_REQUEST_FIELD_MISSING_COLON },
+ { "RESPONSE_FIELD_MISSING_COLON", HTTP_DECODER_EVENT_RESPONSE_FIELD_MISSING_COLON },
+ { "INVALID_REQUEST_CHUNK_LEN", HTTP_DECODER_EVENT_INVALID_REQUEST_CHUNK_LEN },
+ { "INVALID_RESPONSE_CHUNK_LEN", HTTP_DECODER_EVENT_INVALID_RESPONSE_CHUNK_LEN },
{ "INVALID_TRANSFER_ENCODING_VALUE_IN_REQUEST",
- HTTP_DECODER_EVENT_INVALID_TRANSFER_ENCODING_VALUE_IN_REQUEST},
+ HTTP_DECODER_EVENT_INVALID_TRANSFER_ENCODING_VALUE_IN_REQUEST },
{ "INVALID_TRANSFER_ENCODING_VALUE_IN_RESPONSE",
- HTTP_DECODER_EVENT_INVALID_TRANSFER_ENCODING_VALUE_IN_RESPONSE},
+ HTTP_DECODER_EVENT_INVALID_TRANSFER_ENCODING_VALUE_IN_RESPONSE },
{ "INVALID_CONTENT_LENGTH_FIELD_IN_REQUEST",
- HTTP_DECODER_EVENT_INVALID_CONTENT_LENGTH_FIELD_IN_REQUEST},
+ HTTP_DECODER_EVENT_INVALID_CONTENT_LENGTH_FIELD_IN_REQUEST },
{ "INVALID_CONTENT_LENGTH_FIELD_IN_RESPONSE",
- HTTP_DECODER_EVENT_INVALID_CONTENT_LENGTH_FIELD_IN_RESPONSE},
+ HTTP_DECODER_EVENT_INVALID_CONTENT_LENGTH_FIELD_IN_RESPONSE },
{ "DUPLICATE_CONTENT_LENGTH_FIELD_IN_REQUEST",
- HTTP_DECODER_EVENT_DUPLICATE_CONTENT_LENGTH_FIELD_IN_REQUEST},
+ HTTP_DECODER_EVENT_DUPLICATE_CONTENT_LENGTH_FIELD_IN_REQUEST },
{ "DUPLICATE_CONTENT_LENGTH_FIELD_IN_RESPONSE",
- HTTP_DECODER_EVENT_DUPLICATE_CONTENT_LENGTH_FIELD_IN_RESPONSE},
- { "100_CONTINUE_ALREADY_SEEN",
- HTTP_DECODER_EVENT_100_CONTINUE_ALREADY_SEEN},
+ HTTP_DECODER_EVENT_DUPLICATE_CONTENT_LENGTH_FIELD_IN_RESPONSE },
+ { "100_CONTINUE_ALREADY_SEEN", HTTP_DECODER_EVENT_100_CONTINUE_ALREADY_SEEN },
{ "UNABLE_TO_MATCH_RESPONSE_TO_REQUEST",
- HTTP_DECODER_EVENT_UNABLE_TO_MATCH_RESPONSE_TO_REQUEST},
- { "INVALID_SERVER_PORT_IN_REQUEST",
- HTTP_DECODER_EVENT_INVALID_SERVER_PORT_IN_REQUEST},
- { "INVALID_AUTHORITY_PORT",
- HTTP_DECODER_EVENT_INVALID_AUTHORITY_PORT},
- { "REQUEST_HEADER_INVALID",
- HTTP_DECODER_EVENT_REQUEST_HEADER_INVALID},
- { "RESPONSE_HEADER_INVALID",
- HTTP_DECODER_EVENT_RESPONSE_HEADER_INVALID},
- { "MISSING_HOST_HEADER",
- HTTP_DECODER_EVENT_MISSING_HOST_HEADER},
- { "HOST_HEADER_AMBIGUOUS",
- HTTP_DECODER_EVENT_HOST_HEADER_AMBIGUOUS},
- { "INVALID_REQUEST_FIELD_FOLDING",
- HTTP_DECODER_EVENT_INVALID_REQUEST_FIELD_FOLDING},
- { "INVALID_RESPONSE_FIELD_FOLDING",
- HTTP_DECODER_EVENT_INVALID_RESPONSE_FIELD_FOLDING},
- { "REQUEST_FIELD_TOO_LONG",
- HTTP_DECODER_EVENT_REQUEST_FIELD_TOO_LONG},
- { "RESPONSE_FIELD_TOO_LONG",
- HTTP_DECODER_EVENT_RESPONSE_FIELD_TOO_LONG},
- { "REQUEST_LINE_INVALID",
- HTTP_DECODER_EVENT_REQUEST_LINE_INVALID},
- { "REQUEST_BODY_UNEXPECTED",
- HTTP_DECODER_EVENT_REQUEST_BODY_UNEXPECTED},
+ HTTP_DECODER_EVENT_UNABLE_TO_MATCH_RESPONSE_TO_REQUEST },
+ { "INVALID_SERVER_PORT_IN_REQUEST", HTTP_DECODER_EVENT_INVALID_SERVER_PORT_IN_REQUEST },
+ { "INVALID_AUTHORITY_PORT", HTTP_DECODER_EVENT_INVALID_AUTHORITY_PORT },
+ { "REQUEST_HEADER_INVALID", HTTP_DECODER_EVENT_REQUEST_HEADER_INVALID },
+ { "RESPONSE_HEADER_INVALID", HTTP_DECODER_EVENT_RESPONSE_HEADER_INVALID },
+ { "MISSING_HOST_HEADER", HTTP_DECODER_EVENT_MISSING_HOST_HEADER },
+ { "HOST_HEADER_AMBIGUOUS", HTTP_DECODER_EVENT_HOST_HEADER_AMBIGUOUS },
+ { "INVALID_REQUEST_FIELD_FOLDING", HTTP_DECODER_EVENT_INVALID_REQUEST_FIELD_FOLDING },
+ { "INVALID_RESPONSE_FIELD_FOLDING", HTTP_DECODER_EVENT_INVALID_RESPONSE_FIELD_FOLDING },
+ { "REQUEST_FIELD_TOO_LONG", HTTP_DECODER_EVENT_REQUEST_FIELD_TOO_LONG },
+ { "RESPONSE_FIELD_TOO_LONG", HTTP_DECODER_EVENT_RESPONSE_FIELD_TOO_LONG },
+ { "REQUEST_LINE_INVALID", HTTP_DECODER_EVENT_REQUEST_LINE_INVALID },
+ { "REQUEST_BODY_UNEXPECTED", HTTP_DECODER_EVENT_REQUEST_BODY_UNEXPECTED },
{ "REQUEST_SERVER_PORT_TCP_PORT_MISMATCH",
- HTTP_DECODER_EVENT_REQUEST_SERVER_PORT_TCP_PORT_MISMATCH},
- { "REQUEST_URI_HOST_INVALID",
- HTTP_DECODER_EVENT_URI_HOST_INVALID},
- { "REQUEST_HEADER_HOST_INVALID",
- HTTP_DECODER_EVENT_HEADER_HOST_INVALID},
- { "REQUEST_AUTH_UNRECOGNIZED",
- HTTP_DECODER_EVENT_AUTH_UNRECOGNIZED},
- { "REQUEST_HEADER_REPETITION",
- HTTP_DECODER_EVENT_REQUEST_HEADER_REPETITION},
- { "RESPONSE_HEADER_REPETITION",
- HTTP_DECODER_EVENT_RESPONSE_HEADER_REPETITION},
- { "DOUBLE_ENCODED_URI",
- HTTP_DECODER_EVENT_DOUBLE_ENCODED_URI},
- { "URI_DELIM_NON_COMPLIANT",
- HTTP_DECODER_EVENT_URI_DELIM_NON_COMPLIANT},
- { "METHOD_DELIM_NON_COMPLIANT",
- HTTP_DECODER_EVENT_METHOD_DELIM_NON_COMPLIANT},
- { "REQUEST_LINE_LEADING_WHITESPACE",
- HTTP_DECODER_EVENT_REQUEST_LINE_LEADING_WHITESPACE},
- { "TOO_MANY_ENCODING_LAYERS",
- HTTP_DECODER_EVENT_TOO_MANY_ENCODING_LAYERS},
- { "ABNORMAL_CE_HEADER",
- HTTP_DECODER_EVENT_ABNORMAL_CE_HEADER},
- { "RESPONSE_MULTIPART_BYTERANGES",
- HTTP_DECODER_EVENT_RESPONSE_MULTIPART_BYTERANGES},
+ HTTP_DECODER_EVENT_REQUEST_SERVER_PORT_TCP_PORT_MISMATCH },
+ { "REQUEST_URI_HOST_INVALID", HTTP_DECODER_EVENT_URI_HOST_INVALID },
+ { "REQUEST_HEADER_HOST_INVALID", HTTP_DECODER_EVENT_HEADER_HOST_INVALID },
+ { "REQUEST_AUTH_UNRECOGNIZED", HTTP_DECODER_EVENT_AUTH_UNRECOGNIZED },
+ { "REQUEST_HEADER_REPETITION", HTTP_DECODER_EVENT_REQUEST_HEADER_REPETITION },
+ { "RESPONSE_HEADER_REPETITION", HTTP_DECODER_EVENT_RESPONSE_HEADER_REPETITION },
+ { "DOUBLE_ENCODED_URI", HTTP_DECODER_EVENT_DOUBLE_ENCODED_URI },
+ { "URI_DELIM_NON_COMPLIANT", HTTP_DECODER_EVENT_URI_DELIM_NON_COMPLIANT },
+ { "METHOD_DELIM_NON_COMPLIANT", HTTP_DECODER_EVENT_METHOD_DELIM_NON_COMPLIANT },
+ { "REQUEST_LINE_LEADING_WHITESPACE", HTTP_DECODER_EVENT_REQUEST_LINE_LEADING_WHITESPACE },
+ { "TOO_MANY_ENCODING_LAYERS", HTTP_DECODER_EVENT_TOO_MANY_ENCODING_LAYERS },
+ { "ABNORMAL_CE_HEADER", HTTP_DECODER_EVENT_ABNORMAL_CE_HEADER },
+ { "RESPONSE_MULTIPART_BYTERANGES", HTTP_DECODER_EVENT_RESPONSE_MULTIPART_BYTERANGES },
{ "RESPONSE_ABNORMAL_TRANSFER_ENCODING",
- HTTP_DECODER_EVENT_RESPONSE_ABNORMAL_TRANSFER_ENCODING},
- { "RESPONSE_CHUNKED_OLD_PROTO",
- HTTP_DECODER_EVENT_RESPONSE_CHUNKED_OLD_PROTO},
- { "RESPONSE_INVALID_PROTOCOL",
- HTTP_DECODER_EVENT_RESPONSE_INVALID_PROTOCOL},
- { "RESPONSE_INVALID_STATUS",
- HTTP_DECODER_EVENT_RESPONSE_INVALID_STATUS},
- { "REQUEST_LINE_INCOMPLETE",
- HTTP_DECODER_EVENT_REQUEST_LINE_INCOMPLETE},
-
- { "LZMA_MEMLIMIT_REACHED",
- HTTP_DECODER_EVENT_LZMA_MEMLIMIT_REACHED},
- { "COMPRESSION_BOMB",
- HTTP_DECODER_EVENT_COMPRESSION_BOMB},
+ HTTP_DECODER_EVENT_RESPONSE_ABNORMAL_TRANSFER_ENCODING },
+ { "RESPONSE_CHUNKED_OLD_PROTO", HTTP_DECODER_EVENT_RESPONSE_CHUNKED_OLD_PROTO },
+ { "RESPONSE_INVALID_PROTOCOL", HTTP_DECODER_EVENT_RESPONSE_INVALID_PROTOCOL },
+ { "RESPONSE_INVALID_STATUS", HTTP_DECODER_EVENT_RESPONSE_INVALID_STATUS },
+ { "REQUEST_LINE_INCOMPLETE", HTTP_DECODER_EVENT_REQUEST_LINE_INCOMPLETE },
+
+ { "LZMA_MEMLIMIT_REACHED", HTTP_DECODER_EVENT_LZMA_MEMLIMIT_REACHED },
+ { "COMPRESSION_BOMB", HTTP_DECODER_EVENT_COMPRESSION_BOMB },
+
+ { "RANGE_INVALID", HTTP_DECODER_EVENT_RANGE_INVALID },
/* suricata warnings/errors */
- { "MULTIPART_GENERIC_ERROR",
- HTTP_DECODER_EVENT_MULTIPART_GENERIC_ERROR},
- { "MULTIPART_NO_FILEDATA",
- HTTP_DECODER_EVENT_MULTIPART_NO_FILEDATA},
- { "MULTIPART_INVALID_HEADER",
- HTTP_DECODER_EVENT_MULTIPART_INVALID_HEADER},
+ { "MULTIPART_GENERIC_ERROR", HTTP_DECODER_EVENT_MULTIPART_GENERIC_ERROR },
+ { "MULTIPART_NO_FILEDATA", HTTP_DECODER_EVENT_MULTIPART_NO_FILEDATA },
+ { "MULTIPART_INVALID_HEADER", HTTP_DECODER_EVENT_MULTIPART_INVALID_HEADER },
- { "TOO_MANY_WARNINGS",
- HTTP_DECODER_EVENT_TOO_MANY_WARNINGS},
+ { "TOO_MANY_WARNINGS", HTTP_DECODER_EVENT_TOO_MANY_WARNINGS },
- { NULL, -1 },
+ { NULL, -1 },
};
static void *HTPStateGetTx(void *alstate, uint64_t tx_id);
}
if (filename != NULL) {
- result = HTPFileOpen(hstate, htud, filename, (uint32_t)filename_len, data, data_len,
- HtpGetActiveResponseTxID(hstate), STREAM_TOCLIENT);
+ // set range if present
+ htp_header_t *h_content_range = htp_table_get_c(tx->response_headers, "content-range");
+ if (h_content_range != NULL) {
+ result = HTPFileOpenWithRange(hstate, htud, filename, (uint32_t)filename_len, data,
+ data_len, HtpGetActiveResponseTxID(hstate), h_content_range->value, htud);
+ } else {
+ result = HTPFileOpen(hstate, htud, filename, (uint32_t)filename_len, data, data_len,
+ HtpGetActiveResponseTxID(hstate), STREAM_TOCLIENT);
+ }
SCLogDebug("result %d", result);
if (result == -1) {
goto end;
htud->tcflags |= HTP_FILENAME_SET;
htud->tcflags &= ~HTP_DONTSTORE;
}
- //set range if present
- htp_header_t *h_content_range = htp_table_get_c(tx->response_headers, "content-range");
- if (h_content_range != NULL) {
- HTPFileSetRange(hstate, h_content_range->value);
- }
}
}
else if (tx->response_line != NULL || tx->is_protocol_0_9)
#include "app-layer-htp-mem.h"
#include "detect-engine-state.h"
#include "util-streaming-buffer.h"
+#include "app-layer-htp-range.h"
#include "rust.h"
#include <htp/htp.h>
HTTP_DECODER_EVENT_LZMA_MEMLIMIT_REACHED,
HTTP_DECODER_EVENT_COMPRESSION_BOMB,
+ HTTP_DECODER_EVENT_RANGE_INVALID,
+
/* suricata errors/warnings */
HTTP_DECODER_EVENT_MULTIPART_GENERIC_ERROR,
HTTP_DECODER_EVENT_MULTIPART_NO_FILEDATA,
uint16_t events;
uint16_t htp_messages_offset; /**< offset into conn->messages list */
uint32_t file_track_id; /**< used to assign file track ids to files */
+ HttpRangeContainerBlock *file_range; /**< used to assign track ids to range file */
uint64_t last_request_data_stamp;
uint64_t last_response_data_stamp;
} HtpState;
#include "host-timeout.h"
#include "defrag-timeout.h"
#include "ippair-timeout.h"
+#include "app-layer-htp-range.h"
#include "output-flow.h"
#include "util-validate.h"
//uint32_t hosts_pruned =
HostTimeoutHash(&ts);
IPPairTimeoutHash(&ts);
+ HttpRangeContainersTimeoutHash(&ts);
other_last_sec = (uint32_t)ts.tv_sec;
}
#include "conf.h"
#include "conf-yaml-loader.h"
+#include "app-layer-htp-range.h"
#include "datasets.h"
#include "stream-tcp.h"
AppLayerDeSetup();
DatasetsSave();
DatasetsDestroy();
+ HttpRangeContainersDestroy();
TagDestroyCtx();
LiveDeviceListClean();
{
/* Initialize Datasets to be able to use them with unix socket */
DatasetsInit();
+ HttpRangeContainersInit();
if (runmode == RUNMODE_UNIX_SOCKET)
return;
static THashData *THashGetUsed(THashTableContext *ctx);
static void THashDataEnqueue (THashDataQueue *q, THashData *h);
-static void THashDataMoveToSpare(THashTableContext *ctx, THashData *h)
+void THashDataMoveToSpare(THashTableContext *ctx, THashData *h)
{
THashDataEnqueue(&ctx->spare_q, h);
(void) SC_ATOMIC_SUB(ctx->counter, 1);
int THashWalk(THashTableContext *, THashFormatFunc, THashOutputFunc, void *);
int THashRemoveFromHash (THashTableContext *ctx, void *data);
void THashConsolidateMemcap(THashTableContext *ctx);
+void THashDataMoveToSpare(THashTableContext *ctx, THashData *h);
#endif /* __THASH_H__ */
dp: 53
http:
enabled: yes
+
+ # Range Containers default settings
+ # urlrange:
+ # memcap: 100mb
+ # timeout: 60
+
+
# memcap: Maximum memory capacity for HTTP
# Default is unlimited, values can be 64mb, e.g.