// Install ctx as the file context currently under inspection on this flow.
// If the previously current context finished processing (delete-pending flag
// set by remove_processed_file_context()), it is freed here, because detection
// needed it to stay alive until the next file arrived.
// NOTE(review): resolved leftover unified-diff '+' markers to the post-patch code.
void FileFlows::set_current_file_context(FileContext* ctx)
{
    // If we finished processing a file context object last time, delete it
    if (current_context_delete_pending)
    {
        delete current_context;
        current_context_delete_pending = false;
    }
    current_context = ctx;
    // Not using current_file_id so clear it
    current_file_id = 0;
}
// NOTE(review): unresolved unified-diff residue — the '+'/'-' prefixes below are
// patch markers, not code. This span fuses a dangling get_current_file_context()
// signature with the ~FileFlows() destructor body, and the trailing
// 'return context;' cannot belong to a destructor (presumably a context line
// from a neighboring hunk). Resolve against the original commit by hand;
// a mechanical rewrite here would be guessing at missing hunk context.
FileContext* FileFlows::get_current_file_context()
FileFlows::~FileFlows()
{
delete(main_context);
+ if (current_context_delete_pending)
+ delete(current_context);
// Delete any remaining FileContexts stored on the flow
- for (auto const& elem : flow_file_contexts)
+ for (auto const& elem : partially_processed_contexts)
{
delete elem.second;
}
// NOTE(review): stray line from another hunk — a destructor has no return value.
return context;
}
-FileContext* FileFlows::get_file_context(uint64_t file_id, bool to_create)
+FileContext* FileFlows::get_partially_processed_context(uint64_t file_id)
{
- FileContext *context = nullptr;
+ auto elem = partially_processed_contexts.find(file_id);
+ if (elem != partially_processed_contexts.end())
+ return elem->second;
+ return nullptr;
+}
+
// NOTE(review): unresolved unified-diff residue — '+'/'-' are patch markers.
// Post-patch intent visible from the hunks: look up the context by
// multi_file_processing_id (defaulting to file_id), fall back to the file
// cache, and only create a new context if under the per-flow file limit.
// Several context lines are missing (the actual file-cache lookup between
// the assert and the create branch, and a closing brace after the
// over-limit branch at the 'events.create_event' line) — the braces here do
// not balance. Resolve against the original commit before compiling.
+FileContext* FileFlows::get_file_context(uint64_t file_id, bool to_create,
+ uint64_t multi_file_processing_id)
+{
+ // First check if this file is currently being processed
+ if (!multi_file_processing_id)
+ multi_file_processing_id = file_id;
+ FileContext *context = get_partially_processed_context(multi_file_processing_id);
- // First check if this file is currently being processed and is stored on the file flows object
- auto elem = flow_file_contexts.find(file_id);
- if (elem != flow_file_contexts.end())
- context = elem->second;
// Otherwise check if it has been fully processed and is in the file cache. If the file is not
// in the cache, don't add it.
- else
+ if (!context)
{
FileCache* file_cache = FileService::get_file_cache();
assert(file_cache);
// NOTE(review): hunk gap — the cache lookup and the 'if (to_create ...)' guard
// that should precede this brace are not in view.
{
// If we have reached the max file per flow limit, alert and increment the peg count
FileConfig* fc = get_file_config(SnortConfig::get_conf());
- if (flow_file_contexts.size() == fc->max_files_per_flow)
+ if (partially_processed_contexts.size() == fc->max_files_per_flow)
{
file_counts.files_over_flow_limit_not_processed++;
events.create_event(EVENT_FILE_DROPPED_OVER_LIMIT);
// NOTE(review): missing '}' context line before this 'else' (hunk gap).
else
{
context = new FileContext;
- flow_file_contexts[file_id] = context;
- if (flow_file_contexts.size() > file_counts.max_concurrent_files_per_flow)
- file_counts.max_concurrent_files_per_flow = flow_file_contexts.size();
+ partially_processed_contexts[multi_file_processing_id] = context;
+ if (partially_processed_contexts.size() > file_counts.max_concurrent_files_per_flow)
+ file_counts.max_concurrent_files_per_flow = partially_processed_contexts.size();
}
}
- current_file_id = file_id;
return context;
}
-void FileFlows::remove_file_context(uint64_t file_id)
+// Remove a file context from the flow's partially processed store. Don't delete the context
+// yet because detection needs access; pointer is stored in current_context. The file context will
+// be deleted when the next file is processed
+void FileFlows::remove_processed_file_context(uint64_t file_id)
{
- auto elem = flow_file_contexts.find(file_id);
- if (elem == flow_file_contexts.end())
- return;
- delete elem->second;
- flow_file_contexts.erase(file_id);
+ FileContext *context = get_partially_processed_context(file_id);
+ partially_processed_contexts.erase(file_id);
+ if (context)
+ current_context_delete_pending = true;
}
/* This function is used to process file that is sent in pieces
* false: ignore this file
*/
// NOTE(review): unresolved unified-diff residue — '+'/'-' are patch markers.
// The span below fuses at least two hunks of FileFlows::file_process: the
// closing braces at the end repeat (two function-level '}' plus a stray
// trailing 'return context->process(...)' line from a neighboring function),
// so the braces do not balance. Post-patch intent visible from the hunks:
// map a zero multi_file_processing_id to file_id, fetch/create the context,
// make it current, process, and on completion defer deletion via
// remove_processed_file_context(). Resolve against the original commit.
bool FileFlows::file_process(Packet* p, uint64_t file_id, const uint8_t* file_data,
- int data_size, uint64_t offset, FileDirection dir)
+ int data_size, uint64_t offset, FileDirection dir, uint64_t multi_file_processing_id,
+ FilePosition position)
{
int64_t file_depth = FileService::get_max_file_depth();
bool continue_processing;
+ if (!multi_file_processing_id)
+ multi_file_processing_id = file_id;
if ((file_depth < 0) or (offset > (uint64_t)file_depth))
{
return false;
}
- FileContext* context = get_file_context(file_id, true);
+ FileContext* context = get_file_context(file_id, true, multi_file_processing_id);
if (!context)
return false;
+ set_current_file_context(context);
+
if (!context->get_processed_bytes())
{
context->check_policy(flow, dir, file_policy);
context->process(p, file_data, data_size, position,
file_policy);
if (context->processing_complete)
- remove_file_context(file_id);
+ remove_processed_file_context(multi_file_processing_id);
return continue_processing;
}
}
- continue_processing = context->process(p, file_data, data_size, offset, file_policy);
+ continue_processing = context->process(p, file_data, data_size, offset, file_policy, position);
if (context->processing_complete)
- remove_file_context(file_id);
+ remove_processed_file_context(multi_file_processing_id);
return continue_processing;
}
// NOTE(review): stray tail from a different hunk/function.
return context->process(p, file_data, data_size, position, file_policy);
}
// NOTE(review): unresolved unified-diff residue — '+'/'-' are patch markers,
// and this hunk is truncated (the function body continues past the visible
// 'return;'). Post-patch intent: when a nonzero file_id is supplied, name the
// matching (not-to-be-created) context; otherwise name the current context.
-void FileFlows::set_file_name(const uint8_t* fname, uint32_t name_size)
+void FileFlows::set_file_name(const uint8_t* fname, uint32_t name_size, uint64_t file_id)
{
- FileContext* context = get_current_file_context();
+ FileContext* context;
+ if (file_id)
+ context = get_file_context(file_id, false);
+ else
+ context = get_current_file_context();
if ( !context )
return;
void set_current_file_context(FileContext*);
- // Get file context based on file id, create it if not existed
- FileContext* get_file_context(uint64_t file_id, bool to_create);
+ // Get file context based on file id, create it if does not exist
+ FileContext* get_file_context(uint64_t file_id, bool to_create,
+ uint64_t multi_file_processing_id=0);
+ // Get a partially processed file context from the flow object
+ FileContext* get_partially_processed_context(uint64_t file_id);
+ // Remove a file from the flow object when processing is complete
+ void remove_processed_file_context(uint64_t file_id);
uint64_t get_new_file_instance();
- void set_file_name(const uint8_t* fname, uint32_t name_size);
+ void set_file_name(const uint8_t* fname, uint32_t name_size, uint64_t file_id=0);
void set_sig_gen_state( bool enable )
{
// This is used for each file context. Support multiple files per session
bool file_process(Packet* p, uint64_t file_id, const uint8_t* file_data,
- int data_size, uint64_t offset, FileDirection);
+ int data_size, uint64_t offset, FileDirection, uint64_t multi_file_processing_id=0,
+ FilePosition=SNORT_FILE_POSITION_UNKNOWN);
static unsigned file_flow_data_id;
size_t size_of() override
{ return sizeof(*this); }
- void remove_file_context(uint64_t file_id);
-
private:
void init_file_context(FileDirection, FileContext*);
FileContext* find_main_file_context(FilePosition, FileDirection, size_t id = 0);
Flow* flow = nullptr;
FilePolicyBase* file_policy = nullptr;
- std::unordered_map<uint64_t, FileContext*> flow_file_contexts;
+ std::unordered_map<uint64_t, FileContext*> partially_processed_contexts;
+ bool current_context_delete_pending = false;
FileEventGen events;
};
}
}
bool FileContext::process(Packet* p, const uint8_t* file_data, int data_size,
- uint64_t offset, FilePolicyBase* policy)
+ uint64_t offset, FilePolicyBase* policy, FilePosition position)
{
if (!file_segments)
file_segments = new FileSegments(this);
- return file_segments->process(p, file_data, data_size, offset, policy);
+ return file_segments->process(p, file_data, data_size, offset, policy, position);
}
/*
// true: continue processing/log/block this file
// false: ignore this file
bool process(Packet*, const uint8_t* file_data, int data_size, FilePosition, FilePolicyBase*);
- bool process(Packet*, const uint8_t* file_data, int data_size, uint64_t offset, FilePolicyBase*);
+ bool process(Packet*, const uint8_t* file_data, int data_size, uint64_t offset, FilePolicyBase*,
+ FilePosition position=SNORT_FILE_POSITION_UNKNOWN);
void process_file_type(const uint8_t* file_data, int data_size, FilePosition);
void process_file_signature_sha256(const uint8_t* file_data, int data_size, FilePosition);
void update_file_size(int data_size, FilePosition position);
}
int FileSegments::process_one(Packet* p, const uint8_t* file_data, int data_size,
- FilePolicyBase* policy)
+ FilePolicyBase* policy, FilePosition position)
{
- FilePosition position = get_file_position(data_size, context->get_file_size());
+ if (position == SNORT_FILE_POSITION_UNKNOWN)
+ position = get_file_position(data_size, context->get_file_size());
return context->process(p, file_data, data_size, position, policy);
}
* 0: ignore this file
*/
// NOTE(review): unresolved unified-diff residue — '+'/'-' are patch markers,
// and the hunk is truncated: the body is cut off inside 'if (!ret)'. The
// visible change only threads the new FilePosition parameter through to
// process_one() on the in-order path. Resolve against the original commit.
int FileSegments::process(Packet* p, const uint8_t* file_data, uint64_t data_size,
- uint64_t offset, FilePolicyBase* policy)
+ uint64_t offset, FilePolicyBase* policy, FilePosition position)
{
int ret = 0;
// Walk through the segments that can be flushed
if (current_offset == offset)
{
- ret = process_one(p, file_data, data_size, policy);
+ ret = process_one(p, file_data, data_size, policy, position);
current_offset += data_size;
if (!ret)
{
// Process file segments with current_offset specified. If file segment is out of order,
// it will be put into the file segments queue.
int process(snort::Packet*, const uint8_t* file_data, uint64_t data_size, uint64_t offset,
- snort::FilePolicyBase*);
+ snort::FilePolicyBase*, FilePosition position=SNORT_FILE_POSITION_UNKNOWN);
private:
FileSegment* head = nullptr;
void add(const uint8_t* file_data, uint64_t data_size, uint64_t offset);
FilePosition get_file_position(uint64_t data_size, uint64_t file_size);
- int process_one(snort::Packet*, const uint8_t* file_data, int data_size, snort::FilePolicyBase*);
+ int process_one(snort::Packet*, const uint8_t* file_data, int data_size, snort::FilePolicyBase*,
+ FilePosition position=SNORT_FILE_POSITION_UNKNOWN);
int process_all(snort::Packet*, snort::FilePolicyBase*);
};
// NOTE(review): unresolved unified-diff residue from an http_inspect message
// body function whose definition starts outside this view; '+'/'-' are patch
// markers and intermediate context lines are missing (e.g. the request/URI
// guard around get_file_proc_hash()). The visible change swaps the bool
// 'download' for a FileDirection and calls the new file_process() overload
// with body_octets, the transaction's file processing id, and file_position.
if (!session_data->mime_state[source_id])
{
FileFlows* file_flows = FileFlows::get_file_flows(flow);
- const bool download = (source_id == SRC_SERVER);
+ const FileDirection dir = source_id == SRC_SERVER ? FILE_DOWNLOAD : FILE_UPLOAD;
size_t file_index = 0;
file_index = request->get_http_uri()->get_file_proc_hash();
}
- if (file_flows->file_process(p, file_data.start(), fp_length,
- file_position, !download, file_index))
+ if (file_flows->file_process(p, file_index, file_data.start(), fp_length, body_octets, dir,
+ transaction->get_file_processing_id(source_id), file_position))
{
session_data->file_depth_remaining[source_id] -= fp_length;
void HttpMsgHeader::setup_file_processing()
{
- // FIXIT-M Bidirectional file processing is problematic so we don't do it. When the library
- // fully supports it remove the outer if statement that prevents it from being done.
- if (session_data->file_depth_remaining[1-source_id] <= 0)
+ // Generate the unique file id for file processing
+ transaction->set_file_processing_id(source_id, get_transaction_id());
+
+ if ((session_data->file_depth_remaining[source_id] = FileService::get_max_file_depth()) < 0)
{
- if ((session_data->file_depth_remaining[source_id] = FileService::get_max_file_depth())
- < 0)
- {
- session_data->file_depth_remaining[source_id] = 0;
- return;
- }
+ session_data->file_depth_remaining[source_id] = 0;
+ return;
+ }
- // Do we meet all the conditions for MIME file processing?
- if (source_id == SRC_CLIENT)
+ // Do we meet all the conditions for MIME file processing?
+ if (source_id == SRC_CLIENT)
+ {
+ const Field& content_type = get_header_value_raw(HEAD_CONTENT_TYPE);
+ if (content_type.length() > 0)
{
- const Field& content_type = get_header_value_raw(HEAD_CONTENT_TYPE);
- if (content_type.length() > 0)
+ if (boundary_present(content_type))
{
- if (boundary_present(content_type))
- {
- session_data->mime_state[source_id] =
- new MimeSession(&decode_conf, &mime_conf);
- // Show file processing the Content-Type header as if it were regular data.
- // This will enable it to find the boundary string.
- // FIXIT-L develop a proper interface for passing the boundary string.
- // This interface is a leftover from when OHI pushed whole messages through
- // this interface.
- Packet* p = DetectionEngine::get_current_packet();
- session_data->mime_state[source_id]->process_mime_data(p,
- content_type.start(), content_type.length(), true,
- SNORT_FILE_POSITION_UNKNOWN);
- session_data->mime_state[source_id]->process_mime_data(p,
- (const uint8_t*)"\r\n", 2, true, SNORT_FILE_POSITION_UNKNOWN);
- }
+ session_data->mime_state[source_id] = new MimeSession(&decode_conf, &mime_conf);
+ // Show file processing the Content-Type header as if it were regular data.
+ // This will enable it to find the boundary string.
+ // FIXIT-L develop a proper interface for passing the boundary string.
+ // This interface is a leftover from when OHI pushed whole messages through
+ // this interface.
+ Packet* p = DetectionEngine::get_current_packet();
+ session_data->mime_state[source_id]->process_mime_data(p,
+ content_type.start(), content_type.length(), true,
+ SNORT_FILE_POSITION_UNKNOWN);
+ session_data->mime_state[source_id]->process_mime_data(p,
+ (const uint8_t*)"\r\n", 2, true, SNORT_FILE_POSITION_UNKNOWN);
}
}
-
- // Otherwise do regular file processing
- if (session_data->mime_state[source_id] == nullptr)
- {
- FileFlows* file_flows = FileFlows::get_file_flows(flow);
- if (!file_flows)
- session_data->file_depth_remaining[source_id] = 0;
- }
}
- else
+
+ // Otherwise do regular file processing
+ if (session_data->mime_state[source_id] == nullptr)
{
- session_data->file_depth_remaining[source_id] = 0;
+ FileFlows* file_flows = FileFlows::get_file_flows(flow);
+ if (!file_flows)
+ session_data->file_depth_remaining[source_id] = 0;
}
}
void clear();
bool is_clear() { return cleared; }
+ uint64_t get_transaction_id() { return trans_num; }
+
HttpMsgSection* next = nullptr;
#ifdef REG_TEST
#include "http_msg_status.h"
#include "http_msg_trailer.h"
+#include "hash/hashfcn.h"
+
using namespace HttpCommon;
using namespace HttpEnums;
+using namespace snort;
static void delete_section_list(HttpMsgSection* section_list)
{
second_response_expected = true;
}
+void HttpTransaction::set_file_processing_id(const SourceId source_id,
+ const uint64_t transaction_id)
+{
+ const int data_len = sizeof(source_id) + sizeof(transaction_id);
+ uint8_t data[data_len];
+ memcpy(data, (void*)&source_id, sizeof(source_id));
+ uint32_t offset = sizeof(source_id);
+ memcpy(data + offset, (void*)&transaction_id, sizeof(transaction_id));
+
+ file_processing_id[source_id] = str_to_hash(data, data_len);
+}
HttpTransaction* next = nullptr;
+ // Each file processed has a unique id per flow: hash(source_id, transaction_id)
+ void set_file_processing_id(const HttpCommon::SourceId source_id,
+ const uint64_t transaction_id);
+ uint64_t get_file_processing_id(HttpCommon::SourceId source_id)
+ { return file_processing_id[source_id]; }
+
private:
HttpTransaction() = default;
void discard_section(HttpMsgSection* section);
HttpInfractions* infractions[2] = { nullptr, nullptr };
HttpEventGen* events[2] = { nullptr, nullptr };
+ uint64_t file_processing_id[2] = { 0, 0 };
+
bool response_seen = false;
bool one_hundred_response = false;
bool second_response_expected = false;
// NOTE(review): fused tail of unit-test stub definitions from (at least) two
// stub translation units; the '+' marker and the dangling '}' (a namespace
// close whose opening is outside this view) are unresolved patch residue.
// The str_to_hash stub satisfies the link dependency introduced by
// HttpTransaction::set_file_processing_id in tests.
FlowData::~FlowData() = default;
int DetectionEngine::queue_event(unsigned int, unsigned int, Actions::Type) { return 0; }
fd_status_t File_Decomp_StopFree(fd_session_t*) { return File_Decomp_OK; }
+size_t str_to_hash(unsigned char const*, size_t) { return 0; }
}
THREAD_LOCAL PegCount HttpModule::peg_counts[1];