In normal records the parser will try to continue parsing.
GAP 'data' is passed to the file API as '0's. A new call is used
so that the file API knows it is dealing with a GAP. Such
files are flagged as truncated at the end of the file, and no
checksums are calculated.
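
As a rough illustration of that flow, a parser that detects a missing stream segment hands the tracker a zero-filled buffer of the gap's size, much like the NFS3 entry points added below. A minimal sketch, assuming the `tracker`, `files` and `flags` plumbing of the surrounding parser state (the helper name is hypothetical):

```rust
// Sketch: feeding a stream GAP into the file transfer tracker.
// GAP 'data' is a zero-filled buffer; gap_size > 0 is what tells the
// tracker (and the file API below it) that this is a GAP, not data.
fn handle_stream_gap(tracker: &mut FileTransferTracker,
                     files: &mut FileContainer, flags: u16,
                     gap_size: u32) {
    let gap = vec![0u8; gap_size as usize];
    let consumed = tracker.update(files, flags, &gap, gap_size);
    debug_assert!(consumed <= gap_size, "consumed more than GAP size");
}
```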
file_container: &FileContainer,
track_id: u32,
data: *const u8, data_len: u32) -> i32;
+pub type SCFileAppendGAPById = extern "C" fn (
+ file_container: &FileContainer,
+ track_id: u32,
+ data: *const u8, data_len: u32) -> i32;
// void FilePrune(FileContainer *ffc)
pub type SCFilePrune = extern "C" fn (
file_container: &FileContainer);
pub FileOpenFile: SCFileOpenFileWithId,
pub FileCloseFile: SCFileCloseFileById,
pub FileAppendData: SCFileAppendDataById,
+ pub FileAppendGAP: SCFileAppendGAPById,
pub FileContainerRecycle: SCFileContainerRecycle,
pub FilePrune: SCFilePrune,
pub FileSetTx: SCFileSetTx,
}
}
- pub fn file_append(&mut self, track_id: &u32, data: &[u8]) -> i32 {
+ pub fn file_append(&mut self, track_id: &u32, data: &[u8], is_gap: bool) -> i32 {
SCLogDebug!("FILECONTAINER: append {}", data.len());
if data.len() == 0 {
            return 0
        }
match unsafe {SC} {
None => panic!("BUG no suricata_config"),
Some(c) => {
- let res = (c.FileAppendData)(&self, *track_id,
- data.as_ptr(), data.len() as u32);
+            let res = if is_gap {
+                SCLogDebug!("appending GAP");
+                (c.FileAppendGAP)(&self, *track_id,
+                        data.as_ptr(), data.len() as u32)
+            } else {
+                SCLogDebug!("appending file data");
+                (c.FileAppendData)(&self, *track_id,
+                        data.as_ptr(), data.len() as u32)
+            };
if res != 0 {
                panic!("file append failed: {}", res);
}
* 02110-1301, USA.
*/
-// written by Victor Julien
+/**
+ * \file
+ * \author Victor Julien <victor@inliniac.net>
+ *
+ * Tracks chunk-based file transfers. Chunks may be transferred out
+ * of order, but cannot be transferred in parallel, so only one
+ * chunk is transferred at a time.
+ *
+ * GAP handling. If a data gap is encountered, the file is truncated
+ * and new data is no longer pushed down to the lower level APIs.
+ * The tracker continues to follow the file.
+ */
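
The model described above boils down to: append in-order data immediately, park out-of-order chunks in a map keyed by their file offset, and replay them once the tracked offset catches up. A simplified standalone sketch of that replay loop (names are illustrative, not the tracker's actual API):

```rust
use std::collections::HashMap;

// Simplified model of the tracker's replay loop: `tracked` is the next
// expected in-order file offset; `chunks` buffers out-of-order data.
fn replay_in_order(tracked: &mut u64,
                   chunks: &mut HashMap<u64, Vec<u8>>,
                   mut append: impl FnMut(&[u8])) {
    // drain chunks as long as the next expected offset is queued
    while let Some(c) = chunks.remove(tracked) {
        append(&c);
        *tracked += c.len() as u64;
    }
}
```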
+
extern crate libc;
use log::*;
use core::*;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use filecontainer::*;
+#[derive(Debug)]
+pub struct FileChunk {
+ contains_gap: bool,
+ chunk: Vec<u8>,
+}
+
+impl FileChunk {
+ pub fn new(size: u32) -> FileChunk {
+ FileChunk {
+ contains_gap: false,
+ chunk: Vec::with_capacity(size as usize),
+ }
+ }
+}
+
#[derive(Debug)]
pub struct FileTransferTracker {
file_size: u64,
tracked: u64,
+ cur_ooo: u64, // how many bytes do we have queued from ooo chunks
track_id: u32,
chunk_left: u32,
pub file_open: bool,
chunk_is_last: bool,
chunk_is_ooo: bool,
+ file_is_truncated: bool,
- chunks: HashMap<u64, Vec<u8>>,
+ chunks: HashMap<u64, FileChunk>,
cur_ooo_chunk_offset: u64,
}
FileTransferTracker {
file_size:0,
tracked:0,
+ cur_ooo:0,
track_id:0,
chunk_left:0,
tx_id:0,
file_open:false,
chunk_is_last:false,
chunk_is_ooo:false,
+ file_is_truncated:false,
cur_ooo_chunk_offset:0,
chunks:HashMap::new(),
}
}
pub fn close(&mut self, files: &mut FileContainer, flags: u16) {
- files.file_close(&self.track_id, flags);
+ if !self.file_is_truncated {
+ files.file_close(&self.track_id, flags);
+ }
self.file_open = false;
self.tracked = 0;
files.files_prune();
}
+ pub fn trunc (&mut self, files: &mut FileContainer, flags: u16) {
+ if self.file_is_truncated {
+ return;
+ }
+ let myflags = flags | 1; // TODO util-file.c::FILE_TRUNCATED
+ files.file_close(&self.track_id, myflags);
+ SCLogDebug!("truncated file");
+ files.files_prune();
+ self.file_is_truncated = true;
+ }
+
pub fn create(&mut self, name: &[u8], file_size: u64) {
if self.file_open == true { panic!("close existing file first"); }
SCLogDebug!("NEW CHUNK: chunk_size {} fill_bytes {}", chunk_size, fill_bytes);
+        // for now assume that is_last means it's really the last chunk,
+        // so no out-of-order chunks are coming after it. This means that if
+        // the last chunk is out of order, we've missed chunks before it.
if chunk_offset != self.tracked {
SCLogDebug!("NEW CHUNK IS OOO: expected {}, got {}", self.tracked, chunk_offset);
+ if is_last {
+ SCLogDebug!("last chunk is out of order, this means we missed data before");
+ self.trunc(files, flags);
+ }
self.chunk_is_ooo = true;
self.cur_ooo_chunk_offset = chunk_offset;
}
self.open(config, files, flags, name);
}
- let res = self.update(files, flags, data);
+ let res = self.update(files, flags, data, 0);
SCLogDebug!("NEW CHUNK: update res {:?}", res);
res
}
+    /// Update the file tracker.
+    /// If gap_size > 0, 'data' should not be used.
/// return how much we consumed of data
- pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8]) -> u32 {
+ pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8], gap_size: u32) -> u32 {
let mut consumed = 0 as usize;
+ let is_gap = gap_size > 0;
+        if is_gap {
+ SCLogDebug!("is_gap {} size {} ooo? {}", is_gap, gap_size, self.chunk_is_ooo);
+ }
+
if self.chunk_left + self.fill_bytes as u32 == 0 {
//SCLogDebug!("UPDATE: nothing to do");
return 0
let d = &data[0..self.chunk_left as usize];
if self.chunk_is_ooo == false {
- let res = files.file_append(&self.track_id, d);
+ let res = files.file_append(&self.track_id, d, is_gap);
if res != 0 { panic!("append failed"); }
self.tracked += self.chunk_left as u64;
d.len(), self.cur_ooo_chunk_offset, self.tracked);
let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
Vacant(entry) => {
- entry.insert(Vec::with_capacity(self.chunk_left as usize))
+ entry.insert(FileChunk::new(self.chunk_left))
},
Occupied(entry) => entry.into_mut(),
};
- c.extend(d);
+ self.cur_ooo += d.len() as u64;
+ c.contains_gap |= is_gap;
+ c.chunk.extend(d);
}
consumed += self.chunk_left as usize;
SCLogDebug!("CHUNK(post) fill bytes now still {}", self.fill_bytes);
}
self.chunk_left = 0;
- //return consumed as u32
} else {
self.chunk_left = 0;
loop {
let offset = self.tracked;
match self.chunks.remove(&self.tracked) {
- Some(a) => {
- let res = files.file_append(&self.track_id, &a);
- if res != 0 { panic!("append failed"); }
+ Some(c) => {
+ let res = files.file_append(&self.track_id, &c.chunk, c.contains_gap);
+ if res != 0 { panic!("append failed: files.file_append() returned {}", res); }
- self.tracked += a.len() as u64;
+ self.tracked += c.chunk.len() as u64;
+ self.cur_ooo -= c.chunk.len() as u64;
- SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, a.len());
+ SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, c.chunk.len());
},
_ => {
SCLogDebug!("NO STORED CHUNK found at offset {}", self.tracked);
} else {
if self.chunk_is_ooo == false {
- let res = files.file_append(&self.track_id, data);
+ let res = files.file_append(&self.track_id, data, is_gap);
if res != 0 { panic!("append failed"); }
self.tracked += data.len() as u64;
} else {
let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
- Vacant(entry) => entry.insert(Vec::with_capacity(32768)),
+ Vacant(entry) => entry.insert(FileChunk::new(32768)),
Occupied(entry) => entry.into_mut(),
};
- c.extend(data);
+ c.chunk.extend(data);
+ c.contains_gap |= is_gap;
+ self.cur_ooo += data.len() as u64;
}
self.chunk_left -= data.len() as u32;
files.files_prune();
consumed as u32
}
+
+ pub fn get_queued_size(&self) -> u64 {
+ self.cur_ooo
+ }
}
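
The `cur_ooo` counter exposed by get_queued_size() lets callers bound the memory spent on out-of-order buffering: once a session has seen a GAP, queued chunks may be waiting for data that will never arrive. A caller-side sketch of that policy, mirroring the hard-coded 2MB threshold the NFS parser applies below (the helper name and constant are assumptions):

```rust
// Sketch: give up on exact reassembly once too much data is queued
// behind a GAP, flagging the file as truncated instead.
const MAX_QUEUED_OOO: u64 = 2_000_000; // mirrors the TODO-configurable limit

fn enforce_ooo_limit(tracker: &mut FileTransferTracker,
                     files: &mut FileContainer, flags: u16,
                     ssn_gap: bool) {
    if ssn_gap && tracker.get_queued_size() > MAX_QUEUED_OOO {
        // trunc() closes the file as truncated and stops further appends
        tracker.trunc(files, flags);
    }
}
```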
ts_chunk_left: u32,
tc_chunk_left: u32,
+ ts_ssn_gap: bool,
+ tc_ssn_gap: bool,
+
/// tx counter for assigning incrementing id's to tx's
tx_id: u64,
tc_chunk_xid:0,
ts_chunk_left:0,
tc_chunk_left:0,
+ ts_ssn_gap:false,
+ tc_ssn_gap:false,
tx_id:0,
de_state_count:0,
//ts_txs_updated:false,
let xidmap;
match self.requestmap.remove(&r.hdr.xid) {
Some(p) => { xidmap = p; },
- _ => { SCLogDebug!("REPLY: xid {} NOT FOUND", r.hdr.xid); return 0; },
+ _ => {
+ SCLogDebug!("REPLY: xid {} NOT FOUND. GAPS? TS:{} TC:{}",
+ r.hdr.xid, self.ts_ssn_gap, self.tc_ssn_gap);
+
+ // TODO we might be able to try to infer from the size + data
+ // that this is a READ reply and pass the data to the file API anyway?
+ return 0;
+ },
}
if xidmap.procedure == NFSPROC3_LOOKUP {
// update in progress chunks for file transfers
// return how much data we consumed
- fn filetracker_update(&mut self, direction: u8, data: &[u8]) -> u32 {
+ fn filetracker_update(&mut self, direction: u8, data: &[u8], gap_size: u32) -> u32 {
let mut chunk_left = if direction == STREAM_TOSERVER {
self.ts_chunk_left
} else {
self.tc_chunk_left = chunk_left;
}
+        let ssn_gap = self.ts_ssn_gap || self.tc_ssn_gap;
// get the tx and update it
let consumed = match self.get_file_tx_by_handle(&file_handle, direction) {
Some((tx, files, flags)) => {
Some(NFS3TransactionTypeData::FILE(ref mut x)) => x,
_ => { panic!("BUG") },
};
- let cs = tdf.file_tracker.update(files, flags, data);
+ if ssn_gap {
+ let queued_data = tdf.file_tracker.get_queued_size();
+ if queued_data > 2000000 { // TODO should probably be configurable
+ SCLogDebug!("QUEUED size {} while we've seen GAPs. Truncating file.", queued_data);
+ tdf.file_tracker.trunc(files, flags);
+ }
+ }
+
+ let cs = tdf.file_tracker.update(files, flags, data, gap_size);
cs
},
None => { 0 },
xidmap.procedure
}
+ pub fn parse_tcp_data_ts_gap<'b>(&mut self, gap_size: u32) -> u32 {
+ if self.tcp_buffer_ts.len() > 0 {
+ self.tcp_buffer_ts.clear();
+ }
+ let gap = vec![0; gap_size as usize];
+ let consumed = self.filetracker_update(STREAM_TOSERVER, &gap, gap_size);
+ if consumed > gap_size {
+ panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+ }
+ self.ts_ssn_gap = true;
+ return 0
+ }
+
+ pub fn parse_tcp_data_tc_gap<'b>(&mut self, gap_size: u32) -> u32 {
+ if self.tcp_buffer_tc.len() > 0 {
+ self.tcp_buffer_tc.clear();
+ }
+ let gap = vec![0; gap_size as usize];
+ let consumed = self.filetracker_update(STREAM_TOCLIENT, &gap, gap_size);
+ if consumed > gap_size {
+ panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+ }
+ self.tc_ssn_gap = true;
+ return 0
+ }
+
/// Parsing function, handling TCP chunks fragmentation
pub fn parse_tcp_data_ts<'b>(&mut self, i: &'b[u8]) -> u32 {
let mut v : Vec<u8>;
v = self.tcp_buffer_ts.split_off(0);
// sanity check vector length to avoid memory exhaustion
if self.tcp_buffer_ts.len() + i.len() > 1000000 {
- SCLogNotice!("parse_tcp_data_ts: TS buffer exploded {} {}",
+ SCLogDebug!("parse_tcp_data_ts: TS buffer exploded {} {}",
self.tcp_buffer_ts.len(), i.len());
return 1;
};
//SCLogDebug!("tcp_buffer ({})",tcp_buffer.len());
let mut cur_i = tcp_buffer;
if cur_i.len() > 1000000 {
- SCLogNotice!("BUG buffer exploded: {}", cur_i.len());
+ SCLogDebug!("BUG buffer exploded: {}", cur_i.len());
}
-
// take care of in progress file chunk transfers
// and skip buffer beyond it
- let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i);
+ let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i, 0);
if consumed > 0 {
if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
cur_i = &cur_i[consumed as usize..];
}
-
while cur_i.len() > 0 { // min record size
match parse_rpc_request_partial(cur_i) {
IResult::Done(_, ref rpc_phdr) => {
SCLogDebug!("TC buffer exploded");
return 1;
};
+
v.extend_from_slice(i);
v.as_slice()
},
let mut cur_i = tcp_buffer;
if cur_i.len() > 100000 {
- SCLogNotice!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
+ SCLogDebug!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
}
// take care of in progress file chunk transfers
// and skip buffer beyond it
- let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i);
+ let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i, 0);
if consumed > 0 {
if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
cur_i = &cur_i[consumed as usize..];
}
-
while cur_i.len() > 0 {
match parse_rpc_packet_header(cur_i) {
IResult::Done(_, ref rpc_hdr) => {
{
+    // NULL input with a non-zero length signals a stream GAP; check it
+    // before constructing a slice from the pointer
+    if input.is_null() && input_len > 0 {
+        if state.parse_tcp_data_ts_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
    let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
    SCLogDebug!("parsing {} bytes of request data", input_len);
+
if state.parse_tcp_data_ts(buf) == 0 {
1
} else {
{
    SCLogDebug!("parsing {} bytes of response data", input_len);
+
+    // NULL input with a non-zero length signals a stream GAP; check it
+    // before constructing a slice from the pointer
+    if input.is_null() && input_len > 0 {
+        if state.parse_tcp_data_tc_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
    let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
+
if state.parse_tcp_data_tc(buf) == 0 {
1
} else {
// NFS3StateGetEventInfo);
// AppLayerParserRegisterGetEventsFunc(IPPROTO_TCP, ALPROTO_NFS3,
// NFS3GetEvents);
+
+ /* This parser accepts gaps. */
+ AppLayerParserRegisterOptionFlags(IPPROTO_TCP, ALPROTO_NFS3,
+ APP_LAYER_PARSER_OPT_ACCEPT_GAPS);
}
else {
SCLogDebug("NFS3 protocol parsing disabled.");
if (ff->magic)
json_object_set_new(fjs, "magic", json_string((char *)ff->magic));
#endif
+ json_object_set_new(fjs, "gaps", json_boolean((ff->flags & FILE_HAS_GAPS)));
switch (ff->state) {
case FILE_STATE_CLOSED:
json_object_set_new(fjs, "state", json_string("CLOSED"));
const uint8_t *data, uint32_t data_len, uint16_t flags);
int (*FileAppendDataById)(FileContainer *, uint32_t track_id,
const uint8_t *data, uint32_t data_len);
+ int (*FileAppendGAPById)(FileContainer *, uint32_t track_id,
+ const uint8_t *data, uint32_t data_len);
void (*FileContainerRecycle)(FileContainer *ffc);
void (*FilePrune)(FileContainer *ffc);
void (*FileSetTx)(FileContainer *, uint64_t);
context.FileOpenFileWithId = FileOpenFileWithId;
context.FileCloseFileById = FileCloseFileById;
context.FileAppendDataById = FileAppendDataById;
+ context.FileAppendGAPById = FileAppendGAPById;
context.FileContainerRecycle = FileContainerRecycle;
context.FilePrune = FilePrune;
context.FileSetTx = FileContainerSetTx;
SCReturnInt(-1);
}
+/**
+ * \brief Store/handle a GAP chunk of file data in the File structure
+ * The file with 'track_id' in the FileContainer will be used.
+ *
+ * \param ffc FileContainer used to append to
+ * \param track_id id to lookup the file
+ * \param data data chunk
+ * \param data_len data chunk len
+ *
+ * \retval 0 ok
+ * \retval -1 error
+ * \retval -2 no store for this file
+ */
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+ const uint8_t *data, uint32_t data_len)
+{
+ SCEnter();
+
+ if (ffc == NULL || ffc->tail == NULL || data == NULL || data_len == 0) {
+ SCReturnInt(-1);
+ }
+ File *ff = ffc->head;
+ for ( ; ff != NULL; ff = ff->next) {
+ if (track_id == ff->file_track_id) {
+ ff->flags |= FILE_HAS_GAPS;
+ ff->flags |= (FILE_NOMD5|FILE_NOSHA1|FILE_NOSHA256);
+ ff->flags &= ~(FILE_MD5|FILE_SHA1|FILE_SHA256);
+ SCLogDebug("FILE_HAS_GAPS set");
+
+ int r = FileAppendDataDo(ff, data, data_len);
+ SCReturnInt(r);
+ }
+ }
+ SCReturnInt(-1);
+}
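
Given the return contract documented above (0 ok, -1 error, -2 no store for this file), a Rust-side caller could in principle distinguish the outcomes instead of treating every non-zero result as fatal; a hedged sketch (the tracker in this patch simply panics on non-zero):

```rust
// Sketch: interpreting FileAppendGAPById's documented return values.
// -2 ("no store for this file") need not be fatal to the caller.
fn check_append_result(res: i32) {
    match res {
        0 => SCLogDebug!("GAP appended"),
        -2 => SCLogDebug!("no store for this file, GAP not appended"),
        _ => panic!("file append GAP failed: {}", res),
    }
}
```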
/**
* \brief Open a new File
}
}
- if (flags & FILE_TRUNCATED) {
+ if ((flags & FILE_TRUNCATED) || (ff->flags & FILE_HAS_GAPS)) {
ff->state = FILE_STATE_TRUNCATED;
SCLogDebug("flowfile state transitioned to FILE_STATE_TRUNCATED");
#define FILE_NOTRACK BIT_U16(12) /**< track size of file */
#define FILE_USE_DETECT BIT_U16(13) /**< use content_inspected tracker */
#define FILE_USE_TRACKID BIT_U16(14) /**< File::file_track_id field is in use */
+#define FILE_HAS_GAPS BIT_U16(15) /**< file data contains gaps */
typedef enum FileState_ {
FILE_STATE_NONE = 0, /**< no state */
int FileAppendData(FileContainer *, const uint8_t *data, uint32_t data_len);
int FileAppendDataById(FileContainer *, uint32_t track_id,
const uint8_t *data, uint32_t data_len);
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+ const uint8_t *data, uint32_t data_len);
/**
* \brief Tag a file for storing