From: Victor Julien
Date: Thu, 8 Jun 2017 13:13:49 +0000 (+0200)
Subject: rust/nfs: handle GAPs
X-Git-Tag: suricata-4.0.0-rc1~55
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=58af39131f3b693df7293efcb208e21d21ed7683;p=thirdparty%2Fsuricata.git

rust/nfs: handle GAPs

In normal records the parser will try to continue parsing.

GAP 'data' is passed to the file API as '0's. A new call is used so
that the file API knows it is dealing with a GAP. Such files are
flagged as truncated at the end of the file and no checksums are
calculated.
---
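Note (illustration only, not part of the commit): the gap entry points in
this patch rely on the convention that the stream engine signals a GAP by
calling the parser with a NULL data pointer and a non-zero length;
rs_nfs3_parse_request()/rs_nfs3_parse_response() detect this and route into
parse_tcp_data_ts_gap()/parse_tcp_data_tc_gap(), which synthesize the GAP
'data' as zeros. A minimal standalone sketch of that convention follows;
is_gap_call is an invented name used only for this example.

    use std::ptr;

    /// A gap "call" carries no data pointer, only the count of missing bytes.
    fn is_gap_call(input: *const u8, input_len: u32) -> bool {
        input.is_null() && input_len > 0
    }

    fn main() {
        assert!(is_gap_call(ptr::null(), 128));  // 128 bytes lost in the stream
        let data = [1u8, 2, 3];
        assert!(!is_gap_call(data.as_ptr(), data.len() as u32)); // normal data

        // the parser then synthesizes the GAP 'data' as zeros, as in
        // parse_tcp_data_ts_gap() below:
        let gap = vec![0u8; 128];
        assert!(gap.iter().all(|&b| b == 0));
    }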
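Also illustration only: a self-contained model of how the tracker combines
out-of-order buffering with the gap flag. MiniTracker and its methods are
invented for this sketch; the real logic is FileTransferTracker::update()
with FileChunk::contains_gap feeding into FileAppendGAPById().

    use std::collections::HashMap;

    #[derive(Default)]
    struct MiniTracker {
        tracked: u64,                          // next in-order file offset
        has_gap: bool,                         // a gap chunk reached the file
        chunks: HashMap<u64, (bool, Vec<u8>)>, // offset -> (contains_gap, data)
    }

    impl MiniTracker {
        /// Buffer a chunk; a gap is just a zero-filled chunk with the flag set.
        fn add(&mut self, offset: u64, data: Vec<u8>, is_gap: bool) {
            self.chunks.insert(offset, (is_gap, data));
            // flush everything that is now in order, like the loop in update()
            while let Some((gap, d)) = self.chunks.remove(&self.tracked) {
                if gap {
                    // real code: FileAppendGAP() sets FILE_HAS_GAPS, disables
                    // MD5/SHA1/SHA256, and the file later closes as TRUNCATED
                    self.has_gap = true;
                }
                println!("append {} bytes at offset {} (gap: {})",
                         d.len(), self.tracked, gap);
                self.tracked += d.len() as u64;
            }
        }
    }

    fn main() {
        let mut t = MiniTracker::default();
        t.add(8, b"world".to_vec(), false); // out of order: queued (cur_ooo)
        t.add(0, b"hello".to_vec(), false); // in order: flushed immediately
        t.add(5, vec![0u8; 3], true);       // 3-byte gap unblocks offsets 5, 8
        assert_eq!(t.tracked, 13);
        assert!(t.has_gap);
    }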
diff --git a/rust/src/core.rs b/rust/src/core.rs
index d87cd976dc..e3647673c6 100644
--- a/rust/src/core.rs
+++ b/rust/src/core.rs
@@ -80,6 +80,10 @@ pub type SCFileAppendDataById = extern "C" fn (
         file_container: &FileContainer,
         track_id: u32,
         data: *const u8, data_len: u32) -> i32;
+pub type SCFileAppendGAPById = extern "C" fn (
+        file_container: &FileContainer,
+        track_id: u32,
+        data: *const u8, data_len: u32) -> i32;
 
 // void FilePrune(FileContainer *ffc)
 pub type SCFilePrune = extern "C" fn (
         file_container: &FileContainer);
@@ -109,6 +113,7 @@ pub struct SuricataContext {
     pub FileOpenFile: SCFileOpenFileWithId,
     pub FileCloseFile: SCFileCloseFileById,
     pub FileAppendData: SCFileAppendDataById,
+    pub FileAppendGAP: SCFileAppendGAPById,
     pub FileContainerRecycle: SCFileContainerRecycle,
     pub FilePrune: SCFilePrune,
     pub FileSetTx: SCFileSetTx,
diff --git a/rust/src/filecontainer.rs b/rust/src/filecontainer.rs
index 3d3c163d94..546625de94 100644
--- a/rust/src/filecontainer.rs
+++ b/rust/src/filecontainer.rs
@@ -63,7 +63,7 @@ impl FileContainer {
         }
     }
 
-    pub fn file_append(&mut self, track_id: &u32, data: &[u8]) -> i32 {
+    pub fn file_append(&mut self, track_id: &u32, data: &[u8], is_gap: bool) -> i32 {
         SCLogDebug!("FILECONTAINER: append {}", data.len());
         if data.len() == 0 {
             return 0
@@ -71,8 +71,20 @@ impl FileContainer {
         match unsafe {SC} {
             None => panic!("BUG no suricata_config"),
             Some(c) => {
-                let res = (c.FileAppendData)(&self, *track_id,
-                        data.as_ptr(), data.len() as u32);
+                let res = match is_gap {
+                    false => {
+                        SCLogDebug!("appending file data");
+                        let r = (c.FileAppendData)(&self, *track_id,
+                                data.as_ptr(), data.len() as u32);
+                        r
+                    },
+                    true => {
+                        SCLogDebug!("appending GAP");
+                        let r = (c.FileAppendGAP)(&self, *track_id,
+                                data.as_ptr(), data.len() as u32);
+                        r
+                    },
+                };
                 if res != 0 {
                     panic!("c.fn_fileappenddata failed");
                 }
diff --git a/rust/src/filetracker.rs b/rust/src/filetracker.rs
index 169f247e0b..7e5c24a1ac 100644
--- a/rust/src/filetracker.rs
+++ b/rust/src/filetracker.rs
@@ -15,7 +15,19 @@
  * 02110-1301, USA.
  */
 
-// written by Victor Julien
+/**
+ * \file
+ * \author Victor Julien
+ *
+ * Tracks chunk-based file transfers. Chunks may be transferred out
+ * of order, but cannot be transferred in parallel. So only one
+ * chunk at a time.
+ *
+ * GAP handling. If a data gap is encountered, the file is truncated
+ * and new data is no longer pushed down to the lower level APIs.
+ * The tracker does continue to follow the file.
+ */
+
 extern crate libc;
 use log::*;
 use core::*;
@@ -23,10 +35,26 @@ use std::collections::HashMap;
 use std::collections::hash_map::Entry::{Occupied, Vacant};
 use filecontainer::*;
 
+#[derive(Debug)]
+pub struct FileChunk {
+    contains_gap: bool,
+    chunk: Vec<u8>,
+}
+
+impl FileChunk {
+    pub fn new(size: u32) -> FileChunk {
+        FileChunk {
+            contains_gap: false,
+            chunk: Vec::with_capacity(size as usize),
+        }
+    }
+}
+
 #[derive(Debug)]
 pub struct FileTransferTracker {
     file_size: u64,
     tracked: u64,
+    cur_ooo: u64,   // how many bytes do we have queued from ooo chunks
 
     track_id: u32,
     chunk_left: u32,
@@ -36,8 +64,9 @@ pub struct FileTransferTracker {
     pub file_open: bool,
     chunk_is_last: bool,
     chunk_is_ooo: bool,
+    file_is_truncated: bool,
 
-    chunks: HashMap<u64, Vec<u8>>,
+    chunks: HashMap<u64, FileChunk>,
     cur_ooo_chunk_offset: u64,
 }
 
@@ -46,6 +75,7 @@ impl FileTransferTracker {
         FileTransferTracker {
             file_size:0,
             tracked:0,
+            cur_ooo:0,
             track_id:0,
             chunk_left:0,
             tx_id:0,
@@ -53,6 +83,7 @@ impl FileTransferTracker {
             file_open:false,
             chunk_is_last:false,
             chunk_is_ooo:false,
+            file_is_truncated:false,
             cur_ooo_chunk_offset:0,
             chunks:HashMap::new(),
         }
@@ -70,12 +101,25 @@ impl FileTransferTracker {
     }
 
     pub fn close(&mut self, files: &mut FileContainer, flags: u16) {
-        files.file_close(&self.track_id, flags);
+        if !self.file_is_truncated {
+            files.file_close(&self.track_id, flags);
+        }
         self.file_open = false;
         self.tracked = 0;
         files.files_prune();
     }
 
+    pub fn trunc (&mut self, files: &mut FileContainer, flags: u16) {
+        if self.file_is_truncated {
+            return;
+        }
+        let myflags = flags | 1; // TODO util-file.c::FILE_TRUNCATED
+        files.file_close(&self.track_id, myflags);
+        SCLogDebug!("truncated file");
+        files.files_prune();
+        self.file_is_truncated = true;
+    }
+
     pub fn create(&mut self, name: &[u8], file_size: u64) {
         if self.file_open == true { panic!("close existing file first"); }
@@ -92,8 +136,15 @@
 
         SCLogDebug!("NEW CHUNK: chunk_size {} fill_bytes {}", chunk_size, fill_bytes);
 
+        // for now assume that is_last means it's really the last chunk,
+        // so no out of order chunks coming after. This means that if
+        // the last chunk is out of order, we've missed chunks before.
         if chunk_offset != self.tracked {
             SCLogDebug!("NEW CHUNK IS OOO: expected {}, got {}", self.tracked, chunk_offset);
+            if is_last {
+                SCLogDebug!("last chunk is out of order, this means we missed data before");
+                self.trunc(files, flags);
+            }
             self.chunk_is_ooo = true;
             self.cur_ooo_chunk_offset = chunk_offset;
         }
@@ -108,14 +159,21 @@
             self.open(config, files, flags, name);
         }
 
-        let res = self.update(files, flags, data);
+        let res = self.update(files, flags, data, 0);
         SCLogDebug!("NEW CHUNK: update res {:?}", res);
         res
     }
 
+    /// update the file tracker
+    /// If gap_size > 0 'data' should not be used.
     /// return how much we consumed of data
-    pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8]) -> u32 {
+    pub fn update(&mut self, files: &mut FileContainer, flags: u16, data: &[u8], gap_size: u32) -> u32 {
         let mut consumed = 0 as usize;
+        let is_gap = gap_size > 0;
+        if is_gap {
+            SCLogDebug!("is_gap {} size {} ooo? {}", is_gap, gap_size, self.chunk_is_ooo);
+        }
+
         if self.chunk_left + self.fill_bytes as u32 == 0 {
             //SCLogDebug!("UPDATE: nothing to do");
             return 0
@@ -140,7 +198,7 @@
                 let d = &data[0..self.chunk_left as usize];
 
                 if self.chunk_is_ooo == false {
-                    let res = files.file_append(&self.track_id, d);
+                    let res = files.file_append(&self.track_id, d, is_gap);
                     if res != 0 { panic!("append failed"); }
 
                     self.tracked += self.chunk_left as u64;
@@ -149,11 +207,13 @@
                             d.len(), self.cur_ooo_chunk_offset, self.tracked);
                     let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
                         Vacant(entry) => {
-                            entry.insert(Vec::with_capacity(self.chunk_left as usize))
+                            entry.insert(FileChunk::new(self.chunk_left))
                         },
                         Occupied(entry) => entry.into_mut(),
                     };
-                    c.extend(d);
+                    self.cur_ooo += d.len() as u64;
+                    c.contains_gap |= is_gap;
+                    c.chunk.extend(d);
                 }
 
                 consumed += self.chunk_left as usize;
@@ -169,7 +229,6 @@
                     SCLogDebug!("CHUNK(post) fill bytes now still {}", self.fill_bytes);
                 }
                 self.chunk_left = 0;
-                //return consumed as u32
             } else {
                 self.chunk_left = 0;
 
@@ -177,13 +236,14 @@
                 loop {
                     let offset = self.tracked;
                     match self.chunks.remove(&self.tracked) {
-                        Some(a) => {
-                            let res = files.file_append(&self.track_id, &a);
-                            if res != 0 { panic!("append failed"); }
+                        Some(c) => {
+                            let res = files.file_append(&self.track_id, &c.chunk, c.contains_gap);
+                            if res != 0 { panic!("append failed: files.file_append() returned {}", res); }
 
-                            self.tracked += a.len() as u64;
+                            self.tracked += c.chunk.len() as u64;
+                            self.cur_ooo -= c.chunk.len() as u64;
 
-                            SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, a.len());
+                            SCLogDebug!("STORED OOO CHUNK at offset {}, tracked now {}, stored len {}", offset, self.tracked, c.chunk.len());
                         },
                         _ => {
                             SCLogDebug!("NO STORED CHUNK found at offset {}", self.tracked);
@@ -208,15 +268,17 @@
         } else {
             if self.chunk_is_ooo == false {
-                let res = files.file_append(&self.track_id, data);
+                let res = files.file_append(&self.track_id, data, is_gap);
                 if res != 0 { panic!("append failed"); }
                 self.tracked += data.len() as u64;
             } else {
                 let c = match self.chunks.entry(self.cur_ooo_chunk_offset) {
-                    Vacant(entry) => entry.insert(Vec::with_capacity(32768)),
+                    Vacant(entry) => entry.insert(FileChunk::new(32768)),
                     Occupied(entry) => entry.into_mut(),
                 };
-                c.extend(data);
+                c.chunk.extend(data);
+                c.contains_gap |= is_gap;
+                self.cur_ooo += data.len() as u64;
             }
             self.chunk_left -= data.len() as u32;
@@ -226,4 +288,8 @@
         files.files_prune();
         consumed as u32
     }
+
+    pub fn get_queued_size(&self) -> u64 {
+        self.cur_ooo
+    }
 }
diff --git a/rust/src/nfs/nfs3.rs b/rust/src/nfs/nfs3.rs
index 9b3a52db93..3772a91f44 100644
--- a/rust/src/nfs/nfs3.rs
+++ b/rust/src/nfs/nfs3.rs
@@ -283,6 +283,9 @@ pub struct NFS3State {
     ts_chunk_left: u32,
     tc_chunk_left: u32,
 
+    ts_ssn_gap: bool,
+    tc_ssn_gap: bool,
+
     /// tx counter for assigning incrementing id's to tx's
     tx_id: u64,
 
@@ -308,6 +311,8 @@ impl NFS3State {
             tc_chunk_xid:0,
             ts_chunk_left:0,
             tc_chunk_left:0,
+            ts_ssn_gap:false,
+            tc_ssn_gap:false,
             tx_id:0,
             de_state_count:0,
             //ts_txs_updated:false,
@@ -701,7 +706,14 @@ impl NFS3State {
         let xidmap;
         match self.requestmap.remove(&r.hdr.xid) {
             Some(p) => { xidmap = p; },
-            _ => { SCLogDebug!("REPLY: xid {} NOT FOUND", r.hdr.xid); return 0; },
+            _ => {
+                SCLogDebug!("REPLY: xid {} NOT FOUND. GAPS? TS:{} TC:{}",
+                        r.hdr.xid, self.ts_ssn_gap, self.tc_ssn_gap);
+
+                // TODO we might be able to try to infer from the size + data
+                // that this is a READ reply and pass the data to the file API anyway?
+                return 0;
+            },
         }
 
         if xidmap.procedure == NFSPROC3_LOOKUP {
@@ -809,7 +821,7 @@ impl NFS3State {
 
     // update in progress chunks for file transfers
     // return how much data we consumed
-    fn filetracker_update(&mut self, direction: u8, data: &[u8]) -> u32 {
+    fn filetracker_update(&mut self, direction: u8, data: &[u8], gap_size: u32) -> u32 {
         let mut chunk_left = if direction == STREAM_TOSERVER {
             self.ts_chunk_left
         } else {
@@ -877,6 +889,7 @@ impl NFS3State {
             self.tc_chunk_left = chunk_left;
         }
 
+        let ssn_gap = self.ts_ssn_gap | self.tc_ssn_gap;
         // get the tx and update it
         let consumed = match self.get_file_tx_by_handle(&file_handle, direction) {
             Some((tx, files, flags)) => {
@@ -884,7 +897,15 @@ impl NFS3State {
                     Some(NFS3TransactionTypeData::FILE(ref mut x)) => x,
                     _ => { panic!("BUG") },
                 };
-                let cs = tdf.file_tracker.update(files, flags, data);
+                if ssn_gap {
+                    let queued_data = tdf.file_tracker.get_queued_size();
+                    if queued_data > 2000000 { // TODO should probably be configurable
+                        SCLogDebug!("QUEUED size {} while we've seen GAPs. Truncating file.", queued_data);
+                        tdf.file_tracker.trunc(files, flags);
+                    }
+                }
+
+                let cs = tdf.file_tracker.update(files, flags, data, gap_size);
                 cs
             },
             None => { 0 },
@@ -993,6 +1014,32 @@ impl NFS3State {
         xidmap.procedure
     }
 
+    pub fn parse_tcp_data_ts_gap<'b>(&mut self, gap_size: u32) -> u32 {
+        if self.tcp_buffer_ts.len() > 0 {
+            self.tcp_buffer_ts.clear();
+        }
+        let gap = vec![0; gap_size as usize];
+        let consumed = self.filetracker_update(STREAM_TOSERVER, &gap, gap_size);
+        if consumed > gap_size {
+            panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+        }
+        self.ts_ssn_gap = true;
+        return 0
+    }
+
+    pub fn parse_tcp_data_tc_gap<'b>(&mut self, gap_size: u32) -> u32 {
+        if self.tcp_buffer_tc.len() > 0 {
+            self.tcp_buffer_tc.clear();
+        }
+        let gap = vec![0; gap_size as usize];
+        let consumed = self.filetracker_update(STREAM_TOCLIENT, &gap, gap_size);
+        if consumed > gap_size {
+            panic!("consumed more than GAP size: {} > {}", consumed, gap_size);
+        }
+        self.tc_ssn_gap = true;
+        return 0
+    }
+
     /// Parsing function, handling TCP chunks fragmentation
     pub fn parse_tcp_data_ts<'b>(&mut self, i: &'b[u8]) -> u32 {
         let mut v : Vec<u8>;
@@ -1006,7 +1053,7 @@ impl NFS3State {
                 v = self.tcp_buffer_ts.split_off(0);
                 // sanity check vector length to avoid memory exhaustion
                 if self.tcp_buffer_ts.len() + i.len() > 1000000 {
-                    SCLogNotice!("parse_tcp_data_ts: TS buffer exploded {} {}",
+                    SCLogDebug!("parse_tcp_data_ts: TS buffer exploded {} {}",
                            self.tcp_buffer_ts.len(), i.len());
                    return 1;
                };
@@ -1017,17 +1064,15 @@ impl NFS3State {
         //SCLogDebug!("tcp_buffer ({})",tcp_buffer.len());
         let mut cur_i = tcp_buffer;
         if cur_i.len() > 1000000 {
-            SCLogNotice!("BUG buffer exploded: {}", cur_i.len());
+            SCLogDebug!("BUG buffer exploded: {}", cur_i.len());
         }
-
         // take care of in progress file chunk transfers
         // and skip buffer beyond it
-        let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i);
+        let consumed = self.filetracker_update(STREAM_TOSERVER, cur_i, 0);
         if consumed > 0 {
             if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
             cur_i = &cur_i[consumed as usize..];
         }
-
         while cur_i.len() > 0 { // min record size
             match parse_rpc_request_partial(cur_i) {
                 IResult::Done(_, ref rpc_phdr) => {
@@ -1122,6 +1167,7 @@
                     SCLogDebug!("TC buffer exploded");
                     return 1;
                 };
+
                 v.extend_from_slice(i);
                 v.as_slice()
             },
@@ -1130,17 +1176,16 @@
 
         let mut cur_i = tcp_buffer;
         if cur_i.len() > 100000 {
-            SCLogNotice!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
+            SCLogDebug!("parse_tcp_data_tc: BUG buffer exploded {}", cur_i.len());
         }
         // take care of in progress file chunk transfers
         // and skip buffer beyond it
-        let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i);
+        let consumed = self.filetracker_update(STREAM_TOCLIENT, cur_i, 0);
         if consumed > 0 {
             if consumed > cur_i.len() as u32 { panic!("BUG consumed more than we gave it"); }
             cur_i = &cur_i[consumed as usize..];
         }
-
         while cur_i.len() > 0 {
             match parse_rpc_packet_header(cur_i) {
                 IResult::Done(_, ref rpc_hdr) => {
@@ -1260,6 +1305,14 @@ pub extern "C" fn rs_nfs3_parse_request(_flow: *mut Flow,
 {
     let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
     SCLogDebug!("parsing {} bytes of request data", input_len);
+
+    if buf.as_ptr().is_null() && input_len > 0 {
+        if state.parse_tcp_data_ts_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
     if state.parse_tcp_data_ts(buf) == 0 {
         1
     } else {
@@ -1278,6 +1331,14 @@ pub extern "C" fn rs_nfs3_parse_response(_flow: *mut Flow,
 {
     SCLogDebug!("parsing {} bytes of response data", input_len);
     let buf = unsafe{std::slice::from_raw_parts(input, input_len as usize)};
+
+    if buf.as_ptr().is_null() && input_len > 0 {
+        if state.parse_tcp_data_tc_gap(input_len as u32) == 0 {
+            return 1
+        }
+        return -1
+    }
+
     if state.parse_tcp_data_tc(buf) == 0 {
         1
     } else {
diff --git a/src/app-layer-nfs3.c b/src/app-layer-nfs3.c
index 7cdc31485e..7ebb52e112 100644
--- a/src/app-layer-nfs3.c
+++ b/src/app-layer-nfs3.c
@@ -346,6 +346,10 @@ void RegisterNFS3Parsers(void)
         //    NFS3StateGetEventInfo);
         //AppLayerParserRegisterGetEventsFunc(IPPROTO_TCP, ALPROTO_NFS3,
         //    NFS3GetEvents);
+
+        /* This parser accepts gaps. */
+        AppLayerParserRegisterOptionFlags(IPPROTO_TCP, ALPROTO_NFS3,
+                APP_LAYER_PARSER_OPT_ACCEPT_GAPS);
     } else {
         SCLogDebug("NFS3 protocol parsing disabled.");
diff --git a/src/output-json-file.c b/src/output-json-file.c
index 39bad3f85e..842a3d208e 100644
--- a/src/output-json-file.c
+++ b/src/output-json-file.c
@@ -124,6 +124,7 @@ static void FileWriteJsonRecord(JsonFileLogThread *aft, const Packet *p, const F
     if (ff->magic)
         json_object_set_new(fjs, "magic", json_string((char *)ff->magic));
 #endif
+    json_object_set_new(fjs, "gaps", json_boolean((ff->flags & FILE_HAS_GAPS)));
     switch (ff->state) {
         case FILE_STATE_CLOSED:
             json_object_set_new(fjs, "state", json_string("CLOSED"));
diff --git a/src/rust.h b/src/rust.h
index aaf47dcef7..58684b3648 100644
--- a/src/rust.h
+++ b/src/rust.h
@@ -33,6 +33,8 @@ typedef struct SuricataContext_ {
             const uint8_t *data, uint32_t data_len, uint16_t flags);
     int (*FileAppendDataById)(FileContainer *, uint32_t track_id,
             const uint8_t *data, uint32_t data_len);
+    int (*FileAppendGAPById)(FileContainer *, uint32_t track_id,
+            const uint8_t *data, uint32_t data_len);
     void (*FileContainerRecycle)(FileContainer *ffc);
     void (*FilePrune)(FileContainer *ffc);
     void (*FileSetTx)(FileContainer *, uint64_t);
diff --git a/src/suricata.c b/src/suricata.c
index 16ea7cb730..9a2bc53fdb 100644
--- a/src/suricata.c
+++ b/src/suricata.c
@@ -2795,6 +2795,7 @@ int main(int argc, char **argv)
     context.FileOpenFileWithId = FileOpenFileWithId;
     context.FileCloseFileById = FileCloseFileById;
     context.FileAppendDataById = FileAppendDataById;
+    context.FileAppendGAPById = FileAppendGAPById;
     context.FileContainerRecycle = FileContainerRecycle;
     context.FilePrune = FilePrune;
     context.FileSetTx = FileContainerSetTx;
diff --git a/src/util-file.c b/src/util-file.c
index c7ee27e45a..56d9bd7a32 100644
--- a/src/util-file.c
+++ b/src/util-file.c
@@ -689,6 +689,41 @@ int FileAppendDataById(FileContainer *ffc, uint32_t track_id,
     SCReturnInt(-1);
 }
 
+/**
+ *  \brief Store/handle a chunk of file data in the File structure
+ *         The file with 'track_id' in the FileContainer will be used.
+ *
+ *  \param ffc FileContainer used to append to
+ *  \param track_id id to lookup the file
+ *  \param data data chunk
+ *  \param data_len data chunk len
+ *
+ *  \retval 0 ok
+ *  \retval -1 error
+ *  \retval -2 no store for this file
+ */
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+        const uint8_t *data, uint32_t data_len)
+{
+    SCEnter();
+
+    if (ffc == NULL || ffc->tail == NULL || data == NULL || data_len == 0) {
+        SCReturnInt(-1);
+    }
+    File *ff = ffc->head;
+    for ( ; ff != NULL; ff = ff->next) {
+        if (track_id == ff->file_track_id) {
+            ff->flags |= FILE_HAS_GAPS;
+            ff->flags |= (FILE_NOMD5|FILE_NOSHA1|FILE_NOSHA256);
+            ff->flags &= ~(FILE_MD5|FILE_SHA1|FILE_SHA256);
+            SCLogDebug("FILE_HAS_GAPS set");
+
+            int r = FileAppendDataDo(ff, data, data_len);
+            SCReturnInt(r);
+        }
+    }
+    SCReturnInt(-1);
+}
 
 /**
  *  \brief Open a new File
@@ -837,7 +872,7 @@ static int FileCloseFilePtr(File *ff, const uint8_t *data,
         }
     }
 
-    if (flags & FILE_TRUNCATED) {
+    if ((flags & FILE_TRUNCATED) || (ff->flags & FILE_HAS_GAPS)) {
         ff->state = FILE_STATE_TRUNCATED;
         SCLogDebug("flowfile state transitioned to FILE_STATE_TRUNCATED");
diff --git a/src/util-file.h b/src/util-file.h
index 6d6ae9e75d..0795177f3d 100644
--- a/src/util-file.h
+++ b/src/util-file.h
@@ -48,6 +48,7 @@
 #define FILE_NOTRACK    BIT_U16(12) /**< track size of file */
 #define FILE_USE_DETECT BIT_U16(13) /**< use content_inspected tracker */
 #define FILE_USE_TRACKID BIT_U16(14) /**< File::file_track_id field is in use */
+#define FILE_HAS_GAPS   BIT_U16(15)
 
 typedef enum FileState_ {
     FILE_STATE_NONE = 0,    /**< no state */
@@ -159,6 +160,8 @@ int FileCloseFileById(FileContainer *, uint32_t track_id,
 int FileAppendData(FileContainer *, const uint8_t *data, uint32_t data_len);
 int FileAppendDataById(FileContainer *, uint32_t track_id,
         const uint8_t *data, uint32_t data_len);
+int FileAppendGAPById(FileContainer *ffc, uint32_t track_id,
+        const uint8_t *data, uint32_t data_len);
 
 /**
  *  \brief Tag a file for storing