}
}
-uint32_t TcpReassembler::get_flush_data_len(TcpSegmentNode* tsn, uint32_t to_seq,
- uint32_t flushBufSize)
+uint32_t TcpReassembler::get_flush_data_len(TcpSegmentNode* tsn, uint32_t to_seq, unsigned max)
{
unsigned int flushSize = tsn->payload_size;
- // copy only till flush buffer gets full
- if ( flushSize > flushBufSize )
- flushSize = flushBufSize;
+ if ( flushSize > max )
+ flushSize = max;
// copy only to flush point
if ( paf_active(&tracker->paf_state) && SEQ_GT(tsn->seq + flushSize, to_seq) )
}
// flush the client seglist up to the most recently acked segment
-int TcpReassembler::flush_data_segments(Packet* p, uint32_t toSeq, uint8_t* flushbuf,
- const uint8_t* flushbuf_end)
+int TcpReassembler::flush_data_segments(Packet* p, uint32_t toSeq)
{
- uint16_t bytes_flushed = 0;
+ uint32_t bytes_flushed = 0;
uint32_t segs = 0;
uint32_t flags = PKT_PDU_HEAD;
DEBUG_WRAP(uint32_t bytes_queued = seg_bytes_logical; );
while ( SEQ_LT(seglist.next->seq, toSeq) )
{
TcpSegmentNode* tsn = seglist.next, * sr = nullptr;
- unsigned flushbuf_size = flushbuf_end - flushbuf;
- unsigned bytes_to_copy = get_flush_data_len(tsn, toSeq, flushbuf_size);
+ unsigned bytes_to_copy = get_flush_data_len(tsn, toSeq, tracker->splitter->max(p->flow));
unsigned bytes_copied = 0;
assert(bytes_to_copy);
|| SEQ_EQ(tsn->seq + bytes_to_copy, toSeq) )
flags |= PKT_PDU_TAIL;
- const StreamBuffer* sb = tracker->splitter->reassemble(p->flow, total, bytes_flushed,
- tsn->payload,
- bytes_to_copy, flags, bytes_copied);
+ const StreamBuffer* sb = tracker->splitter->reassemble(
+ p->flow, total, bytes_flushed, tsn->payload, bytes_to_copy, flags, bytes_copied);
+
flags = 0;
+
if ( sb )
{
s5_pkt->data = sb->data;
s5_pkt->dsize = sb->length;
assert(sb->length <= s5_pkt->max_dsize);
- // FIXIT-M flushbuf should be eliminated from this function
- // since we are actually using the stream splitter buffer
- flushbuf = ( uint8_t* )s5_pkt->data;
- // ensure we stop here
bytes_to_copy = bytes_copied;
}
assert(bytes_to_copy == bytes_copied);
-
- flushbuf += bytes_to_copy;
bytes_flushed += bytes_to_copy;
if ( bytes_to_copy < tsn->payload_size
flush_count++;
segs++;
- if ( flushbuf >= flushbuf_end )
- break;
-
if ( SEQ_EQ(tsn->seq + bytes_to_copy, toSeq) )
break;
if ( tsn->next )
seglist.next = tsn->next;
- tracker->set_tf_flags(TF_MISSING_PKT);
+ // FIXIT-L this is suboptimal - it would be better to exclude the FIN byte from toSeq
+ if ( !tracker->fin_set() or SEQ_LT(toSeq, tracker->fin_final_seq) )
+ tracker->set_tf_flags(TF_MISSING_PKT);
+
break;
}
seglist.next = tsn->next;
if ( sb || !seglist.next )
break;
+
+ if ( bytes_flushed + seglist.next->payload_size >= StreamSplitter::max_buf )
+ break;
}
DEBUG_WRAP(bytes_queued -= bytes_flushed; );
prep_s5_pkt(session->flow, p, pkt_flags);
- // if not specified, set bytes to flush to what was acked
- if (!bytes && SEQ_GT(tracker->r_win_base, seglist_base_seq))
- bytes = tracker->r_win_base - seglist_base_seq;
-
// FIXIT-L this should not be necessary here
seglist_base_seq = seglist.next->seq;
stop_seq = seglist_base_seq + bytes;
/* setup the pseudopacket payload */
s5_pkt->dsize = 0;
- s5_pkt->data = s5_pkt->pkt;
- const uint8_t* s5_pkt_end = s5_pkt->data + s5_pkt->max_dsize;
- flushed_bytes = flush_data_segments(p, stop_seq, (uint8_t*)s5_pkt->data, s5_pkt_end);
+ s5_pkt->data = nullptr;
+ flushed_bytes = flush_data_segments(p, stop_seq);
if ( flushed_bytes == 0 )
break; /* No more data... bail */
return _flush_to_seq(bytes, p, pkt_flags);
}
-// FIXIT-H the seq number math in the following 2 funcs does not handle
-// wrapping get the footprint for the current seglist, the difference
+// get the footprint for the current seglist, the difference
// between our base sequence and the last ack'd sequence we received
uint32_t TcpReassembler::get_q_footprint()
{
- int32_t fp;
-
- if ( tracker == nullptr )
- return 0;
-
- fp = tracker->r_win_base - seglist_base_seq;
- if ( fp <= 0 )
+ if ( !tracker )
return 0;
seglist.next = seglist.head;
+ uint32_t fp = tracker->r_win_base - seglist_base_seq;
+
+ // FIXIT-M ideally the FIN sequence number would be excluded from the footprint here
+
return fp;
}
// FIXIT-P get_q_sequenced() performance could possibly be
// boosted by tracking sequenced bytes as seglist is updated
// to avoid the while loop, etc. below.
+
uint32_t TcpReassembler::get_q_sequenced()
{
int32_t len;