* A nil key in a table will not be caught. Neither will a nil value in a
table. Neither of the following will cause errors, nor will they
- actually set http_server.post_depth:
+ actually set http_inspect.request_depth:
- http_server = { post_depth }
- http_server = { post_depth = undefined_symbol }
+ http_inspect = { request_depth }
+ http_inspect = { request_depth = undefined_symbol }
* It is not an error to set a value multiple times. The actual value
applied may not be the last in the table either. It is best to avoid
such cases.
- http_server =
+ http_inspect =
{
- post_depth = 1234,
- post_depth = 4321
+ request_depth = 1234,
+ request_depth = 4321
}
* Snort can't tell you the exact filename or line number of a semantic
{
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
break;
}
*infractions += INF_CHUNK_BAD_CHAR;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
else
{
*infractions += INF_CHUNK_TOO_LARGE;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
}
break;
*infractions += INF_CHUNK_BAD_CHAR;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
break;
case CHUNK_OPTIONS:
*infractions += INF_CHUNK_LONE_CR;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
break;
}
if (expected > 0)
*infractions += INF_CHUNK_NO_LENGTH;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
break;
case CHUNK_DATA:
}
if ((data_seen += skip_amount) == flow_target)
{
- // FIXIT-M need to randomize slice point
data_seen = 0;
num_flush = k+1;
new_section = true;
*infractions += INF_CHUNK_BAD_END;
events->create_event(EVENT_BROKEN_CHUNK);
curr_state = CHUNK_BAD;
+ k--;
}
break;
case CHUNK_DCRLF2:
num_flush = length;
return SCAN_DISCARD_PIECE;
}
+
+ // When chunk parsing breaks down and we first enter CHUNK_BAD state, it may happen
+ // that there were chunk header bytes between the last good chunk and the point where
+ // the failure occurred. These will not have been counted in data_seen because we
+ // planned to delete them during reassembly. Because they are not part of a valid chunk
+ // they will be reassembled after all. This will overrun the flow_target making the
+ // message section a little bigger than planned. It's not important.
uint32_t skip_amount = length-k;
skip_amount = (skip_amount <= flow_target-data_seen) ? skip_amount :
flow_target-data_seen;
k += skip_amount - 1;
if ((data_seen += skip_amount) == flow_target)
{
- // FIXIT-M need to randomize slice point
data_seen = 0;
num_flush = k+1;
new_section = true;
void HttpMsgSection::update_depth() const
{
- if ((session_data->detect_depth_remaining[source_id] <= 0) &&
- (session_data->detection_status[source_id] == DET_ON))
+ const int64_t& ddr = session_data->detect_depth_remaining[source_id];
+ const int64_t& fdr = session_data->file_depth_remaining[source_id];
+
+ if ((ddr <= 0) && (session_data->detection_status[source_id] == DET_ON))
{
session_data->detection_status[source_id] = DET_DEACTIVATING;
}
- if ((session_data->file_depth_remaining[source_id] <= 0) &&
- (session_data->detect_depth_remaining[source_id] <= 0))
+ if ((ddr <= 0) && (fdr <= 0))
{
// Don't need any more of the body
session_data->section_size_target[source_id] = 0;
return;
}
- const int random_increment = FlushBucket::get_size() - 192;
- assert((random_increment >= -64) && (random_increment <= 63));
+ const unsigned max_pdu = snort::SnortConfig::get_conf()->max_pdu;
+ const unsigned target_size = (session_data->compression[source_id] == CMP_NONE) ?
+ max_pdu : GZIP_BLOCK_SIZE;
+ const unsigned max_size = (session_data->compression[source_id] == CMP_NONE) ?
+ max_pdu + (max_pdu >> 1) : FINAL_GZIP_BLOCK_SIZE;
- switch (session_data->compression[source_id])
+ if (ddr <= 0)
{
- case CMP_NONE:
- {
- unsigned max_pdu = snort::SnortConfig::get_conf()->max_pdu;
- session_data->section_size_target[source_id] = max_pdu + random_increment;
- session_data->section_size_max[source_id] = max_pdu + (max_pdu >> 1);
- break;
- }
- case CMP_GZIP:
- case CMP_DEFLATE:
- session_data->section_size_target[source_id] = GZIP_BLOCK_SIZE + random_increment;
- session_data->section_size_max[source_id] = FINAL_GZIP_BLOCK_SIZE;
- break;
- default:
- assert(false);
+ session_data->section_size_target[source_id] = target_size;
+ session_data->section_size_max[source_id] = max_size;
+ }
+ else if (ddr <= max_size)
+ {
+ session_data->section_size_target[source_id] = ddr + 200;
+ session_data->section_size_max[source_id] = ddr + 200;
+ }
+ else
+ {
+ const int random_increment = FlushBucket::get_size() - 192;
+ assert((random_increment >= -64) && (random_increment <= 63));
+ session_data->section_size_target[source_id] = target_size + random_increment;
+ session_data->section_size_max[source_id] = max_size;
}
}