"defrag": {
"type": "object",
"properties": {
- "max_frag_hits": {
- "type": "integer"
+ "tracker_soft_reuse": {
+ "type": "integer",
+ "description":
+ "Finished tracker re-used from hash table before being moved to spare pool"
+ },
+ "tracker_hard_reuse": {
+ "type": "integer",
+ "description":
+ "Active tracker force closed before completion and reused for new tracker"
+ },
+ "max_trackers_reached": {
+ "type": "integer",
+ "description":
+ "How many times a packet wasn't reassembled due to max-trackers limit being reached"
+ },
+ "max_frags_reached": {
+ "type": "integer",
+ "description":
+ "How many times a fragment wasn't stored due to max-frags limit being reached"
},
"memcap_exception_policy": {
"description":
dtv->counter_defrag_ipv6_fragments =
StatsRegisterCounter("defrag.ipv6.fragments", tv);
dtv->counter_defrag_ipv6_reassembled = StatsRegisterCounter("defrag.ipv6.reassembled", tv);
- dtv->counter_defrag_max_hit =
- StatsRegisterCounter("defrag.max_frag_hits", tv);
+ dtv->counter_defrag_max_hit = StatsRegisterCounter("defrag.max_trackers_reached", tv);
+ dtv->counter_defrag_no_frags = StatsRegisterCounter("defrag.max_frags_reached", tv);
+ dtv->counter_defrag_tracker_soft_reuse = StatsRegisterCounter("defrag.tracker_soft_reuse", tv);
+ dtv->counter_defrag_tracker_hard_reuse = StatsRegisterCounter("defrag.tracker_hard_reuse", tv);
ExceptionPolicySetStatsCounters(tv, &dtv->counter_defrag_memcap_eps, &defrag_memcap_eps_stats,
DefragGetMemcapExceptionPolicy(), "defrag.memcap_exception_policy.",
uint16_t counter_defrag_ipv6_fragments;
uint16_t counter_defrag_ipv6_reassembled;
uint16_t counter_defrag_max_hit;
+ uint16_t counter_defrag_no_frags;
+ uint16_t counter_defrag_tracker_soft_reuse;
+ uint16_t counter_defrag_tracker_hard_reuse;
ExceptionPolicyCounters counter_defrag_memcap_eps;
uint16_t counter_flow_memcap;
SC_ATOMIC_DECLARE(unsigned int,defragtracker_counter);
SC_ATOMIC_DECLARE(unsigned int,defragtracker_prune_idx);
-static DefragTracker *DefragTrackerGetUsedDefragTracker(void);
+static DefragTracker *DefragTrackerGetUsedDefragTracker(
+ ThreadVars *tv, const DecodeThreadVars *dtv);
/** queue with spare tracker */
static DefragTrackerStack defragtracker_spare_q;
if (dt == NULL) {
/* If we reached the max memcap, we get a used tracker */
if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
- dt = DefragTrackerGetUsedDefragTracker();
+ dt = DefragTrackerGetUsedDefragTracker(tv, dtv);
if (dt == NULL) {
ExceptionPolicyApply(p, defrag_config.memcap_policy, PKT_DROP_REASON_DEFRAG_MEMCAP);
DefragExceptionPolicyStatsIncr(tv, dtv, defrag_config.memcap_policy);
*
* \retval dt tracker or NULL
*/
-static DefragTracker *DefragTrackerGetUsedDefragTracker(void)
+static DefragTracker *DefragTrackerGetUsedDefragTracker(ThreadVars *tv, const DecodeThreadVars *dtv)
{
uint32_t idx = SC_ATOMIC_GET(defragtracker_prune_idx) % defrag_config.hash_size;
uint32_t cnt = defrag_config.hash_size;
continue;
}
+ /* dt->remove set means the tracker already finished ("soft" reuse);
+ * clear means we are force-closing an active tracker ("hard" reuse) */
+ bool incr_reuse_cnt = !dt->remove;
+
/* remove from the hash */
hb->head = dt->hnext;
SCMutexUnlock(&dt->lock);
+ if (incr_reuse_cnt) {
+ StatsIncr(tv, dtv->counter_defrag_tracker_hard_reuse);
+ } else {
+ StatsIncr(tv, dtv->counter_defrag_tracker_soft_reuse);
+ }
+
(void) SC_ATOMIC_ADD(defragtracker_prune_idx, (defrag_config.hash_size - cnt));
return dt;
}
} else {
ENGINE_SET_EVENT(p, IPV6_FRAG_IGNORED);
}
+ if (tv != NULL && dtv != NULL) {
+ StatsIncr(tv, dtv->counter_defrag_no_frags);
+ }
goto error_remove_tracker;
}
new->pkt = SCMalloc(GET_PKT_LEN(p));