} else {
/* flow has been recycled before it went into the spare queue */
- /* flow is initialized (recylced) but *unlocked* */
+ /* flow is initialized (recycled) but *unlocked* */
}
FLOWLOCK_WRLOCK(f);
* flow pointer. Then compares the packet with the found flow to see if it is
* the flow we need. If it isn't, walk the list until the right flow is found.
*
- * If the flow is not found or the bucket was emtpy, a new flow is taken from
+ * If the flow is not found or the bucket was empty, a new flow is taken from
* the spare pool. The pool will alloc new flows as long as we stay within our
* memcap limit.
*
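As a reading aid, here is a minimal sketch of the lookup that comment describes. It is not the real FlowGetFlowFromHash() body: FlowCompare() is a real helper in flow-hash.c, but the bucket layout and FlowSpareGet() as used here are simplified stand-ins.

/* Sketch only: walk the bucket's list for the matching flow, else
 * fall back to the spare pool. Helper names are simplified. */
static Flow *FlowLookupSketch(FlowBucket *fb, Packet *p)
{
    for (Flow *f = fb->head; f != NULL; f = f->next) {
        if (FlowCompare(f, p)) { /* is this the flow we need? */
            FLOWLOCK_WRLOCK(f);
            return f;            /* found: returned locked */
        }
    }
    /* not found or bucket was empty: take a flow from the spare
     * pool, which allocs new flows while under the memcap limit */
    Flow *f = FlowSpareGet();    /* stand-in for the pool helper */
    if (f != NULL)
        FLOWLOCK_WRLOCK(f);
    return f;                    /* NULL: out of flow memory */
}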
/** queue to pass flows to cleanup/log thread(s) */
FlowQueue flow_recycle_q;
-/* multi flow mananger support */
+/* multi flow manager support */
static uint32_t flowmgr_number = 1;
/* atomic counter for flow managers, to assign instance id */
SC_ATOMIC_DECLARE(uint32_t, flowmgr_cnt);
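A sketch of how that counter hands out ids: each flow manager thread atomically bumps it at start-up, so instances get distinct ids without extra locking (the exact call site in the real code may differ).

/* Sketch: claim a unique flow manager instance id (0..N-1);
 * SC_ATOMIC_ADD returns the counter's previous value. */
uint32_t instance = SC_ATOMIC_ADD(flowmgr_cnt, 1);
SCLogDebug("flow manager instance %u of %u", instance, flowmgr_number);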
* done are:
* - code consistency
* - silence complaining profilers
- * - allow us to aggressively check using debug valdation assertions
+ * - allow us to aggressively check using debug validation assertions
* - be robust in case of future changes
- * - locking overhead if neglectable when no other thread fights us
+ * - locking overhead is negligible when no other thread fights us
*
/** atomic int that is used when freeing a flow from the hash. In this
* case we walk the hash to find a flow to free. This var records where
* we left off in the hash. Without this only the top rows of the hash
- * are freed. This isn't just about fairness. Under severe presure, the
+ * are freed. This isn't just about fairness. Under severe pressure, the
* hash rows on top would be all freed and the time to find a flow to
* free increased with every run. */
SC_ATOMIC_DECLARE(unsigned int, flow_prune_idx);
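A sketch of the walk this enables; flow_hash and flow_config.hash_size are used as assumed names for the hash array and its size, and FlowTryFreeFromRow() is a hypothetical helper.

/* Sketch: resume the prune walk where the last run stopped, so rows
 * further down the hash get freed too, not only the top ones. */
uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
for (uint32_t cnt = 0; cnt < flow_config.hash_size; cnt++) {
    FlowBucket *fb = &flow_hash[idx];
    idx = (idx + 1) % flow_config.hash_size;
    if (FlowTryFreeFromRow(fb))        /* hypothetical helper */
        break;                         /* freed one flow, done */
}
SC_ATOMIC_SET(flow_prune_idx, idx);    /* next run starts here */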
void FlowHandlePacket(ThreadVars *tv, FlowLookupStruct *fls, Packet *p)
{
- /* Get this packet's flow from the hash. FlowHandlePacket() will setup
- * a new flow if nescesary. If we get NULL, we're out of flow memory.
+ /* Get this packet's flow from the hash. FlowHandlePacket() will set up
+ * a new flow if necessary. If we get NULL, we're out of flow memory.
* The returned flow is locked. */
Flow *f = FlowGetFlowFromHash(tv, fls, p, &p->flow);
if (f == NULL)
/**
* \brief get 'disruption' flags: GAP/DEPTH/PASS
* \param f locked flow
- * \param flags existing flags to be ammended
+ * \param flags existing flags to be amended
* \retval flags original flags + disrupt flags (if any)
* \TODO handle UDP
*/
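A sketch of the shape this amendment takes, assuming the disrupt state lives in the flow's TCP session. The STREAMTCP_*/STREAM_* names are borrowed from the stream-tcp code but their exact use here is illustrative, and the PASS case is omitted.

/* Sketch: amend the caller's flags with disrupt flags derived from
 * the TCP session; non-TCP returns unchanged (hence the UDP TODO). */
uint8_t DisruptionFlagsSketch(const Flow *f, uint8_t flags)
{
    if (f->proto != IPPROTO_TCP || f->protoctx == NULL)
        return flags;

    const TcpSession *ssn = f->protoctx;
    /* pick the direction the caller is working on (illustrative) */
    const TcpStream *stream =
            (flags & STREAM_TOSERVER) ? &ssn->client : &ssn->server;

    uint8_t newflags = flags;
    if (stream->flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED)
        newflags |= STREAM_DEPTH;      /* illustrative mapping */
    if (stream->flags & STREAMTCP_STREAM_FLAG_HAS_GAP)
        newflags |= STREAM_GAP;        /* illustrative mapping */
    return newflags;   /* original flags + disrupt flags (if any) */
}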
#ifdef UNITTESTS
if (f->fb != NULL) {
#endif
- /* and reset the flow buckets next_ts value so that the flow manager
+ /* and reset the flow bucket's next_ts value so that the flow manager
* has to revisit this row */
SC_ATOMIC_SET(f->fb->next_ts, 0);
#ifdef UNITTESTS
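For context on why the reset forces a revisit: the flow manager skips hash rows whose next_ts still lies in the future, along these simplified lines (ts stands for the current timestamp; the exact comparison in flow-manager.c may differ).

/* Sketch, flow manager side: a row whose next_ts is ahead of the
 * current time can hold no timed-out flows yet; a next_ts of 0
 * always fails this test, so the row gets looked at again. */
if ((uint32_t)SC_ATOMIC_GET(fb->next_ts) > (uint32_t)SCTIME_SECS(ts))
    continue;   /* skip this hash row for now */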
} FlowLookupStruct;
/** \brief prepare packet for a life with flow
- * Set PKT_WANTS_FLOW flag to incidate workers should do a flow lookup
+ * Set PKT_WANTS_FLOW flag to indicate workers should do a flow lookup
* and calc the hash value to be used in the lookup and autofp flow
* balancing. */
void FlowSetupPacket(Packet *p);
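The body this describes is small; a sketch, assuming FlowGetHash() from flow-hash.c is the hash helper meant here.

/* Sketch of FlowSetupPacket(): flag the packet and precompute the
 * hash used both for the lookup and for autofp flow balancing. */
void FlowSetupPacket(Packet *p)
{
    p->flags |= PKT_WANTS_FLOW;     /* workers should do a flow lookup */
    p->flow_hash = FlowGetHash(p);  /* used by lookup and autofp */
}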
{
int64_t id = (uint64_t)(SCTIME_SECS(f->startts) & 0x0000FFFF) << 48 |
(uint64_t)(SCTIME_USECS(f->startts) & 0x0000FFFF) << 32 | (int64_t)f->flow_hash;
- /* reduce to 51 bits as Javascript and even JSON often seem to
+ /* reduce to 51 bits as JavaScript and even JSON often seem to
* max out there. */
id &= 0x7ffffffffffffLL;
return id;
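For reference, the layout before masking: bits 63..48 carry the low 16 bits of the start time's seconds, bits 47..32 the low 16 bits of its usecs, and bits 31..0 the flow hash. The 0x7ffffffffffffLL mask then keeps the low 51 bits, comfortably inside the 2^53 range up to which IEEE-754 doubles (and therefore JavaScript numbers) represent integers exactly.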