Remove explicit comparisons of C bools against true and false.
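
The change is mechanical throughout: a comparison against a boolean literal
is replaced with the value itself, or with its negation. A minimal sketch of
the convention (the struct and the is_ready() helper below are hypothetical,
for illustration only, not code from this tree):

    #include <stdbool.h>

    struct conn {
        bool initialized;
    };

    static bool is_ready(const struct conn *c)
    {
        return c->initialized;
    }

    static void example(const struct conn *c)
    {
        /* avoid: explicit comparison against a boolean literal
         *   if (c->initialized == false) { ... }
         *   if (is_ready(c) == true) { ... }
         */

        /* prefer: test the value, or its negation, directly */
        if (!c->initialized) {
            /* not yet initialized */
        }
        if (is_ready(c)) {
            /* ready */
        }
    }

Besides being shorter, the direct test stays correct when the operand is an
int-valued flag rather than a real bool, where "flag == true" would only
match the value 1.
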
for (uint32_t hbCount = 0; hbCount < hStat.u.usageData_v0.data.numHostBufferUsed;
hbCount++) {
- if (unlikely(stream_config[inst_id].initialized == false)) {
+ if (unlikely(!stream_config[inst_id].initialized)) {
rxPktsStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.frames;
rxByteStart[stream_id] += hStat.u.usageData_v0.data.hb[hbCount].stat.rx.bytes;
dropPktStart[stream_id] +=
uint64_t active_id;
uint64_t log_id = pstate->log_id;
uint64_t inspect_id = pstate->inspect_id[(direction & STREAM_TOSERVER) ? 0 : 1];
- if (alp_ctx.ctxs[f->alproto][f->protomap].logger == true) {
+ if (alp_ctx.ctxs[f->alproto][f->protomap].logger) {
active_id = MIN(log_id, inspect_id);
} else {
active_id = inspect_id;
{
SCEnter();
int ipproto_map = FlowGetProtoMapping(ipproto);
- int r = (alp_ctx.ctxs[alproto][ipproto_map].logger == false) ? 0 : 1;
+ int r = (!alp_ctx.ctxs[alproto][ipproto_map].logger) ? 0 : 1;
SCReturnInt(r);
}
void StatsSyncCountersIfSignalled(ThreadVars *tv)
{
- if (SC_ATOMIC_GET(tv->perf_public_ctx.sync_now) == true) {
+ if (SC_ATOMIC_GET(tv->perf_public_ctx.sync_now)) {
StatsUpdateCounterArray(&tv->perf_private_ctx, &tv->perf_public_ctx);
}
}
{
Dataset *set = sets;
while (set) {
- if (strcasecmp(name, set->name) == 0 && set->hidden == false) {
+ if (strcasecmp(name, set->name) == 0 && !set->hidden) {
return set;
}
set = set->next;
SCMutexLock(&sets_lock);
Dataset *set = sets;
while (set) {
- if (!DatasetIsStatic(set->save, set->load) || set->from_yaml == true) {
+ if (!DatasetIsStatic(set->save, set->load) || set->from_yaml) {
SCLogDebug("Not a static set, skipping %s", set->name);
set = set->next;
continue;
Dataset *prev = NULL;
while (cur) {
Dataset *next = cur->next;
- if (cur->hidden == false) {
+ if (!cur->hidden) {
prev = cur;
cur = next;
continue;
p->vlan_id[p->vlan_idx++] = (uint16_t)GET_VLAN_ID(vlan_hdr);
- if (DecodeNetworkLayer(tv, dtv, proto, p,
- pkt + VLAN_HEADER_LEN, len - VLAN_HEADER_LEN) == false) {
+ if (!DecodeNetworkLayer(tv, dtv, proto, p, pkt + VLAN_HEADER_LEN, len - VLAN_HEADER_LEN)) {
ENGINE_SET_INVALID_EVENT(p, VLAN_UNKNOWN_TYPE);
return TM_ECODE_FAILED;
}
DetectContentData *cd = DetectContentParse(de_ctx->spm_global_thread_ctx, contentstr);
if (cd == NULL)
goto error;
- if (s->init_data->negated == true) {
+ if (s->init_data->negated) {
cd->flags |= DETECT_CONTENT_NEGATED;
}
FAIL_IF_NULL(s); \
SigPrepareStage1(de_ctx); \
bool res = TestLastContent(s, (o), (d)); \
- FAIL_IF(res == false); \
+ FAIL_IF_NOT(res); \
DetectEngineCtxFree(de_ctx); \
}
fsd.mpm_ctx = mpm_ctx;
if (SetupStreamCallbackData(&fsd, ssn, stream, det_ctx, ctx->transforms, frames, frame,
- ctx->list_id, eof) == true) {
+ ctx->list_id, eof)) {
StreamReassembleForFrame(ssn, stream, FrameStreamDataPrefilterFunc, &fsd,
fsd.requested_stream_offset, eof);
}
// TODO there should be only one inspect engine for this frame, ever?
- if (e->v1.Callback(det_ctx, e, s, p, frames, frame) == true) {
+ if (e->v1.Callback(det_ctx, e, s, p, frames, frame)) {
SCLogDebug("sid %u: e %p Callback returned true", s->id, e);
return true;
}
fsd.inspect_result = DETECT_ENGINE_INSPECT_SIG_NO_MATCH;
fsd.p = p;
- if (SetupStreamCallbackData(
- &fsd, ssn, stream, det_ctx, transforms, frames, frame, list_id, eof) == false) {
+ if (!SetupStreamCallbackData(
+ &fsd, ssn, stream, det_ctx, transforms, frames, frame, list_id, eof)) {
return DETECT_ENGINE_INSPECT_SIG_NO_MATCH;
}
StreamReassembleForFrame(
/* init geo engine, but not when running as unittests */
if (!(RunmodeIsUnittests())) {
/* Initialize the geolocation engine */
- if (InitGeolocationEngine(geoipdata) == false)
+ if (!InitGeolocationEngine(geoipdata))
goto error;
}
/* handle 'silent' error case */
if (setup_ret == -2) {
- if (de_ctx->sm_types_silent_error[idx] == false) {
+ if (!de_ctx->sm_types_silent_error[idx]) {
de_ctx->sm_types_silent_error[idx] = true;
return -1;
}
}
has_frame |= bt->frame;
- has_app |= (bt->frame == false && bt->packet == false);
+ has_app |= (!bt->frame && !bt->packet);
has_pkt |= bt->packet;
- if ((s->flags & SIG_FLAG_REQUIRE_PACKET) && bt->packet == false) {
+ if ((s->flags & SIG_FLAG_REQUIRE_PACKET) && !bt->packet) {
SCLogError("Signature combines packet "
"specific matches (like dsize, flags, ttl) with stream / "
"state matching by matching on app layer proto (like using "
/* no flowvars? skip this sig */
const bool fv = f->flowvar != NULL;
- if (fv == false) {
+ if (!fv) {
SCLogDebug("skipping sig as the flow has no flowvars and sig "
"has SIG_FLAG_REQUIRE_FLOWVAR flag set.");
return false;
}
}
- if (DetectRunInspectRuleHeader(p, pflow, s, sflags, s_proto_flags) == false) {
+ if (!DetectRunInspectRuleHeader(p, pflow, s, sflags, s_proto_flags)) {
goto next;
}
- if (DetectEnginePktInspectionRun(tv, det_ctx, s, pflow, p, &alert_flags) == false) {
+ if (!DetectEnginePktInspectionRun(tv, det_ctx, s, pflow, p, &alert_flags)) {
goto next;
}
/* for a new inspection we inspect pkt header and packet matches */
if (likely(stored_flags == NULL)) {
TRACE_SID_TXS(s->id, tx, "first inspect, run packet matches");
- if (DetectRunInspectRuleHeader(p, f, s, s->flags, s->proto.flags) == false) {
+ if (!DetectRunInspectRuleHeader(p, f, s, s->flags, s->proto.flags)) {
TRACE_SID_TXS(s->id, tx, "DetectRunInspectRuleHeader() no match");
return false;
}
- if (DetectEnginePktInspectionRun(tv, det_ctx, s, f, p, NULL) == false) {
+ if (!DetectEnginePktInspectionRun(tv, det_ctx, s, f, p, NULL)) {
TRACE_SID_TXS(s->id, tx, "DetectEnginePktInspectionRun no match");
return false;
}
/* call individual rule inspection */
RULE_PROFILING_START(p);
bool r = DetectRunInspectRuleHeader(p, f, s, s->flags, s->proto.flags);
- if (r == true) {
+ if (r) {
r = DetectRunFrameInspectRule(tv, det_ctx, s, f, p, frames, frame);
- if (r == true) {
+ if (r) {
/* match */
DetectRunPostMatch(tv, det_ctx, p, s);
* be modified when we have both the flow and hash row lock */
/* timeout logic goes here */
- if (FlowManagerFlowTimeout(f, ts, next_ts, emergency) == false) {
+ if (!FlowManagerFlowTimeout(f, ts, next_ts, emergency)) {
FLOWLOCK_UNLOCK(f);
counters->flows_notimeout++;
FlowCountersUpdate(th_v, ftd, &counters);
- if (emerg == true) {
+ if (emerg) {
SCLogDebug("flow_sparse_q.len = %" PRIu32 " prealloc: %" PRIu32
"flow_spare_q status: %" PRIu32 "%% flows at the queue",
spare_pool_len, flow_config.prealloc,
if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY) {
break;
}
- if (SC_ATOMIC_GET(flow_recycle_q.non_empty) == true) {
+ if (SC_ATOMIC_GET(flow_recycle_q.non_empty)) {
break;
}
}
do {
FlowWakeupFlowRecyclerThread();
usleep(10);
- } while (FlowRecyclerReadyToShutdown() == false);
+ } while (!FlowRecyclerReadyToShutdown());
SCMutexLock(&tv_root_lock);
/* flow recycler thread(s) is/are a part of mgmt threads */
static inline void FlowQueueAtomicSetNonEmpty(FlowQueue *fq)
{
- if (SC_ATOMIC_GET(fq->non_empty) == false) {
+ if (!SC_ATOMIC_GET(fq->non_empty)) {
SC_ATOMIC_SET(fq->non_empty, true);
}
}
static inline void FlowQueueAtomicSetEmpty(FlowQueue *fq)
{
- if (SC_ATOMIC_GET(fq->non_empty) == true) {
+ if (SC_ATOMIC_GET(fq->non_empty)) {
SC_ATOMIC_SET(fq->non_empty, false);
}
}
/* take injected flows and append to our work queue */
FLOWWORKER_PROFILING_START(p, PROFILE_FLOWWORKER_FLOW_INJECTED);
FlowQueuePrivate injected = { NULL, NULL, 0 };
- if (SC_ATOMIC_GET(tv->flow_queue->non_empty) == true)
+ if (SC_ATOMIC_GET(tv->flow_queue->non_empty))
injected = FlowQueueExtractPrivate(tv->flow_queue);
if (injected.len > 0) {
StatsAddUI64(tv, fw->cnt.flows_injected, (uint64_t)injected.len);
boolval = true;
if (ConfGetChildValueBoolWithDefault(if_root, if_default, "use-percpu-hash", &boolval) ==
1) {
- if (boolval == false) {
+ if (!boolval) {
SCLogConfig("%s: not using percpu hash", aconf->iface);
aconf->ebpf_t_config.cpus_count = 1;
}
if (ptv->flags & (AFP_BYPASS|AFP_XDPBYPASS)) {
ptv->v4_map_fd = EBPFGetMapFDByName(ptv->iface, "flow_table_v4");
if (ptv->v4_map_fd == -1) {
- if (g_flowv4_ok == false) {
+ if (!g_flowv4_ok) {
SCLogError("Can't find eBPF map fd for '%s'", "flow_table_v4");
g_flowv4_ok = true;
}
static TmEcode AFXDPAssignQueueID(AFXDPThreadVars *ptv)
{
- if (ptv->xsk.queue.assigned == false) {
+ if (!ptv->xsk.queue.assigned) {
ptv->xsk.queue.queue_num = SC_ATOMIC_GET(xsk_protect.queue_num);
SC_ATOMIC_ADD(xsk_protect.queue_num, 1);
bool verdict = VerdictTunnelPacket(p);
/* don't verdict if we are not ready */
- if (verdict == true) {
+ if (verdict) {
SCLogDebug("Setting verdict on tunnel");
retval = IPFWSetVerdict(tv, ptv, p->root ? p->root : p);
}
root_p->nfq_v.verdicted = do_verdict;
SCSpinUnlock(lock);
/* don't verdict if we are not ready */
- if (do_verdict == true) {
+ if (do_verdict) {
int ret = NFQSetVerdict(root_p, mark_value, mark_modified);
if (ret != TM_ECODE_OK) {
return ret;
pv->should_loop = (should_loop == 1);
}
- if (pv->should_recurse == true && pv->should_loop == true) {
+ if (pv->should_recurse && pv->should_loop) {
SCLogError("Error, --pcap-file-continuous and --pcap-file-recursive "
"cannot be used together.");
closedir(directory);
stream->segs_right_edge = SEG_SEQ_RIGHT_EDGE(seg);
/* insert succeeded, now check if we overlap with someone */
- if (CheckOverlap(&stream->seg_tree, seg) == true) {
+ if (CheckOverlap(&stream->seg_tree, seg)) {
SCLogDebug("seg %u has overlap in the tree", seg->seq);
return 1;
}
}
/* check for bad responses */
- if (StateSynSentValidateTimestamp(ssn, p) == false) {
+ if (!StateSynSentValidateTimestamp(ssn, p)) {
StreamTcpSetEvent(p, STREAM_PKT_INVALID_TIMESTAMP);
return -1;
}
goto skip;
}
- if (StreamTcpPacketIsDupAck(ssn, p) == true) {
+ if (StreamTcpPacketIsDupAck(ssn, p)) {
STREAM_PKT_FLAG_SET(p, STREAM_PKT_FLAG_DUP_ACK);
// TODO see if we can skip work on these
}
static void SetupUserMode(SCInstance *suri)
{
/* apply 'user mode' config updates here */
- if (suri->system == false) {
- if (suri->set_logdir == false) {
+ if (!suri->system) {
+ if (!suri->set_logdir) {
/* override log dir to current work dir" */
if (ConfigSetLogDirectory((char *)".") != TM_ECODE_OK) {
FatalError("could not set USER mode logdir");
}
}
- if (suri->set_datadir == false) {
+ if (!suri->set_datadir) {
/* override data dir to current work dir" */
if (ConfigSetDataDirectory((char *)".") != TM_ECODE_OK) {
FatalError("could not set USER mode datadir");
}
SCMutexUnlock(&tmq->pq->mutex_q);
- if (err == true)
+ if (err)
goto error;
}
/* if we didn't get a packet see if we need to do some housekeeping */
if (unlikely(p == NULL)) {
- if (tv->flow_queue && SC_ATOMIC_GET(tv->flow_queue->non_empty) == true) {
+ if (tv->flow_queue && SC_ATOMIC_GET(tv->flow_queue->non_empty)) {
p = PacketGetFromQueueOrAlloc();
if (p != NULL) {
p->flags |= PKT_PSEUDO_STREAM_END;
return;
} else {
- if (TmThreadsHandleInjectedPackets(tv) == false) {
+ if (!TmThreadsHandleInjectedPackets(tv)) {
/* see if we have to do some house keeping */
- if (tv->flow_queue && SC_ATOMIC_GET(tv->flow_queue->non_empty) == true) {
+ if (tv->flow_queue && SC_ATOMIC_GET(tv->flow_queue->non_empty)) {
TmThreadsCaptureInjectPacket(tv, p); /* consumes 'p' */
return;
}
(p->action & ACTION_DROP) ? "DROP" : "no drop");
/* we're done with the tunnel root now as well */
- if (proot == true) {
+ if (proot) {
SCLogDebug("getting rid of root pkt... alloc'd %s", BOOL2STR(p->root->pool == NULL));
PacketReleaseRefs(p->root);
}
}
- if (found == false) {
+ if (!found) {
SCLogError("No section '%s' in '%s' file. Will not be able to use the file", section, path);
return -1;
}
* being the incoming prefix is shorter than the differ bit of the current
* node. In case we fail in this aspect, we walk down to the tree, till we
* arrive at a node that ends in a prefix */
- while (node->bit < NETMASK_MAX || node->has_prefix == false) {
+ while (node->bit < NETMASK_MAX || !node->has_prefix) {
/* if the bitlen isn't long enough to handle the bit test, we just walk
* down along one of the paths, since either paths should end up with a
* node that has a common prefix whose differ bit is greater than the
}
}
- if (node->bit != NETMASK_MAX || node->has_prefix == false) {
+ if (node->bit != NETMASK_MAX || !node->has_prefix) {
SCLogDebug("node %p bit %d != %d, or not has_prefix %s", node, node->bit, NETMASK_MAX,
node->has_prefix ? "true" : "false");
return;
return NULL;
}
- if (node->bit != NETMASK_MAX || node->has_prefix == false)
+ if (node->bit != NETMASK_MAX || !node->has_prefix)
return NULL;
if (SCMemcmp(node->prefix_stream, tmp_stream, sizeof(tmp_stream)) == 0) {
}
}
- if (node->bit != NETMASK_MAX || node->has_prefix == false) {
+ if (node->bit != NETMASK_MAX || !node->has_prefix) {
return NULL;
}
return false;
if (Callback != NULL) {
- if (Callback(u1->user, u2->user) == false)
+ if (!Callback(u1->user, u2->user))
return false;
}
}
if (n1->left && n2->left)
- if (CompareTreesSub(n1->left, n2->left, Callback) == false)
+ if (!CompareTreesSub(n1->left, n2->left, Callback))
return false;
if (n1->right && n2->right)
- if (CompareTreesSub(n1->right, n2->right, Callback) == false)
+ if (!CompareTreesSub(n1->right, n2->right, Callback))
return false;
return true;
StreamingBufferRegion *p = NULL;
for (; r != NULL; r = r->next) {
- if (RegionsIntersect(cfg, r, offset, data_re) == true) {
+ if (RegionsIntersect(cfg, r, offset, data_re)) {
*prev = p;
return r;
}
h = h->next;
}
HRLOCK_UNLOCK(hb);
- if (err == true)
+ if (err)
return -1;
}
return 0;