const EngineDescription &eng,
const map<BucketIndex, vector<LiteralIndex>> &bucketToLits,
bool make_small) {
- unique_ptr<TeddyEngineDescription> teddyDescr =
- getTeddyDescription(eng.getID());
-
BC2CONF bc2Conf;
u32 totalConfirmSize = 0;
for (BucketIndex b = 0; b < eng.getNumBuckets(); b++) {
DEBUG_PRINTF("dfa is empty\n");
}
+ // cppcheck-suppress unreadVariable
UNUSED const size_t states_before = rdfa.states.size();
HopcroftInfo info(rdfa);
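A note on the idiom above: states_before is consumed only by debug-build
macros, so it is tagged UNUSED for the compiler and suppressed for
cppcheck's unreadVariable check. A minimal self-contained sketch of the
same idiom (the UNUSED definition and the minimize body here are
stand-ins, not the codebase's own):

    #include <cstddef>
    #include <vector>

    #define UNUSED __attribute__((unused)) // stand-in for the codebase macro

    static void minimize_sketch(std::vector<int> &states) {
        // cppcheck-suppress unreadVariable
        UNUSED const size_t states_before = states.size();
        states.clear(); // stand-in for the real minimization pass
        // Only a debug build reads states_before, e.g.:
        // DEBUG_PRINTF("%zu -> %zu states\n", states_before, states.size());
    }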
static
void prep_joins_for_generation(const GoughGraph &g, GoughVertex v,
- map<GoughEdge, edge_join_info> *edge_info) {
+ map<GoughEdge, edge_join_info> &edge_info) {
DEBUG_PRINTF("writing out joins for %u\n", g[v].state_id);
for (const auto &var : g[v].vars) {
u32 dest_slot = var->slot;
}
for (const GoughEdge &incoming_edge : var_edges.second) {
- (*edge_info)[incoming_edge].insert(input, dest_slot);
+ edge_info[incoming_edge].insert(input, dest_slot);
DEBUG_PRINTF("need %u<-%u\n", dest_slot, input);
}
}
}
map<GoughEdge, edge_join_info> eji;
- prep_joins_for_generation(g, t, &eji);
+ prep_joins_for_generation(g, t, eji);
for (auto &m : eji) {
vector<gough_ins> &block = (*blocks)[gough_edge_id(g, m.first)];
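The hunks above are one refactor: the out-parameter moves from pointer
to reference, so the body drops the (*edge_info)[...] dereference and
the call site drops the &. A generic sketch of the same change, with
hypothetical names:

    #include <map>
    #include <string>

    // Before: void collect(int key, std::map<int, std::string> *out) {
    //             (*out)[key] = "hit";
    //         }

    // After: the reference cannot be null and reads like a normal map.
    static void collect(int key, std::map<int, std::string> &out) {
        out[key] = "hit";
    }

    int main() {
        std::map<int, std::string> results;
        collect(42, results); // call site no longer passes &results
        return 0;
    }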
// Sherman optimization
if (info.impl_alpha_size > 16) {
+#ifdef DEBUG
u16 total_daddy = 0;
+#endif // DEBUG
for (u32 i = 0; i < info.size(); i++) {
find_better_daddy(info, i,
is_cyclic_near(info.raw, info.raw.start_anchored),
grey);
+#ifdef DEBUG
total_daddy += info.extra[i].daddytaken;
+#endif // DEBUG
}
DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
// Sherman optimization
if (info.impl_alpha_size > 16) {
+#ifdef DEBUG
u16 total_daddy = 0;
+#endif // DEBUG
for (u32 i = 0; i < info.size(); i++) {
find_better_daddy(info, i,
is_cyclic_near(info.raw, info.raw.start_anchored),
grey);
+#ifdef DEBUG
total_daddy += info.extra[i].daddytaken;
+#endif // DEBUG
}
DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
map<dstate_id_t, AccelScheme> accel_escape_info
= info.strat.getAccelInfo(cc.grey);
- auto old_states = info.states;
dstate_id_t sheng_end = find_sheng_states(info, accel_escape_info, MAX_SHENG_STATES);
if (sheng_end <= DEAD_STATE + 1) {
- info.states = old_states;
return bytecode_ptr<NFA>(nullptr);
}
}
if (!nfa) {
- info.states = old_states;
return nfa;
}
// Dump the contents of the given queue.
static never_inline UNUSED
void debugQueue(const struct mq *q) {
+ if (q == nullptr) {
+ DEBUG_PRINTF("q=NULL!\n");
+ return;
+ }
DEBUG_PRINTF("q=%p, nfa=%p\n", q, q->nfa);
DEBUG_PRINTF("q offset=%llu, buf={%p, len=%zu}, history={%p, len=%zu}\n",
q->offset, q->buffer, q->length, q->history, q->hlength);
old_states = info.states;
auto nfa = shengCompile_int<sheng64>(raw, cc, accel_states, strat, info);
if (!nfa) {
- info.states = old_states;
+ info.states = old_states; // cppcheck-suppress unreadVariable
}
return nfa;
}
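Note the asymmetry with the earlier sheng hunk: there the old_states
snapshot was dead and is removed outright, while here the restore is
kept (and suppressed) presumably because a failed sheng64 attempt
leaves info.states modified and a later fallback still needs the
original table. A stripped-down sketch of that save/try/restore shape,
all names hypothetical:

    #include <memory>
    #include <vector>

    struct Info { std::vector<int> states; };

    // Stand-in for a compile attempt that may mutate info.states and
    // then fail by returning nullptr.
    static std::unique_ptr<int> try_compile(Info &info) {
        info.states.clear();
        return nullptr;
    }

    std::unique_ptr<int> compile_with_fallback(Info &info) {
        auto old_states = info.states; // snapshot before the attempt
        auto nfa = try_compile(info);
        if (!nfa) {
            info.states = old_states;  // undo partial mutation for the retry
        }
        return nfa;
    }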
assert(rsi.repeatMax >= rsi.repeatMin);
DEBUG_PRINTF("entry\n");
-
- const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
- rsi.vertices.end());
+
vector<NFAVertex> g_succs;
getSuccessors(g, rsi, &g_succs);
// try a redundancy pass.
if (addSomRedundancy(g, depths)) {
- depths = getDistancesFromSOM(g);
+ depths = getDistancesFromSOM(g); // cppcheck-suppress unreadVariable
}
auto regions = assignRegions(g);
const u32 *programs = getByOffset(t, t->delayProgramOffset);
for (u32 it = fatbit_iterate(vicSlot, delay_count, MMB_INVALID);
     it != MMB_INVALID; it = fatbit_iterate(vicSlot, delay_count, it)) {
+ // cppcheck-suppress unreadVariable
UNUSED rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", it, offset);
if (prefilter && cc.grey.prefilterReductions) {
// If we're prefiltering, we can have another go with a reduced graph.
- UNUSED size_t numBefore = num_vertices(h);
+ UNUSED size_t numBefore = num_vertices(h); // cppcheck-suppress unreadVariable
prefilterReductions(h, cc);
- UNUSED size_t numAfter = num_vertices(h);
+ UNUSED size_t numAfter = num_vertices(h); // cppcheck-suppress unreadVariable
DEBUG_PRINTF("reduced from %zu to %zu vertices\n", numBefore, numAfter);
if (isImplementableNFA(h, &rm, cc)) {
const auto hash_functions = { bloomHash_1, bloomHash_2, bloomHash_3 };
for (const auto &hash_func : hash_functions) {
- u32 hash = hash_func(substr, nocase);
- u32 key = hash & key_mask;
+ u32 key = hash_func(substr, nocase) & key_mask;
DEBUG_PRINTF("set key %u (of %zu)\n", key, bloom.size() * 8);
bloom[key / 8] |= 1U << (key % 8);
}
}
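The bloom construction above sets one bit per hash function; queries
later test the same positions, so lookups can false-positive but never
false-negative. A self-contained model of the same set/test logic
(h1/h2 are illustrative stand-ins for bloomHash_1/2/3, and the table
size is assumed to be a power of two so the key mask works):

    #include <cstdint>
    #include <initializer_list>
    #include <string>
    #include <vector>

    static uint32_t h1(const std::string &s) { // FNV-1a, as a stand-in
        uint32_t h = 2166136261u;
        for (unsigned char c : s) { h = (h ^ c) * 16777619u; }
        return h;
    }
    static uint32_t h2(const std::string &s) { // djb2, as a stand-in
        uint32_t h = 5381;
        for (unsigned char c : s) { h = h * 33 + c; }
        return h;
    }

    struct Bloom {
        std::vector<uint8_t> bits;
        uint32_t key_mask;
        explicit Bloom(uint32_t nbytes) // nbytes * 8 must be a power of two
            : bits(nbytes, 0), key_mask(nbytes * 8 - 1) {}
        void set(const std::string &s) {
            for (uint32_t h : {h1(s), h2(s)}) {
                uint32_t key = h & key_mask;      // same masking as above
                bits[key / 8] |= 1U << (key % 8); // same bit-set expression
            }
        }
        bool maybe_contains(const std::string &s) const {
            for (uint32_t h : {h1(s), h2(s)}) {
                uint32_t key = h & key_mask;
                if (!(bits[key / 8] & (1U << (key % 8)))) { return false; }
            }
            return true; // all bits set: present, or a false positive
        }
    };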
for (const auto &m : hashToLitOffPairs) {
- u32 hash = m.first;
+ u32 bucket = m.first % numEntries;
const LitOffsetVector &d = m.second;
- u32 bucket = hash % numEntries;
-
// Placement via linear probing.
for (const auto &lit_offset : d) {
while (tab[bucket].str_offset != 0) {
}
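The placement loop above is classic linear probing: start at
hash % numEntries and walk forward past occupied slots, with
str_offset == 0 marking an empty entry. A compact model (Entry layout
hypothetical; assumes the builder sizes the table so it never fills):

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t str_offset = 0; }; // 0 == empty slot

    static uint32_t place(std::vector<Entry> &tab, uint32_t hash,
                          uint32_t str_offset) {
        uint32_t bucket = hash % tab.size();
        while (tab[bucket].str_offset != 0) {
            bucket = (bucket + 1) % tab.size(); // probe next slot, wrapping
        }
        tab[bucket].str_offset = str_offset;
        return bucket;
    }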
if (eod_prefix) {
- always_run++;
DEBUG_PRINTF("eod prefixes are slow");
return 0;
}
COPY(stream_body + so->groups, so->groups_size);
/* copy the real bits of history */
+ // cppcheck-suppress unreadVariable
UNUSED u32 hend = so->history + rose->historyRequired;
COPY(stream_body + hend - history, history);
loadcompressed128(&val_out, &buf, &mask, 0);
EXPECT_TRUE(!diff128(and128(val, mask), val_out));
- mask_raw[j] = 0x7f;
+ mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
}
}
}
loadcompressed256(&val_out, &buf, &mask, 0);
EXPECT_TRUE(!diff256(and256(val, mask), val_out));
- mask_raw[j] = 0x7f;
+ mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
}
}
}
loadcompressed384(&val_out, &buf, &mask, 0);
EXPECT_TRUE(!diff384(and384(val, mask), val_out));
- mask_raw[j] = 0x7f;
+ mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
}
}
}
loadcompressed512(&val_out, &buf, &mask, 0);
EXPECT_TRUE(!diff512(and512(val, mask), val_out));
- mask_raw[j] = 0x7f;
+ mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
}
}
}
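Each of these tests round-trips state through the compressed form and
then asserts that exactly the bits under the mask survive, i.e.
val_out == val & mask. Assuming the usual bitwise compress/expand
semantics for storecompressed*/loadcompressed* (PEXT/PDEP-style, per
lane), a scalar model of why that identity holds:

    #include <cstdint>

    // Gather the bits of val selected by mask into the low bits.
    static uint64_t compress64_model(uint64_t val, uint64_t mask) {
        uint64_t out = 0;
        for (uint64_t bb = 1; mask != 0; bb += bb) {
            if (val & mask & -mask) { out |= bb; } // take lowest masked bit
            mask &= mask - 1;                      // clear lowest set bit
        }
        return out;
    }

    // Scatter the low bits of val back out under mask.
    static uint64_t expand64_model(uint64_t val, uint64_t mask) {
        uint64_t out = 0;
        for (uint64_t bb = 1; mask != 0; bb += bb) {
            if (val & bb) { out |= mask & -mask; }
            mask &= mask - 1;
        }
        return out;
    }

    // expand(compress(v, m), m) == (v & m) for all v, m; this is the
    // property the EXPECT_TRUE(!diffNNN(andNNN(val, mask), val_out))
    // assertions check after a store/load round trip.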