unsigned int dstPort;
// Construct a FiveTuple from a TCP or UDP packet.
- FiveTuple(const struct ip *iphdr) {
+ explicit FiveTuple(const struct ip *iphdr) {
// IP fields
protocol = iphdr->ip_p;
srcAddr = iphdr->ip_src.s_addr;
public:
Sigdata() {}
- Sigdata(const char *filename) {
+ explicit Sigdata(const char *filename) {
parseFile(filename, patterns, flags, ids, originals);
}
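
The hunks in this patch fall into a few recurring patterns. The example-tool changes above make single-argument constructors `explicit` so they can no longer act as silent conversions. A minimal sketch of the failure mode this prevents (`Widget` and `describe` are hypothetical names, not from this patch):

    #include <string>

    struct Widget {
        explicit Widget(const char *name) : name_(name) {}
        std::string name_;
    };

    void describe(const Widget &) {}

    int main() {
        describe(Widget("w1")); // conversion spelled out: OK
        // describe("w1");      // ill-formed once the constructor is explicit
        return 0;
    }
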
unsigned int dstPort;
// Construct a FiveTuple from a TCP or UDP packet.
- FiveTuple(const struct ip *iphdr) {
+ explicit FiveTuple(const struct ip *iphdr) {
// IP fields
protocol = iphdr->ip_p;
srcAddr = iphdr->ip_src.s_addr;
auto ecit = edge_cache.find(cache_key);
if (ecit == edge_cache.end()) {
DEBUG_PRINTF("adding edge %zu %zu\n", g[u].index, g[v].index);
- NFAEdge e = add_edge(u, v, g);
+ NFAEdge e;
+ std::tie(e, std::ignore) = add_edge(u, v, g);
edge_cache.emplace(cache_key, e);
g[e].assert_flags = flags;
if (++assert_edge_count > MAX_ASSERT_EDGES) {
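
These std::tie rewrites recur throughout the patch: add_edge() and edge() in this BGL-style graph API return a std::pair<edge_descriptor, bool>, and once the descriptor's converting constructor from that pair is made explicit (see the edge_descriptor hunk near the end), the pair must be unpacked by hand. A self-contained sketch against stock Boost.Graph, which uses the same return convention:

    #include <boost/graph/adjacency_list.hpp>
    #include <tuple>

    using Graph = boost::adjacency_list<>;
    using Edge = boost::graph_traits<Graph>::edge_descriptor;

    int main() {
        Graph g(2);
        // Keep only the descriptor when the insertion cannot fail.
        Edge e1 = boost::add_edge(0, 1, g).first;
        // Unpack both halves, discarding the bool explicitly if unneeded.
        Edge e2;
        bool inserted;
        std::tie(e2, inserted) = boost::add_edge(0, 1, g);
        (void)e1; (void)e2; (void)inserted;
        return 0;
    }
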
if (!rose) {
DEBUG_PRINTF("error building rose\n");
assert(0);
- return nullptr;
+ return bytecode_ptr<RoseEngine>(nullptr);
}
dumpReportManager(ng.rm, ng.cc.grey);
}
if (!eng) {
- return nullptr;
+ return bytecode_ptr<HWLM>(nullptr);
}
assert(engSize);
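
The many `return nullptr;` → `return bytecode_ptr<T>(nullptr);` edits in this patch all stem from one change: the std::nullptr_t constructor of bytecode_ptr becomes explicit (see its hunk near the end). A reduced stand-in showing why the bare returns stop compiling; `handle` is a hypothetical name:

    #include <cstddef>

    template <typename T>
    class handle { // reduced stand-in for bytecode_ptr
    public:
        handle() = default;
        explicit handle(std::nullptr_t) {}
    private:
        T *ptr = nullptr;
    };

    handle<int> make() {
        // return nullptr;          // ill-formed: a return statement
        //                          // copy-initializes, and copy-init
        //                          // cannot call an explicit constructor
        return handle<int>(nullptr); // OK: conversion spelled out
    }

    int main() { make(); return 0; }
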
u32 min_bound = pr.bounds.min; // always finite
if (min_bound == 0) { // Vacuous case, we can only do this once.
assert(!edge(g.start, g.accept, g).second);
- NFAEdge e = add_edge(g.start, g.accept, g);
+ NFAEdge e = add_edge(g.start, g.accept, g).first;
g[e].tops.insert(top);
g[u].reports.insert(pr.reports.begin(), pr.reports.end());
min_bound = 1;
for (u32 i = 0; i < min_bound; i++) {
NFAVertex v = add_vertex(g);
g[v].char_reach = pr.reach;
- NFAEdge e = add_edge(u, v, g);
+ NFAEdge e = add_edge(u, v, g).first;
if (u == g.start) {
g[e].tops.insert(top);
}
if (head != u) {
add_edge(head, v, g);
}
- NFAEdge e = add_edge(u, v, g);
+ NFAEdge e = add_edge(u, v, g).first;
if (u == g.start) {
g[e].tops.insert(top);
}
|| !cc.streaming);
if (!cc.grey.allowGough) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
DEBUG_PRINTF("hello world\n");
auto basic_dfa = mcclellanCompile_i(raw, gbs, cc);
assert(basic_dfa);
if (!basic_dfa) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
u8 alphaShift
// Similarly, connect (start, startDs) if necessary.
if (!edge(g.start, g.startDs, g).second) {
- NFAEdge e = add_edge(g.start, g.startDs, g);
+ NFAEdge e;
+ std::tie(e, std::ignore) = add_edge(g.start, g.startDs, g);
tempEdges.emplace_back(e); // Remove edge later.
}
static
bytecode_ptr<NFA> generateNfa(const build_info &args) {
if (args.num_states > NFATraits<dtype>::maxStates) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
// Build bounded repeat structures.
if (!cc.grey.allowLimExNFA) {
DEBUG_PRINTF("limex not allowed\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
// If you ask for a particular type, it had better be an NFA.
if (scores.empty()) {
DEBUG_PRINTF("No NFA returned a valid score for this case.\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
// Sort acceptable models in priority order, lowest score first.
}
DEBUG_PRINTF("NFA build failed.\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
u32 countAccelStates(NGHolder &h,
if (!allocateFSN16(info, &count_real_states, &wide_limit)) {
DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
info.size());
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
DEBUG_PRINTF("count_real_states: %d\n", count_real_states);
if (!allocateImplId16(info, sheng_end, &sherman_limit)) {
DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
info.size());
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
u16 count_real_states = sherman_limit - sheng_end;
if (!allocateImplId16(info, sheng_end, &sherman_limit)) {
DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
info.size());
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
u16 count_real_states = sherman_limit - sheng_end;
bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowMcSheng) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
mcclellan_build_strat mbs(raw, rm, false);
if (sheng_end <= DEAD_STATE + 1) {
info.states = old_states;
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
bytecode_ptr<NFA> nfa;
bytecode_ptr<NFA> mcshengCompile64(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowMcSheng) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("McSheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
mcclellan_build_strat mbs(raw, rm, false);
sheng_end64 = find_sheng_states(info, accel_escape_info, MAX_SHENG64_STATES);
if (sheng_end64 <= DEAD_STATE + 1) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
} else {
using64state = true;
}
};
struct RdfaGraph : public ue2_graph<RdfaGraph, RdfaVertexProps, RdfaEdgeProps> {
- RdfaGraph(const raw_dfa &rdfa);
+ explicit RdfaGraph(const raw_dfa &rdfa);
};
}
if (!createShuffleMasks<T>((T *)getMutableImplNfa(nfa.get()), info, accelInfo)) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
return nfa;
set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
sheng_build_strat strat(raw, rm, only_accel_init);
info.can_die ? "can" : "cannot", info.size());
if (info.size() > 16) {
DEBUG_PRINTF("Too many states\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
return shengCompile_int<sheng>(raw, cc, accel_states, strat, info);
set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#ifdef HAVE_SVE
if (svcntb()<32) {
DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#else
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#endif
assert(info.size() > 16);
if (info.size() > 32) {
DEBUG_PRINTF("Too many states\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
return shengCompile_int<sheng32>(raw, cc, accel_states, strat, info);
set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#ifdef HAVE_SVE
if (svcntb()<64) {
DEBUG_PRINTF("Sheng64 failed, SVE width is too small!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#else
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
#endif
assert(info.size() > 32);
if (info.size() > 64) {
DEBUG_PRINTF("Too many states\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
vector<dstate> old_states;
old_states = info.states;
/* there may already be a different edge from start to eod if so
* we need to make it unconditional and alive
*/
- if (NFAEdge start_eod = edge(u, g.acceptEod, g)) {
+ NFAEdge start_eod;
+ bool exists;
+ std::tie(start_eod, exists) = edge(u, g.acceptEod, g);
+ if (exists) {
g[start_eod].assert_flags = 0;
dead->erase(start_eod);
} else {
/* there may already be a different edge from start to eod if so
* we need to make it unconditional and alive
*/
- if (NFAEdge start_eod = edge(u, g.acceptEod, g)) {
+ NFAEdge start_eod;
+ bool exists;
+ std::tie(start_eod, exists) = edge(u, g.acceptEod, g);
+ if (exists) {
g[start_eod].assert_flags = 0;
dead->erase(start_eod);
} else {
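
The two hunks above replace a declaration-in-condition, `if (NFAEdge start_eod = edge(...))`, which depended on the now-explicit pair conversion plus the descriptor's operator bool, with an explicit unpack of the (descriptor, found) pair. A sketch of the same query shape against stock Boost.Graph:

    #include <boost/graph/adjacency_list.hpp>
    #include <tuple>

    using Graph = boost::adjacency_list<>;
    using Vertex = boost::graph_traits<Graph>::vertex_descriptor;
    using Edge = boost::graph_traits<Graph>::edge_descriptor;

    bool connected(const Graph &g, Vertex u, Vertex v) {
        // edge() returns (descriptor, found): branch on the bool and touch
        // the descriptor only when found is true.
        Edge e;
        bool found;
        std::tie(e, found) = boost::edge(u, v, g);
        (void)e;
        return found;
    }
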
* boundaries. Assert resolution handles the badness coming from asserts.
* The only other source of trouble is startDs->accept connections.
*/
- NFAEdge orig = edge(g.startDs, g.accept, g);
+ NFAEdge orig;
+ std::tie(orig, std::ignore) = edge(g.startDs, g.accept, g);
if (expr.utf8 && orig) {
DEBUG_PRINTF("rectifying %u\n", expr.report);
Report ir = rm.getBasicInternalReport(expr);
public:
struct ClassDepth {
ClassDepth() {}
- ClassDepth(const NFAVertexDepth &d)
+ explicit ClassDepth(const NFAVertexDepth &d)
: d1(d.fromStart), d2(d.fromStartDotStar) {}
- ClassDepth(const NFAVertexRevDepth &rd)
+ explicit ClassDepth(const NFAVertexRevDepth &rd)
: d1(rd.toAccept), d2(rd.toAcceptEod) {}
DepthMinMax d1;
DepthMinMax d2;
ClassInfo::ClassDepth depth;
if (eq == LEFT_EQUIVALENCE) {
- depth = depths[vi->vert_index];
+ depth = ClassInfo::ClassDepth(depths[vi->vert_index]);
} else {
- depth = rdepths[vi->vert_index];
+ depth = ClassInfo::ClassDepth(rdepths[vi->vert_index]);
}
ClassInfo ci(g, *vi, depth, eq);
pred_info->succ.erase(old_vertex_info);
// if edge doesn't exist, create it
- NFAEdge e = add_edge_if_not_present(pred_info->v, new_v, g);
-
+ NFAEdge e;
+ std::tie(e, std::ignore) = add_edge_if_not_present(pred_info->v, new_v, g);
// put edge tops, if applicable
if (!edgetops.empty()) {
assert(g[e].tops.empty() || g[e].tops == edgetops);
pred_info->succ.insert(new_vertex_info);
if (new_v_eod) {
- NFAEdge ee = add_edge_if_not_present(pred_info->v, new_v_eod,
+ NFAEdge ee;
+ std::tie(ee, std::ignore) = add_edge_if_not_present(pred_info->v, new_v_eod,
g);
// put edge tops, if applicable
// find which accepts source vertex connects to
flat_set<NFAVertex> targets;
for (const auto &accept : accepts) {
- NFAEdge e = edge(src, accept, g);
+ NFAEdge e;
+ std::tie(e, std::ignore) = edge(src, accept, g);
if (e) {
targets.insert(accept);
}
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
if (!cr.all()) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
const CharReach escapes(~cr);
if (escapes.count() != 1) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
const CharReach escapes(cr);
if (escapes.count() != 1) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
minPeriod, rtype);
if (shuftiBuildMasks(~cr, (u8 *)&ls->mask_lo, (u8 *)&ls->mask_hi) == -1) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
DEBUG_PRINTF("built shuf lbr\n");
if (!nfa) {
assert(0);
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
return nfa;
const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowLbr) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
if (proto.repeats.size() != 1) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
const PureRepeat &repeat = proto.repeats.begin()->second;
if (repeat.reports.size() != 1) {
DEBUG_PRINTF("too many reports\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
bool is_reset;
const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowLbr) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
PureRepeat repeat;
if (!isPureRepeat(g, repeat)) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
if (repeat.reports.size() != 1) {
DEBUG_PRINTF("too many reports\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
CastleProto proto(g.kind, repeat);
u32 numStates = countStates(state_ids);
if (numStates > NFA_MAX_STATES) {
DEBUG_PRINTF("Can't build an NFA with %u states\n", numStates);
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
map<NFAVertex, BoundedRepeatSummary> br_cyclic;
assert(h.kind == NFA_REV_PREFIX); /* triggered, raises internal callbacks */
// Do state numbering.
- auto state_ids = numberStates(h, {});
+ auto state_ids = numberStates(h, flat_set<NFAVertex>());
// Quick exit: if we've got an embarrassment of riches, i.e. more states
// than we can implement in our largest NFA model, bail here.
u32 numStates = countStates(state_ids);
if (numStates > NFA_MAX_STATES) {
DEBUG_PRINTF("Can't build an NFA with %u states\n", numStates);
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
assert(sanityCheckGraph(h, state_ids));
* Score all the edges in the given graph, returning them in \p scores indexed
* by edge_index. */
std::vector<u64a> scoreEdges(const NGHolder &h,
- const flat_set<NFAEdge> &known_bad = {});
+ const flat_set<NFAEdge> &known_bad = flat_set<NFAEdge>());
/** Returns a score for a literal set. Lower scores are better. */
u64a scoreSet(const std::set<ue2_literal> &s);
if (it == allEdges.end()) {
// No reverse edge, add one.
NFAVertex u = source(fwd, g), v = target(fwd, g);
- NFAEdge rev = add_edge(v, u, g);
+ NFAEdge rev;
+ std::tie(rev, std::ignore) = add_edge(v, u, g);
it = allEdges.insert(make_pair(make_pair(vidx, uidx), rev)).first;
// Add to capacity map.
u32 revIndex = g[rev].index;
static
bool hasInEdgeTops(const NGHolder &g, NFAVertex v) {
- NFAEdge e = edge(g.start, v, g);
+ NFAEdge e;
+ std::tie(e, std::ignore) = edge(g.start, v, g);
return e && !g[e].tops.empty();
}
g[v].char_reach = cr;
add_edge(u, v, g);
if (u == g.start) {
- g[edge(u, v, g)].tops.insert(top);
+ g[edge(u, v, g).first].tops.insert(top);
}
u = v;
}
vector<NFAEdge> &tempEdges) {
for (NFAVertex v : tops) {
assert(!isLeafNode(v, g));
-
- const NFAEdge &e = add_edge(g.start, v, g);
+ auto edge_result = add_edge(g.start, v, g);
+ const NFAEdge &e = edge_result.first;
tempEdges.emplace_back(e);
}
}
for (auto pivot : pivots) {
assert(contains(*rhs_map, pivot));
- NFAEdge e = add_edge(rhs->start, (*rhs_map)[pivot], *rhs);
+ auto edge_result = add_edge(rhs->start, (*rhs_map)[pivot], *rhs);
+ NFAEdge e = edge_result.first;
(*rhs)[e].tops.insert(DEFAULT_TOP);
}
}
a_count++;
- NFAEdge b_edge = edge(b_ranking.at(i), b_ranking.at(sid), gb);
-
- if (!b_edge) {
+ NFAEdge b_edge;
+ bool b_edge_found;
+ std::tie(b_edge, b_edge_found) = edge(b_ranking.at(i), b_ranking.at(sid), gb);
+ if (!b_edge_found) {
max = i;
DEBUG_PRINTF("lowering max to %u due to edge %zu->%u\n",
max, i, sid);
/* TODO: relax top checks if reports match */
// If both graphs have edge (start, accept), the tops must match.
- NFAEdge e1_accept = edge(h1.start, h1.accept, h1);
- NFAEdge e2_accept = edge(h2.start, h2.accept, h2);
- if (e1_accept && e2_accept && h1[e1_accept].tops != h2[e2_accept].tops) {
+ NFAEdge e1_accept;
+ bool bool_e1_accept;
+ std::tie(e1_accept, bool_e1_accept) = edge(h1.start, h1.accept, h1);
+ NFAEdge e2_accept;
+ bool bool_e2_accept;
+ std::tie(e2_accept, bool_e2_accept) = edge(h2.start, h2.accept, h2);
+
+ if (bool_e1_accept && bool_e2_accept
+     && h1[e1_accept].tops != h2[e2_accept].tops) {
return false;
}
// If both graphs have edge (start, acceptEod), the tops must match.
- NFAEdge e1_eod = edge(h1.start, h1.acceptEod, h1);
- NFAEdge e2_eod = edge(h2.start, h2.acceptEod, h2);
- if (e1_eod && e2_eod && h1[e1_eod].tops != h2[e2_eod].tops) {
+ NFAEdge e1_eod;
+ bool bool_e1_eod;
+ std::tie(e1_eod, bool_e1_eod) = edge(h1.start, h1.acceptEod, h1);
+ NFAEdge e2_eod;
+ bool bool_e2_eod;
+ std::tie(e2_eod, bool_e2_eod) = edge(h2.start, h2.acceptEod, h2);
+
+ if (bool_e1_eod && bool_e2_eod
+     && h1[e1_eod].tops != h2[e2_eod].tops) {
return false;
}
if (edge(dest, t, g).second) {
continue;
}
- NFAEdge clone = add_edge(dest, t, g);
+ NFAEdge clone = add_edge(dest, t, g).first;
u32 idx = g[clone].index;
g[clone] = g[e];
g[clone].index = idx;
for (const auto &e : in_edges_range(s, g)) {
NFAVertex ss = source(e, g);
assert(!edge(ss, dest, g).second);
- NFAEdge clone = add_edge(ss, dest, g);
+ NFAEdge clone = add_edge(ss, dest, g).first;
u32 idx = g[clone].index;
g[clone] = g[e];
g[clone].index = idx;
}
bool matches_everywhere(const NGHolder &h) {
- NFAEdge e = edge(h.startDs, h.accept, h);
- return e && !h[e].assert_flags;
+ NFAEdge e;
+ bool bool_e;
+ std::tie(e, bool_e) = edge(h.startDs, h.accept, h);
+ return bool_e && !h[e].assert_flags;
}
bool is_virtual_start(NFAVertex v, const NGHolder &g) {
NFAVertex s = out_mapping[si];
NFAVertex t = out_mapping[ti];
- NFAEdge e2 = add_edge(s, t, out);
+ NFAEdge e2 = add_edge(s, t, out).first;
out[e2] = in[e];
}
clearReports(g);
for (auto v : pred) {
- NFAEdge e = add_edge(v, g.accept, g);
+ NFAEdge e = add_edge(v, g.accept, g).first;
g[v].reports.insert(0);
if (is_triggered(g) && v == g.start) {
g[e].tops.insert(DEFAULT_TOP);
* makes a more svelte graphy */
clear_in_edges(temp_map[pivot], *new_lhs);
NFAEdge pivot_edge = add_edge(temp_map[prev_v], temp_map[pivot],
- *new_lhs);
+ *new_lhs).first;
if (is_triggered(h) && prev_v == h.start) {
(*new_lhs)[pivot_edge].tops.insert(DEFAULT_TOP);
}
}
for (auto v : preds) {
- NFAEdge e = add_edge_if_not_present(v, prev, g);
+ NFAEdge e = add_edge_if_not_present(v, prev, g).first;
if (v == g.start && is_triggered(g)) {
g[e].tops.insert(DEFAULT_TOP);
}
add_edge(lhs->accept, lhs->acceptEod, *lhs);
clearReports(*lhs);
for (NFAVertex v : splitters) {
- NFAEdge e = add_edge(v_map[v], lhs->accept, *lhs);
+ NFAEdge e = add_edge(v_map[v], lhs->accept, *lhs).first;
if (v == base_graph.start) {
(*lhs)[e].tops.insert(DEFAULT_TOP);
}
/* fill in report information */
g[v].reports.insert(reports.begin(), reports.end());
- RoseEdge e = add_edge(parent, v, g);
+ RoseEdge e = add_edge(parent, v, g).first;
DEBUG_PRINTF("adding edge (%u, %u) to parent\n", minBound, maxBound);
g[e].minBound = minBound;
DEBUG_PRINTF("created anchored vertex %zu with lit id %u\n", g[v].index,
literalId);
- RoseEdge e = add_edge(build->anchored_root, v, g);
+ RoseEdge e = add_edge(build->anchored_root, v, g).first;
g[e].minBound = min_offset;
g[e].maxBound = max_offset;
RoseVertex p = pv.first;
- RoseEdge e = add_edge(p, w, g);
+ RoseEdge e = add_edge(p, w, g).first;
DEBUG_PRINTF("adding edge (%u,%u) to parent\n", edge_props.minBound,
edge_props.maxBound);
g[e].minBound = edge_props.minBound;
for (const auto &pv : parents) {
const RoseInEdgeProps &edge_props = bd.ig[pv.second];
- RoseEdge e = add_edge(pv.first, g_v, tbi->g);
+ RoseEdge e = add_edge(pv.first, g_v, tbi->g).first;
g[e].minBound = edge_props.minBound;
g[e].maxBound = edge_props.maxBound;
g[e].history = selectHistory(*tbi, bd, pv.second, e);
g[v].left.graph = eod_leftfix;
g[v].left.leftfix_report = report_mapping.second;
g[v].left.lag = 0;
- RoseEdge e1 = add_edge(u, v, g);
+ RoseEdge e1 = add_edge(u, v, g).first;
g[e1].minBound = 0;
g[e1].maxBound = ROSE_BOUND_INF;
g[v].min_offset = add_rose_depth(g[u].min_offset,
g[w].reports = report_mapping.first;
g[w].min_offset = g[v].min_offset;
g[w].max_offset = g[v].max_offset;
- RoseEdge e = add_edge(v, w, g);
+ RoseEdge e = add_edge(v, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
/* No need to set history as the event is only delivered at the last
g[w].reports = ig[iv].reports;
g[w].min_offset = g[u].min_offset;
g[w].max_offset = g[u].max_offset;
- RoseEdge e = add_edge(u, w, g);
+ RoseEdge e = add_edge(u, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE;
g[v].left.graph = eod_leftfix;
g[v].left.leftfix_report = report_mapping.second;
g[v].left.lag = 0;
- RoseEdge e1 = add_edge(build.anchored_root, v, g);
+ RoseEdge e1 = add_edge(build.anchored_root, v, g).first;
g[e1].minBound = 0;
g[e1].maxBound = ROSE_BOUND_INF;
g[v].min_offset = findMinWidth(*eod_leftfix);
g[w].reports = report_mapping.first;
g[w].min_offset = g[v].min_offset;
g[w].max_offset = g[v].max_offset;
- RoseEdge e = add_edge(v, w, g);
+ RoseEdge e = add_edge(v, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_NONE;
g[v].left.leftfix_report = mask_report;
} else {
// Make sure our edge bounds are correct.
- RoseEdge e = edge(parent, v, g);
+ RoseEdge e = edge(parent, v, g).first;
g[e].minBound = 0;
g[e].maxBound = anchored ? 0 : ROSE_BOUND_INF;
g[e].history = anchored ? ROSE_ROLE_HISTORY_ANCH
g[v].max_offset = v_max_offset;
if (eod) {
- RoseEdge e = add_edge(v, eod_v, g);
+ RoseEdge e = add_edge(v, eod_v, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE;
succ = u;
}
- NFAEdge e = add_edge(h.start, succ, h);
+ NFAEdge e = add_edge(h.start, succ, h).first;
h[e].tops.insert(DEFAULT_TOP);
return rhs;
if (dfas.empty()) {
DEBUG_PRINTF("empty\n");
- return nullptr;
+ return bytecode_ptr<anchored_matcher_info>(nullptr);
}
for (auto &rdfa : dfas) {
DEBUG_PRINTF("added %u literal chars back, new lag %u\n", lag_adjust,
g[v].left.lag);
}
- left_id leftfix = g[succs[0]].left;
+ left_id leftfix = left_id(g[succs[0]].left);
if (leftfix.graph()) {
assert(leftfix.graph()->kind == NFA_PREFIX
continue;
}
PredTopPair ptp(v, g[v].suffix.top);
- (*suffixTriggers)[g[v].suffix].insert(ptp);
+ (*suffixTriggers)[suffix_id(g[v].suffix)].insert(ptp);
}
}
explicit OutfixBuilder(const RoseBuildImpl &build_in) : build(build_in) {}
bytecode_ptr<NFA> operator()(boost::blank&) const {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
};
bytecode_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
bytecode_ptr<NFA> operator()(UNUSED const MpvProto &mpv) const {
// MPV construction handled separately.
assert(mpv.puffettes.empty());
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
private:
if (!g[v].suffix) {
continue;
}
- if (contains(done, g[v].suffix)) {
+ if (contains(done, suffix_id(g[v].suffix))) {
continue; /* already done */
}
- done.insert(g[v].suffix);
+ done.insert(suffix_id(g[v].suffix));
- if (hasMpvTrigger(all_reports(g[v].suffix), build.rm)) {
+ if (hasMpvTrigger(all_reports(suffix_id(g[v].suffix)), build.rm)) {
return true;
}
}
resources.has_eod = true;
break;
}
- if (g[v].suffix && has_eod_accepts(g[v].suffix)) {
+ if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) {
resources.has_eod = true;
break;
}
DEBUG_PRINTF("literally report eod\n");
return true;
}
- if (g[v].suffix && has_eod_accepts(g[v].suffix)) {
+ if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) {
DEBUG_PRINTF("eod suffix\n");
return true;
}
if (!g[v].suffix) {
continue;
}
- u32 qi = bc.suffixes.at(g[v].suffix);
+ u32 qi = bc.suffixes.at(suffix_id(g[v].suffix));
assert(qi < infos.size());
if (build.isInETable(v)) {
infos.at(qi).eod = 1;
const auto &g = build.g;
for (auto v : vertices_range(g)) {
if (g[v].suffix) {
- insert(&reports, all_reports(g[v].suffix));
+ insert(&reports, all_reports(suffix_id(g[v].suffix)));
}
}
prepMpv(*this, bc, &historyRequired, &mpv_as_outfix);
proto.outfixBeginQueue = qif.allocated_count();
if (!prepOutfixes(*this, bc, &historyRequired)) {
- return nullptr;
+ return bytecode_ptr<RoseEngine>(nullptr);
}
proto.outfixEndQueue = qif.allocated_count();
proto.leftfixBeginQueue = proto.outfixEndQueue;
/* Note: buildNfas may reduce the lag for vertices that have prefixes */
if (!buildNfas(*this, bc, qif, &no_retrigger_queues, &eager_queues,
&proto.leftfixBeginQueue)) {
- return nullptr;
+ return bytecode_ptr<RoseEngine>(nullptr);
}
u32 eodNfaIterOffset = buildEodNfaIterator(bc, proto.leftfixBeginQueue);
buildCountingMiracles(bc);
for (auto v : vertices_range(g)) {
const LeftEngInfo &left = g[v].left;
if (left.castle && left.castle->repeats.size() > 1) {
- left_castles[left].emplace_back(v);
+ left_castles[left_id(left)].emplace_back(v);
}
const RoseSuffixInfo &suffix = g[v].suffix;
if (suffix.castle && suffix.castle->repeats.size() > 1) {
- suffix_castles[suffix].emplace_back(v);
+ suffix_castles[suffix_id(suffix)].emplace_back(v);
}
}
continue;
}
- const left_id &left(g[v].left);
+ const left_id left(g[v].left);
if (::ue2::isAnchored(left) && !isInETable(v)) {
/* etable prefixes currently MUST be transient as we do not know
for (auto v : vertices_range(build.g)) {
if (build.g[v].left) {
const LeftEngInfo &lei = build.g[v].left;
- leftfixes[lei].emplace_back(v);
+ leftfixes[left_id(lei)].emplace_back(v);
}
}
return leftfixes;
if (!info.delayed_ids.empty()
|| !all_of_in(info.vertices,
[&](RoseVertex v) {
- return left == tbi.g[v].left; })) {
+ return left == left_id(tbi.g[v].left); })) {
DEBUG_PRINTF("group %llu is unsquashable\n", info.group_mask);
unsquashable |= info.group_mask;
}
g[v].max_offset = sai.max_bound + sai.literal.length();
lit_info.vertices.insert(v);
- RoseEdge e = add_edge(anchored_root, v, g);
+ RoseEdge e = add_edge(anchored_root, v, g).first;
g[e].minBound = sai.min_bound;
g[e].maxBound = sai.max_bound;
}
g[v].literals.insert(lit_id);
g[v].reports = reports;
- RoseEdge e = add_edge(tbi.root, v, g);
+ RoseEdge e = add_edge(tbi.root, v, g).first;
g[e].minBound = 0;
g[e].maxBound = ROSE_BOUND_INF;
g[v].min_offset = 1;
NFAVertex u = h->start;
for (auto it = s.begin() + s.length() - len; it != s.end(); ++it) {
NFAVertex v = addHolderVertex(*it, *h);
- NFAEdge e = add_edge(u, v, *h);
+ NFAEdge e = add_edge(u, v, *h).first;
if (u == h->start) {
(*h)[e].tops.insert(DEFAULT_TOP);
}
assert(g[e_old].maxBound >= bound_max);
setEdgeBounds(g, e_old, bound_min, bound_max);
} else {
- RoseEdge e_new = add_edge(ar, v, g);
+ RoseEdge e_new = add_edge(ar, v, g).first;
setEdgeBounds(g, e_new, bound_min, bound_max);
to_delete->emplace_back(e_old);
}
if (source(e_old, g) == ar) {
setEdgeBounds(g, e_old, ri.repeatMin + width, ri.repeatMax + width);
} else {
- RoseEdge e_new = add_edge(ar, v, g);
+ RoseEdge e_new = add_edge(ar, v, g).first;
setEdgeBounds(g, e_new, ri.repeatMin + width, ri.repeatMax + width);
to_delete->emplace_back(e_old);
}
// Several vertices may share a suffix, so we collect the set of
// suffixes first to avoid repeating work.
if (g[v].suffix) {
- suffixes.insert(g[v].suffix);
+ suffixes.insert(suffix_id(g[v].suffix));
}
}
bool eligibleForAlwaysOnGroup(const RoseBuildImpl &build, u32 id) {
auto eligble = [&](RoseVertex v) {
return build.isRootSuccessor(v)
- && (!build.g[v].left || !isAnchored(build.g[v].left));
+ && (!build.g[v].left || !isAnchored(left_id(build.g[v].left)));
};
if (any_of_in(build.literal_info[id].vertices, eligble)) {
bool new_group = !groupCount[group_always_on];
for (RoseVertex v : info.vertices) {
- if (build.g[v].left && !isAnchored(build.g[v].left)) {
+ if (build.g[v].left && !isAnchored(left_id(build.g[v].left))) {
new_group = false;
}
}
class SomSlotManager;
struct suffix_id {
- suffix_id(const RoseSuffixInfo &in)
+ explicit suffix_id(const RoseSuffixInfo &in)
: g(in.graph.get()), c(in.castle.get()), d(in.rdfa.get()),
h(in.haig.get()), t(in.tamarama.get()),
dfa_min_width(in.dfa_min_width),
/** \brief represents an engine to the left of a rose role */
struct left_id {
- left_id(const LeftEngInfo &in)
+ explicit left_id(const LeftEngInfo &in)
: g(in.graph.get()), c(in.castle.get()), d(in.dfa.get()),
h(in.haig.get()), dfa_min_width(in.dfa_min_width),
dfa_max_width(in.dfa_max_width) {
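
With suffix_id and left_id no longer implicitly constructible from RoseSuffixInfo/LeftEngInfo, every container lookup keyed on the wrapper type has to name it, which is what the scattered left_id(...) and suffix_id(...) call-site edits in this patch do. A reduced sketch of that pattern; `Info` and `Key` are hypothetical:

    #include <map>
    #include <vector>

    struct Info { int engine = 0; };

    struct Key {
        explicit Key(const Info &in) : engine(in.engine) {}
        int engine;
        bool operator<(const Key &o) const { return engine < o.engine; }
    };

    int main() {
        std::map<Key, std::vector<int>> verts_by_engine;
        Info info;
        // verts_by_engine[info] is now ill-formed; the wrapper is spelled out:
        verts_by_engine[Key(info)].push_back(1);
        return 0;
    }
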
public:
u32 ckey;
- RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
+ explicit RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
bool operator==(const RoseInstrSetCombination &ri) const {
return ckey == ri.ckey;
public:
u32 ekey;
- RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
+ explicit RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
bool operator==(const RoseInstrSetExhaust &ri) const {
return ekey == ri.ekey;
return;
}
rose_look.emplace_back(map<s32, CharReach>());
- getRoseForwardReach(g[t].left, g[e].rose_top, rose_look.back());
+ getRoseForwardReach(left_id(g[t].left), g[e].rose_top, rose_look.back());
}
if (g[v].suffix) {
DEBUG_PRINTF("suffix engine\n");
rose_look.emplace_back(map<s32, CharReach>());
- getSuffixForwardReach(g[v].suffix, g[v].suffix.top, rose_look.back());
+ getSuffixForwardReach(suffix_id(g[v].suffix), g[v].suffix.top, rose_look.back());
}
combineForwardMasks(rose_look, look);
DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index);
return false;
}
- RoseEdge e = edge(build.root, u, g);
-
- if (!e) {
+ auto edge_result = edge(build.root, u, g);
+ RoseEdge e = edge_result.first;
+ if (!edge_result.second) {
DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index);
return false;
}
}
if (g[v].suffix) {
- depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top);
+ depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top);
assert(suffix_width.is_reachable());
DEBUG_PRINTF("suffix with width %s\n", suffix_width.str().c_str());
min_offset = min(min_offset, vert_offset + suffix_width);
bytecode_ptr<HWLM>
buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *litProto) {
if (!litProto) {
- return nullptr;
+ return bytecode_ptr<HWLM>(nullptr);
}
auto hwlm = hwlmBuild(*litProto->hwlmProto, build.cc,
build.getInitialGroups());
/** Key used to group sets of leftfixes by the dedupeLeftfixes path. */
struct RoseGroup {
RoseGroup(const RoseBuildImpl &build, RoseVertex v)
- : left_hash(hashLeftfix(build.g[v].left)),
+ : left_hash(hashLeftfix(left_id(build.g[v].left))),
lag(build.g[v].left.lag), eod_table(build.isInETable(v)) {
const RoseGraph &g = build.g;
assert(in_degree(v, g) == 1);
// Scan the rest of the list for dupes.
for (auto kt = std::next(jt); kt != jte; ++kt) {
if (g[v].left == g[*kt].left
- || !is_equal(g[v].left, g[v].left.leftfix_report,
- g[*kt].left, g[*kt].left.leftfix_report)) {
+ || !is_equal(left_id(g[v].left), g[v].left.leftfix_report,
+ left_id(g[*kt].left), g[*kt].left.leftfix_report)) {
continue;
}
static
bool hasSameEngineType(const RoseVertexProps &u_prop,
const RoseVertexProps &v_prop) {
- const left_id u_left = u_prop.left;
- const left_id v_left = v_prop.left;
+ const left_id u_left = left_id(u_prop.left);
+ const left_id v_left = left_id(v_prop.left);
return !u_left.haig() == !v_left.haig()
&& !u_left.dfa() == !v_left.dfa()
continue;
}
assert(contains(all_reports(left), left.leftfix_report));
- eng_verts[left].emplace_back(v);
+ eng_verts[left_id(left)].emplace_back(v);
}
return eng_verts;
continue;
}
- eng_verts[g[v].left].emplace_back(v);
+ eng_verts[left_id(g[v].left)].emplace_back(v);
}
map<CharReach, vector<left_id>> by_reach;
continue;
}
- suffixes.insert(g[v].suffix, v);
+ suffixes.insert(suffix_id(g[v].suffix), v);
}
deque<SuffixBouquet> suff_groups;
continue;
}
- suffixes.insert(g[v].suffix, v);
+ suffixes.insert(suffix_id(g[v].suffix), v);
}
deque<SuffixBouquet> suff_groups;
return;
}
assert(contains(suffixes, g[v].suffix));
- u32 queue = suffixes.at(g[v].suffix);
+ u32 queue = suffixes.at(suffix_id(g[v].suffix));
u32 event;
assert(contains(engine_info_by_queue, queue));
const auto eng_info = engine_info_by_queue.at(queue);
namespace {
struct ProgKey {
- ProgKey(const RoseProgram &p) : prog(&p) {}
+ explicit ProgKey(const RoseProgram &p) : prog(&p) {}
bool operator==(const ProgKey &b) const {
return RoseProgramEquivalence()(*prog, *b.prog);
ue2_unordered_set<ProgKey> seen;
for (auto &block : blocks_in) {
- if (contains(seen, block)) {
+ if (contains(seen, ProgKey(block))) {
continue;
}
};
struct RoseAliasingInfo {
- RoseAliasingInfo(const RoseBuildImpl &build) {
+ explicit RoseAliasingInfo(const RoseBuildImpl &build) {
const auto &g = build.g;
// Populate reverse leftfix map.
for (auto v : vertices_range(g)) {
if (g[v].left) {
- rev_leftfix[g[v].left].insert(v);
+ rev_leftfix[left_id(g[v].left)].insert(v);
}
}
}
for (const auto &e_a : in_edges_range(a, g)) {
- RoseEdge e = edge(source(e_a, g), b, g);
- if (!e || g[e].rose_top != g[e_a].rose_top) {
+ auto edge_result = edge(source(e_a, g), b, g);
+ RoseEdge e = edge_result.first;
+
+ if (!edge_result.second || g[e].rose_top != g[e_a].rose_top) {
DEBUG_PRINTF("bad tops\n");
return false;
}
bool hasCommonSuccWithBadBounds(RoseVertex a, RoseVertex b,
const RoseGraph &g) {
for (const auto &e_a : out_edges_range(a, g)) {
- if (RoseEdge e = edge(b, target(e_a, g), g)) {
+ auto edge_result = edge(b, target(e_a, g), g);
+ RoseEdge e = edge_result.first;
+ if (edge_result.second) {
if (g[e_a].maxBound < g[e].minBound
|| g[e].maxBound < g[e_a].minBound) {
return true;
bool hasCommonPredWithBadBounds(RoseVertex a, RoseVertex b,
const RoseGraph &g) {
for (const auto &e_a : in_edges_range(a, g)) {
- if (RoseEdge e = edge(source(e_a, g), b, g)) {
+ auto edge_result = edge(source(e_a, g), b, g);
+ RoseEdge e = edge_result.first;
+ if (edge_result.second) {
if (g[e_a].maxBound < g[e].minBound
|| g[e].maxBound < g[e_a].minBound) {
return true;
const bool equal_roses = hasEqualLeftfixes(a, b, g);
for (const auto &e_a : in_edges_range(a, g)) {
- if (RoseEdge e = edge(source(e_a, g), b, g)) {
+ auto edge_result = edge(source(e_a, g), b, g);
+ RoseEdge e = edge_result.first;
+ if (edge_result.second) {
DEBUG_PRINTF("common pred, e_r=%d r_t %u,%u\n",
(int)equal_roses, g[e].rose_top, g[e_a].rose_top);
if (!equal_roses) {
}
assert(contains(rai.rev_leftfix[b_left], b));
- rai.rev_leftfix[b_left].erase(b);
- rai.rev_leftfix[a_left].insert(b);
+ rai.rev_leftfix[left_id(b_left)].erase(b);
+ rai.rev_leftfix[left_id(a_left)].insert(b);
a_left.leftfix_report = new_report;
b_left.leftfix_report = new_report;
updateEdgeTops(g, a, a_top_map);
updateEdgeTops(g, b, b_top_map);
- pruneUnusedTops(castle, g, rai.rev_leftfix[a_left]);
+ pruneUnusedTops(castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
b_left.castle = new_castle;
assert(a_left == b_left);
- rai.rev_leftfix[a_left].insert(a);
- rai.rev_leftfix[a_left].insert(b);
- pruneUnusedTops(*new_castle, g, rai.rev_leftfix[a_left]);
+ rai.rev_leftfix[left_id(a_left)].insert(a);
+ rai.rev_leftfix[left_id(a_left)].insert(b);
+ pruneUnusedTops(*new_castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
// We should be protected from merging common preds with tops leading
// to completely different repeats by earlier checks, but just in
// case...
- if (RoseEdge a_edge = edge(source(e, g), a, g)) {
+ auto edge_result = edge(source(e, g), a, g);
+ RoseEdge a_edge = edge_result.first;
+ if (edge_result.second) {
u32 a_top = g[a_edge].rose_top;
const PureRepeat &a_pr = m_castle->repeats[a_top]; // new report
if (pr != a_pr) {
b_left.leftfix_report = new_report;
assert(a_left == b_left);
- rai.rev_leftfix[a_left].insert(a);
- rai.rev_leftfix[a_left].insert(b);
- pruneUnusedTops(*m_castle, g, rai.rev_leftfix[a_left]);
+ rai.rev_leftfix[left_id(a_left)].insert(a);
+ rai.rev_leftfix[left_id(a_left)].insert(b);
+ pruneUnusedTops(*m_castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
a_left.graph = new_graph;
b_left.graph = new_graph;
- rai.rev_leftfix[a_left].insert(a);
- rai.rev_leftfix[a_left].insert(b);
- pruneUnusedTops(*new_graph, g, rai.rev_leftfix[a_left]);
+ rai.rev_leftfix[left_id(a_left)].insert(a);
+ rai.rev_leftfix[left_id(a_left)].insert(b);
+ pruneUnusedTops(*new_graph, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
DEBUG_PRINTF("attempting merge of roses on vertices %zu and %zu\n",
g[a].index, g[b].index);
- set<RoseVertex> &b_verts = rai.rev_leftfix[b_left];
+ set<RoseVertex> &b_verts = rai.rev_leftfix[left_id(b_left)];
set<RoseVertex> aa;
aa.insert(a);
for (const auto &e : in_edges_range(v, g)) {
RoseVertex u = source(e, g);
DEBUG_PRINTF("u index=%zu\n", g[u].index);
- if (RoseEdge et = edge(u, t, g)) {
+ auto edge_result = edge(u, t, g);
+ RoseEdge et = edge_result.first;
+ if (edge_result.second) {
if (g[et].minBound <= g[e].minBound
&& g[et].maxBound >= g[e].maxBound) {
DEBUG_PRINTF("remove more constrained edge\n");
}
if (g[v].suffix) {
- depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top);
+ depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top);
assert(suffix_width.is_reachable());
DEBUG_PRINTF("%zu has suffix with top %u (width %s), can fire "
"report at %u\n",
u64a w = g[v].max_offset;
if (g[v].suffix) {
- if (has_non_eod_accepts(g[v].suffix)) {
+ if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
return ROSE_BOUND_INF;
}
- depth suffix_width = findMaxWidth(g[v].suffix, g[v].suffix.top);
+ depth suffix_width = findMaxWidth(suffix_id(g[v].suffix), g[v].suffix.top);
DEBUG_PRINTF("suffix max width for top %u is %s\n", g[v].suffix.top,
suffix_width.str().c_str());
assert(suffix_width.is_reachable());
accept_eod node */
if (g[v].suffix) {
- if (has_non_eod_accepts(g[v].suffix)) {
+ if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
DEBUG_PRINTF("has accept\n");
return ROSE_BOUND_INF;
}
- depth suffix_width = findMaxWidth(g[v].suffix);
+ depth suffix_width = findMaxWidth(suffix_id(g[v].suffix));
DEBUG_PRINTF("suffix max width %s\n", suffix_width.str().c_str());
assert(suffix_width.is_reachable());
if (!suffix_width.is_finite()) {
bool only_accel_init = !has_non_literals;
bool trust_daddy_states = !has_non_literals;
- bytecode_ptr<NFA> dfa = nullptr;
+ bytecode_ptr<NFA> dfa(nullptr);
if (cc.grey.allowSmallWriteSheng) {
dfa = shengCompile(rdfa, cc, rm, only_accel_init, &accel_states);
if (!dfa) {
auto nfa = getDfa(rdfa, cc, rm, has_non_literals, accel_states);
if (!nfa) {
DEBUG_PRINTF("DFA compile failed for smallwrite NFA\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
if (is_slow(rdfa, accel_states, roseQuality)) {
DEBUG_PRINTF("is slow\n");
*small_region = cc.grey.smallWriteLargestBufferBad;
if (*small_region <= *start_offset) {
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
if (clear_deeper_reports(rdfa, *small_region - *start_offset)) {
minimize_hopcroft(rdfa, cc.grey);
if (rdfa.start_anchored == DEAD_STATE) {
DEBUG_PRINTF("all patterns pruned out\n");
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
nfa = getDfa(rdfa, cc, rm, has_non_literals, accel_states);
if (!nfa) {
DEBUG_PRINTF("DFA compile failed for smallwrite NFA\n");
assert(0); /* able to build orig dfa but not the trimmed? */
- return nullptr;
+ return bytecode_ptr<NFA>(nullptr);
}
}
} else {
if (nfa->length > cc.grey.limitSmallWriteOutfixSize
|| nfa->length > cc.grey.limitDFASize) {
DEBUG_PRINTF("smallwrite outfix size too large\n");
- return nullptr; /* this is just a soft failure - don't build smwr */
+ return bytecode_ptr<NFA>(nullptr); /* this is just a soft failure - don't build smwr */
}
nfa->queueIndex = 0; /* dummy, small write API does not use queue */
if (dfas.empty() && !has_literals) {
DEBUG_PRINTF("no smallwrite engine\n");
poisoned = true;
- return nullptr;
+ return bytecode_ptr<SmallWriteEngine>(nullptr);
}
if (poisoned) {
DEBUG_PRINTF("some pattern could not be made into a smallwrite dfa\n");
- return nullptr;
+ return bytecode_ptr<SmallWriteEngine>(nullptr);
}
// We happen to know that if the rose is high quality, we're going to limit
if (dfas.empty()) {
DEBUG_PRINTF("no dfa, pruned everything away\n");
- return nullptr;
+ return bytecode_ptr<SmallWriteEngine>(nullptr);
}
if (!mergeDfas(dfas, rm, cc)) {
dfas.clear();
- return nullptr;
+ return bytecode_ptr<SmallWriteEngine>(nullptr);
}
assert(dfas.size() == 1);
DEBUG_PRINTF("some smallwrite outfix could not be prepped\n");
/* just skip the smallwrite optimization */
poisoned = true;
- return nullptr;
+ return bytecode_ptr<SmallWriteEngine>(nullptr);
}
u32 size = sizeof(SmallWriteEngine) + nfa->length;
AlignedAllocator() noexcept {}
template <class U, std::size_t N2>
- AlignedAllocator(const AlignedAllocator<U, N2> &) noexcept {}
+ explicit AlignedAllocator(const AlignedAllocator<U, N2> &) noexcept {}
template <class U> struct rebind {
using other = AlignedAllocator<U, N>;
assert(none());
}
- bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
+ explicit bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
assert(a.size() == requested_size);
assert(none());
for (auto i = a.find_first(); i != a.npos; i = a.find_next(i)) {
}
}
- bytecode_ptr(std::nullptr_t) {}
+ explicit bytecode_ptr(std::nullptr_t) {}
T *get() const { return ptr.get(); }
// Constructors.
- flat_set(const Compare &compare = Compare(),
+ explicit flat_set(const Compare &compare = Compare(),
const Allocator &alloc = Allocator())
: base_type(compare, alloc) {}
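
Making this default constructor explicit is what outlaws `{}` as an empty-set argument and as a default argument, hence the earlier edits to `numberStates(h, flat_set<NFAVertex>())` and to the `known_bad` default of scoreEdges. A minimal reproduction; `small_set` and `scan` are hypothetical:

    #include <functional>

    template <typename T, typename Cmp = std::less<T>>
    struct small_set {
        explicit small_set(const Cmp &c = Cmp()) : cmp(c) {}
        Cmp cmp;
    };

    // void scan(const small_set<int> &known_bad = {}); // ill-formed: the
    // default argument copy-list-initializes via an explicit constructor.
    void scan(const small_set<int> &known_bad = small_set<int>()) {
        (void)known_bad;
    }

    int main() {
        scan();
        return 0;
    }
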
template <class InputIt>
flat_set(InputIt first, InputIt last, const Compare &compare = Compare(),
const Allocator &alloc = Allocator())
// Constructors.
- flat_map(const Compare &compare = Compare(),
+ explicit flat_map(const Compare &compare = Compare(),
const Allocator &alloc = Allocator())
: base_type(compare, alloc) {}
friend class flat_map;
protected:
Compare c;
- value_compare(Compare c_in) : c(c_in) {}
+ explicit value_compare(Compare c_in) : c(c_in) {}
public:
bool operator()(const value_type &lhs, const value_type &rhs) {
return c(lhs.first, rhs.first);
using reference = void;
using iterator_category = std::output_iterator_tag;
- hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
+ explicit hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
hash_output_it &operator++() {
return *this;
}
}
struct deref_proxy {
- deref_proxy(size_t *hash_out) : out(hash_out) {}
+ explicit deref_proxy(size_t *hash_out) : out(hash_out) {}
template<typename T>
void operator=(const T &val) const {
size_t *out; /* output location of the owning iterator */
};
- deref_proxy operator*() { return {out}; }
+ deref_proxy operator*() { return deref_proxy(out); }
private:
size_t *out; /* location to output the hashes to */
* edge() and add_edge(). As we have null_edges and we always allow
* parallel edges, the bool component of the return from these functions is
* not required. */
- edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
+ explicit edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
: p(tup.first.p), serial(tup.first.serial) {
assert(tup.second == (bool)tup.first);
}
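
This is the root-cause hunk for the graph edits above: once the pair conversion is explicit, `NFAEdge e = add_edge(u, v, g);` no longer compiles, forcing `.first` or std::tie at every call site. A reduced model of the descriptor, with hypothetical member names:

    #include <cassert>
    #include <utility>

    struct edge_desc { // reduced model of ue2_graph's edge_descriptor
        edge_desc() = default;
        explicit edge_desc(const std::pair<edge_desc, bool> &tup)
            : valid(tup.first.valid) {
            // mirrors the original's check that the bool agrees with the
            // descriptor's own validity
            assert(tup.second == tup.first.valid);
        }
        explicit operator bool() const { return valid; }
        bool valid = false;
    };

    int main() {
        auto res = std::make_pair(edge_desc{}, false);
        edge_desc e(res); // the conversion must now be spelled out
        return e ? 1 : 0;
    }
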
vertex_descriptor> {
using super = typename adjacency_iterator::iterator_adaptor_;
public:
- adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
+ explicit adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
adjacency_iterator() { }
vertex_descriptor dereference() const {
vertex_descriptor> {
using super = typename inv_adjacency_iterator::iterator_adaptor_;
public:
- inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
+ explicit inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
inv_adjacency_iterator() { }
vertex_descriptor dereference() const {
typedef typename boost::lvalue_property_map_tag category;
- prop_map(value_type P_of::*m_in) : member(m_in) { }
+ explicit prop_map(value_type P_of::*m_in) : member(m_in) { }
reference operator[](key_type k) const {
return k.raw()->props.*member;
ParsedExpression parsed(0, pattern.c_str(), flags, 0);
auto built_expr = buildGraph(rm, cc, parsed);
const auto &g = built_expr.g;
- ASSERT_TRUE(g != nullptr);
+ ASSERT_TRUE(static_cast<bool>(g));
clearReports(*g);
rm.setProgramOffset(0, MATCH_REPORT);
/* LBR triggered by dot */
vector<vector<CharReach>> triggers = {{CharReach::dot()}};
nfa = constructLBR(*g, triggers, cc, rm);
- ASSERT_TRUE(nfa != nullptr);
+ ASSERT_TRUE(static_cast<bool>(nfa));
full_state = make_bytecode_ptr<char>(nfa->scratchStateSize, 64);
stream_state = make_bytecode_ptr<char>(nfa->streamStateSize);
class CorpusEditor {
public:
- CorpusEditor(CorpusProperties &p) : props(p) {}
+ explicit CorpusEditor(CorpusProperties &p) : props(p) {}
// Apply edits to a corpus
void applyEdits(string &corpus);
class CorpusEditorUtf8 {
public:
- CorpusEditorUtf8(CorpusProperties &p) : props(p) {}
+ explicit CorpusEditorUtf8(CorpusProperties &p) : props(p) {}
// Apply edits to a corpus.
void applyEdits(vector<unichar> &corpus);