This set of changes only includes fill and copy operations.
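For context, every hunk below applies the same pattern: an explicit filter/copy loop is replaced with a named predicate lambda fed to std::copy_if (or std::copy / std::fill / std::accumulate for the plain copy, fill, and sum cases). A minimal, self-contained sketch of that pattern, using hypothetical names (items, is_even) rather than repository code:

    // Sketch only: illustrates the loop-to-algorithm rewrite, not Hyperscan code.
    #include <algorithm>
    #include <iterator>
    #include <vector>

    int main() {
        const std::vector<int> items = {1, 2, 3, 4, 5, 6};
        std::vector<int> evens;

        // Named predicate lambda, mirroring the style of the hunks below.
        auto is_even = [](int v) { return v % 2 == 0; };

        // Replaces: for (v : items) { if (is_even(v)) { evens.push_back(v); } }
        std::copy_if(items.begin(), items.end(),
                     std::back_inserter(evens), is_even);

        // std::fill covers the "set every element" loops.
        std::vector<int> zeros(4, 1);
        std::fill(zeros.begin(), zeros.end(), 0);

        return (evens.size() == 3 && zeros[0] == 0) ? 0 : 1;
    }

The intent is behaviour-preserving: the algorithm calls make the copy/fill intent explicit and keep each predicate in one named, reusable place.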
/* we need to interpose a dummy dot vertex between v and accept if
* required so that ^ doesn't match trailing \n */
- for (const auto &e : out_edges_range(v, g)) {
- if (target(e, g) == g.accept) {
- dead.emplace_back(e);
- }
- }
+ auto deads = [&g=g](const NFAEdge &e) {
+ return (target(e, g) == g.accept);
+ };
+ const auto &er = out_edges_range(v, g);
+ std::copy_if(begin(er), end(er), std::back_inserter(dead), deads);
+
/* assert has been resolved; clear flag */
g[v].assert_flags &= ~POS_FLAG_MULTILINE_START;
}
while (!graph_empty(cg)) {
const vector<u32> &c = cliquesVec.back();
vector<CliqueVertex> dead;
- for (const auto &v : vertices_range(cg)) {
- if (find(c.begin(), c.end(), cg[v].stateId) != c.end()) {
- dead.emplace_back(v);
- }
- }
+
+ auto deads = [&c=c, &cg=cg](const CliqueVertex &v) {
+ return (find(c.begin(), c.end(), cg[v].stateId) != c.end());
+ };
+ const auto &vr = vertices_range(cg);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(dead), deads);
+
for (const auto &v : dead) {
clear_vertex(v, cg);
remove_vertex(v, cg);
// Build a list of vertices with a state index assigned.
vector<NFAVertex> verts;
verts.reserve(args.num_states);
- for (auto v : vertices_range(h)) {
- if (state_ids.at(v) != NO_STATE) {
- verts.emplace_back(v);
- }
- }
+ auto sidat = [&state_ids=state_ids](const NFAVertex &v) {
+ return (state_ids.at(v) != NO_STATE);
+ };
+ const auto &vr = vertices_range(h);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(verts), sidat);
// Build a mapping from set-of-states -> reachability.
map<NFAStateSet, CharReach> mapping;
*/
#include "config.h"
+#include <numeric>
+
#include "tamaramacompile.h"
sizeof(u32) * subSize + 64; // offsets to subengines in bytecode and
// padding for subengines
- for (const auto &sub : tamaInfo.subengines) {
- total_size += ROUNDUP_CL(sub->length);
- }
+ auto subl = [](size_t z, const NFA *sub) {
+ return z + (size_t)(ROUNDUP_CL(sub->length));
+ };
+ total_size += std::accumulate(tamaInfo.subengines.begin(),
+ tamaInfo.subengines.end(), size_t{0}, subl);
// use subSize as a sentinel value for no active subengines,
// so add one to subSize here
static
vector<NFAEdge> getAsserts(const NGHolder &g) {
vector<NFAEdge> out;
- for (const auto &e : edges_range(g)) {
- if (g[e].assert_flags) {
- out.emplace_back(e);
- }
- }
+ auto assertflags = [&g=g](const NFAEdge &e) {
+ return (g[e].assert_flags);
+ };
+ const auto &er = edges_range(g);
+ std::copy_if(begin(er), end(er), std::back_inserter(out), assertflags);
+
return out;
}
* for SOM mode. (see UE-1544) */
bool optimiseVirtualStarts(NGHolder &g) {
vector<NFAEdge> dead;
+ auto deads = [&g=g](const NFAEdge &e) {
+ return (!is_any_start(source(e, g), g));
+ };
+
for (auto v : adjacent_vertices_range(g.startDs, g)) {
u32 flags = g[v].assert_flags;
if (!(flags & POS_FLAG_VIRTUAL_START)) {
continue;
}
-
- for (const auto &e : in_edges_range(v, g)) {
- if (!is_any_start(source(e, g), g)) {
- dead.emplace_back(e);
- }
- }
+ const auto &er = in_edges_range(v, g);
+ std::copy_if(begin(er), end(er), std::back_inserter(dead), deads);
}
if (dead.empty()) {
vector<NFAVertex> preds;
vector<NFAEdge> dead;
+ auto deads = [&g=g](const NFAEdge &e) {
+ return (target(e, g) != g.startDs);
+ };
for (auto u : inv_adjacent_vertices_range(cyclic, g)) {
DEBUG_PRINTF("pred %zu\n", g[u].index);
if (u == cyclic) {
// We want to delete the out-edges of each predecessor, but need to
// make sure we don't delete the startDs self loop.
- for (const auto &e : out_edges_range(u, g)) {
- if (target(e, g) != g.startDs) {
- dead.emplace_back(e);
- }
- }
+
+ const auto &er = out_edges_range(u, g);
+ std::copy_if(begin(er), end(er), std::back_inserter(dead), deads);
}
remove_edges(dead, g);
/* create list of candidates first, to avoid issues of iter invalidation */
DEBUG_PRINTF("attempting to reuse vertices for top starts\n");
vector<NFAVertex> cand_starts;
- for (NFAVertex u : unhandled_succ_tops | map_keys) {
- if (hasSelfLoop(u, g)) {
- cand_starts.emplace_back(u);
- }
- }
+ auto cands = [&g=g](const NFAVertex &u) {
+ return (hasSelfLoop(u, g));
+ };
+ const auto &ur = unhandled_succ_tops | map_keys;
+ std::copy_if(begin(ur), end(ur), std::back_inserter(cand_starts), cands);
for (NFAVertex u : cand_starts) {
if (!contains(unhandled_succ_tops, u)) {
const size_t edge_count = num_edges(lg);
vector<LitEdge> fwd_edges;
fwd_edges.reserve(edge_count);
- for (const auto &e : edges_range(lg)) {
- fwd_edges.push_back(e);
- }
+
+ const auto &er = edges_range(lg);
+ std::copy(begin(er), end(er), std::back_inserter(fwd_edges));
vector<LitEdge> rev_map(2 * edge_count);
&& edge(g.accept, g.acceptEod, g).second) {
// Trivial case: there are no in-edges to our accepts (other than
// accept->acceptEod), so all non-specials are unreachable.
- for (auto v : vertices_range(g)) {
- if (!is_special(v, g)) {
- dead.emplace_back(v);
- }
- }
+
+ auto deads = [&g=g](const NFAVertex &v) {
+ return (!is_special(v, g));
+ };
+ const auto &vr = vertices_range(g);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(dead), deads);
+
} else {
// Walk a reverse graph from acceptEod with Boost's depth_first_visit
// call.
}
vector<NFAEdge> dead;
+ auto deads = [&g=g](const NFAEdge &e) {
+ return (!is_any_accept(target(e, g), g));
+ };
for (auto u : inv_adjacent_vertices_range(g.accept, g)) {
if (is_special(u, g)) {
continue;
}
// We can prune any out-edges that aren't accepts
- for (const auto &e : out_edges_range(u, g)) {
- if (!is_any_accept(target(e, g), g)) {
- dead.emplace_back(e);
- }
- }
+ const auto &er = out_edges_range(u, g);
+ std::copy_if(begin(er), end(er), std::back_inserter(dead), deads);
}
if (dead.empty()) {
cloneHolder(g, g_orig, &vmap);
vector<NFAVertex> vstarts;
- for (auto v : vertices_range(g)) {
- if (is_virtual_start(v, g)) {
- vstarts.emplace_back(v);
- }
- }
+ auto vstart = [&g=g](const NFAVertex &v) {
+ return (is_virtual_start(v, g));
+ };
+ const auto &vr = vertices_range(g);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(vstarts), vstart);
+
vstarts.emplace_back(g.startDs);
// wire the successors of every virtual start or startDs to g.start.
DEBUG_PRINTF("adding '%s' to graph\n", dumpString(s).c_str());
vector<NFAVertex> tail;
assert(in_degree(h.acceptEod, h) == 1);
- for (auto v : inv_adjacent_vertices_range(h.accept, h)) {
- tail.emplace_back(v);
- }
+
+ const auto &vr = inv_adjacent_vertices_range(h.accept, h);
+ std::copy(begin(vr), end(vr), std::back_inserter(tail));
+
assert(!tail.empty());
for (auto v : tail) {
vector<PositionInfo> out;
out.reserve(a.size()); // output should be close to input in size.
- for (const auto &p : a) {
- if (seen.emplace(p.pos, p.flags).second) {
- out.emplace_back(p); // first encounter
- }
- }
+ auto seens = [&seen=seen](const PositionInfo &p) {
+ // Deliberately stateful predicate: emplace() returns true only on the
+ // first encounter of a given (pos, flags) pair, so duplicates are dropped.
+ return (seen.emplace(p.pos, p.flags).second);
+ };
+ std::copy_if(begin(a), end(a), std::back_inserter(out), seens);
DEBUG_PRINTF("in %zu; out %zu\n", a.size(), out.size());
a.swap(out);
// Collect all edges leading into EOD event literal vertices.
vector<RoseEdge> edge_list;
+
for (const auto &v : lit_info.vertices) {
- for (const auto &e : in_edges_range(v, g)) {
- edge_list.emplace_back(e);
- }
+ const auto &er = in_edges_range(v, g);
+ std::copy(begin(er), end(er), std::back_inserter(edge_list));
}
// Sort edge list for determinism, prettiness.
RoseProgram sparse_program;
vector<u32> keys;
- for (const u32 &key : pred_blocks | map_keys) {
- keys.emplace_back(key);
- }
+ const auto &k = pred_blocks | map_keys;
+ std::copy(begin(k), end(k), std::back_inserter(keys));
const RoseInstruction *end_inst = sparse_program.end_instruction();
auto ri = std::make_unique<RoseInstrSparseIterAny>(num_states, keys, end_inst);
DEBUG_PRINTF("%zu candidates enter\n", candidates.size());
vector<RoseVertex> dead;
- for (const auto &v : candidates) {
- if (hasNoDiamondSiblings(g, v)) {
- dead.emplace_back(v);
- }
- }
+ auto deads = [&g=g](const RoseVertex &v) {
+ return (hasNoDiamondSiblings(g, v));
+ };
+ std::copy_if(begin(candidates), end(candidates), std::back_inserter(dead), deads);
for (const auto &v : dead) {
candidates.erase(v);
const RoseGraph &g = tbi.g;
vector<RoseVertex> table_verts;
-
- for (auto v : vertices_range(g)) {
- if (tbi.hasLiteralInTable(v, table)) {
- table_verts.emplace_back(v);
- }
- }
+ auto tvs = [&tbi=tbi, &table=table](const RoseVertex &v) {
+ return (tbi.hasLiteralInTable(v, table));
+ };
+ const auto &vr = vertices_range(g);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(table_verts), tvs);
set<RoseVertex> reachable;
find_reachable(g, table_verts, &reachable);
table == ROSE_FLOATING ? "floating" : "anchored");
vector<RoseVertex> table_verts;
-
- for (auto v : vertices_range(g)) {
- if ((table == ROSE_FLOATING && tbi.isFloating(v))
- || (table == ROSE_ANCHORED && tbi.isAnchored(v))) {
- table_verts.emplace_back(v);
- }
- }
+ auto tvs = [&tbi=tbi, &table=table](const RoseVertex &v) {
+ return ((table == ROSE_FLOATING && tbi.isFloating(v))
+ || (table == ROSE_ANCHORED && tbi.isAnchored(v)));
+ };
+ const auto &vr = vertices_range(g);
+ std::copy_if(begin(vr), end(vr), std::back_inserter(table_verts), tvs);
set<RoseVertex> reachable;
find_reachable(g, table_verts, &reachable);
/// Set all bits.
void setall() {
- for (auto &e : bits) {
- e = all_ones;
- }
+ std::fill(bits.begin(), bits.end(), all_ones);
clear_trailer();
}
/// Clear all bits.
void clear() {
- for (auto &e : bits) {
- e = 0;
- }
+ std::fill(bits.begin(), bits.end(), 0);
}
/// Clear all bits (alias for bitset::clear).
if (sigSets.empty()) {
SignatureSet sigs;
sigs.reserve(exprMapTemplate.size());
- for (auto i : exprMapTemplate | map_keys) {
- sigs.push_back(i);
- }
+ const auto &i = exprMapTemplate | map_keys;
+ std::copy(begin(i), end(i), std::back_inserter(sigs));
+
sigSets.emplace_back(exprPath, std::move(sigs));
}