src/util/dump_mask.h
src/util/fatbit_build.cpp
src/util/fatbit_build.h
+ src/util/flat_containers.h
src/util/graph.h
src/util/graph_range.h
src/util/graph_small_color_map.h
src/util/small_vector.h
src/util/target_info.cpp
src/util/target_info.h
- src/util/ue2_containers.h
src/util/ue2_graph.h
src/util/ue2string.cpp
src/util/ue2string.h
src/util/unicode_def.h
src/util/unicode_set.h
src/util/uniform_ops.h
+ src/util/unordered.h
src/util/verify_types.h
)
#include "util/math.h"
#include "util/noncopyable.h"
#include "util/target_info.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
#include "util/verify_types.h"
#include <numeric>
#include <set>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include <boost/multi_array.hpp>
const vector<LiteralIndex> &vl,
const vector<hwlmLiteral> &lits,
SuffixPositionInString pos,
- std::map<u32, ue2::unordered_set<u32> > &m2) {
+ map<u32, unordered_set<u32>> &m2) {
assert(eng.bits < 32);
u32 distance = 0;
SuffixPositionInString pLimit = eng.getBucketWidth(b);
for (SuffixPositionInString pos = 0; pos < pLimit; pos++) {
u32 bit = eng.getSchemeBit(b, pos);
- map<u32, ue2::unordered_set<u32>> m2;
+ map<u32, unordered_set<u32>> m2;
bool done = getMultiEntriesAtPosition(eng, vl, lits, pos, m2);
if (done) {
clearbit(&defaultMask[0], bit);
}
for (const auto &elem : m2) {
u32 dc = elem.first;
- const ue2::unordered_set<u32> &mskSet = elem.second;
+ const unordered_set<u32> &mskSet = elem.second;
u32 v = ~dc;
do {
u32 b2 = v & dc;
#define FDR_ENGINE_DESCRIPTION_H
#include "engine_description.h"
-#include "util/ue2_containers.h"
#include <map>
#include <memory>
#include "util/make_unique.h"
#include "util/noncopyable.h"
#include "util/popcount.h"
+#include "util/small_vector.h"
#include "util/target_info.h"
#include "util/verify_types.h"
#include "util/verify_types.h"
#include <sstream>
+#include <unordered_set>
#include <vector>
#define PATHS_LIMIT 500
u16 top_remap = raw.alpha_remap[TOP];
- ue2::unordered_set<dstate_id_t> seen;
+ std::unordered_set<dstate_id_t> seen;
while (true) {
seen.insert(s);
DEBUG_PRINTF("basis %hu\n", s);
#include "util/simd_types.h"
#include <cstdio>
+#include <map>
+#include <set>
#include <vector>
#ifndef DUMP_SUPPORT
#include "ue2common.h"
#include "util/charreach.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
union AccelAux;
#include "util/compile_context.h"
#include "util/container.h"
#include "util/dump_charclass.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/make_unique.h"
#include "util/multibit_build.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include "util/verify_types.h"
#include "grey.h"
void getNeighborInfo(const CliqueGraph &g, vector<u32> &neighbor,
const CliqueVertex &cv, const set<u32> &group) {
u32 id = g[cv].stateId;
- ue2::unordered_set<u32> neighborId;
+ unordered_set<u32> neighborId;
// find neighbors for cv
for (const auto &v : adjacent_vertices_range(cv, g)) {
#include "nfagraph/ng_repeat.h"
#include "util/bytecode_ptr.h"
#include "util/depth.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <map>
#include <memory>
#include <set>
+#include <unordered_map>
#include <vector>
struct NFA;
std::map<u32, PureRepeat> repeats;
/** \brief Mapping from report to associated tops. */
- ue2::unordered_map<ReportID, flat_set<u32>> report_map;
+ std::unordered_map<ReportID, flat_set<u32>> report_map;
/**
* \brief Next top id to use. Repeats may be removed without top remapping,
* of the reports in the given set.
*/
bool requiresDedupe(const CastleProto &proto,
- const ue2::flat_set<ReportID> &reports);
+ const flat_set<ReportID> &reports);
/**
* \brief Build an NGHolder from a CastleProto.
#include "rdfa.h"
#include "ue2common.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/noncopyable.h"
#include "util/partitioned_set.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <functional>
#include "nfa_internal.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
#include "util/order_check.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include "util/verify_types.h"
#include "ue2common.h"
#include "nfa_kind.h"
#include "ue2common.h"
#include "util/bytecode_ptr.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/order_check.h"
#include <map>
#include "mcclellancompile.h"
#include "ue2common.h"
#include "util/charreach.h"
+#include "util/flat_containers.h"
#include "util/noncopyable.h"
#include "util/order_check.h"
-#include "util/ue2_containers.h"
#include <map>
#include <memory>
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "gough_internal.h"
#include "grey.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/order_check.h"
-#include "util/ue2_containers.h"
#include "ue2common.h"
if (contains(aux.containing_v, def)) {
def_v = aux.containing_v.at(def);
}
- ue2::unordered_set<GoughVertex> done;
+ unordered_set<GoughVertex> done;
while (!pending_vertex.empty()) {
GoughVertex current = *pending_vertex.begin();
pending_vertex.erase(current);
#include "util/charreach.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
#include "util/order_check.h"
+#include "util/unordered.h"
#include "util/verify_types.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <cassert>
};
struct limex_accel_info {
- ue2::unordered_set<NFAVertex> accelerable;
+ unordered_set<NFAVertex> accelerable;
map<NFAStateSet, precalcAccel> precalc;
- ue2::unordered_map<NFAVertex, flat_set<NFAVertex>> friends;
- ue2::unordered_map<NFAVertex, AccelScheme> accel_map;
+ unordered_map<NFAVertex, flat_set<NFAVertex>> friends;
+ unordered_map<NFAVertex, AccelScheme> accel_map;
};
static
map<NFAVertex, NFAStateSet>
reindexByStateId(const map<NFAVertex, NFAStateSet> &in, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &state_ids,
+ const unordered_map<NFAVertex, u32> &state_ids,
const u32 num_states) {
map<NFAVertex, NFAStateSet> out;
struct build_info {
build_info(NGHolder &hi,
- const ue2::unordered_map<NFAVertex, u32> &states_in,
+ const unordered_map<NFAVertex, u32> &states_in,
const vector<BoundedRepeatData> &ri,
const map<NFAVertex, NFAStateSet> &rsmi,
const map<NFAVertex, NFAStateSet> &smi,
}
NGHolder &h;
- const ue2::unordered_map<NFAVertex, u32> &state_ids;
+ const unordered_map<NFAVertex, u32> &state_ids;
const vector<BoundedRepeatData> &repeats;
// Squash maps; state sets are indexed by state_id.
map<NFAVertex, NFAStateSet> squashMap;
const map<u32, set<NFAVertex>> &tops;
- ue2::unordered_set<NFAVertex> tugs;
+ unordered_set<NFAVertex> tugs;
map<NFAVertex, BoundedRepeatSummary> br_cyclic;
const set<NFAVertex> &zombies;
bool do_accel;
static
void nfaFindAccelSchemes(const NGHolder &g,
const map<NFAVertex, BoundedRepeatSummary> &br_cyclic,
- ue2::unordered_map<NFAVertex, AccelScheme> *out) {
+ unordered_map<NFAVertex, AccelScheme> *out) {
vector<CharReach> refined_cr = reduced_cr(g, br_cyclic);
NFAVertex sds_or_proxy = get_sds_or_proxy(g);
}
struct fas_visitor : public boost::default_bfs_visitor {
- fas_visitor(const ue2::unordered_map<NFAVertex, AccelScheme> &am_in,
- ue2::unordered_map<NFAVertex, AccelScheme> *out_in)
+ fas_visitor(const unordered_map<NFAVertex, AccelScheme> &am_in,
+ unordered_map<NFAVertex, AccelScheme> *out_in)
: accel_map(am_in), out(out_in) {}
void discover_vertex(NFAVertex v, const NGHolder &) {
throw this; /* done */
}
}
- const ue2::unordered_map<NFAVertex, AccelScheme> &accel_map;
- ue2::unordered_map<NFAVertex, AccelScheme> *out;
+ const unordered_map<NFAVertex, AccelScheme> &accel_map;
+ unordered_map<NFAVertex, AccelScheme> *out;
};
static
void filterAccelStates(NGHolder &g, const map<u32, set<NFAVertex>> &tops,
- ue2::unordered_map<NFAVertex, AccelScheme> *accel_map) {
+ unordered_map<NFAVertex, AccelScheme> *accel_map) {
/* We want the NFA_MAX_ACCEL_STATES best acceleration states, everything
* else should be ditched. We use a simple BFS to choose accel states near
* the start. */
tempEdges.push_back(e); // Remove edge later.
}
- ue2::unordered_map<NFAVertex, AccelScheme> out;
+ unordered_map<NFAVertex, AccelScheme> out;
try {
boost::breadth_first_search(g, g.start,
return idx;
}
+using ReportListCache = ue2_unordered_map<vector<ReportID>, u32>;
+
static
u32 addReports(const flat_set<ReportID> &r, vector<ReportID> &reports,
- unordered_map<vector<ReportID>, u32> &reportListCache) {
+ ReportListCache &reports_cache) {
assert(!r.empty());
vector<ReportID> my_reports(begin(r), end(r));
my_reports.push_back(MO_INVALID_IDX); // sentinel
- auto cache_it = reportListCache.find(my_reports);
- if (cache_it != end(reportListCache)) {
+ auto cache_it = reports_cache.find(my_reports);
+ if (cache_it != end(reports_cache)) {
u32 offset = cache_it->second;
DEBUG_PRINTF("reusing cached report list at %u\n", offset);
return offset;
u32 offset = verify_u32(reports.size());
insert(&reports, reports.end(), my_reports);
- reportListCache.emplace(move(my_reports), offset);
+ reports_cache.emplace(move(my_reports), offset);
return offset;
}
static
-void buildAcceptsList(const build_info &args,
- unordered_map<vector<ReportID>, u32> &reports_cache,
+void buildAcceptsList(const build_info &args, ReportListCache &reports_cache,
vector<NFAVertex> &verts, vector<NFAAccept> &accepts,
vector<ReportID> &reports, vector<NFAStateSet> &squash) {
if (verts.empty()) {
}
static
-void buildAccepts(const build_info &args,
- unordered_map<vector<ReportID>, u32> &reports_cache,
+void buildAccepts(const build_info &args, ReportListCache &reports_cache,
NFAStateSet &acceptMask, NFAStateSet &acceptEodMask,
vector<NFAAccept> &accepts, vector<NFAAccept> &acceptsEod,
vector<ReportID> &reports, vector<NFAStateSet> &squash) {
static
u32 compressedStateSize(const NGHolder &h, const NFAStateSet &maskedStates,
- const ue2::unordered_map<NFAVertex, u32> &state_ids) {
+ const unordered_map<NFAVertex, u32> &state_ids) {
// Shrink state requirement to enough to fit the compressed largest reach.
vector<u32> allreach(N_CHARS, 0);
static
bool hasInitDsStates(const NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> &state_ids) {
+ const unordered_map<NFAVertex, u32> &state_ids) {
if (state_ids.at(h.startDs) != NO_STATE) {
return true;
}
};
static
-u32 buildExceptionMap(const build_info &args,
- unordered_map<vector<ReportID>, u32> &reports_cache,
- const ue2::unordered_set<NFAEdge> &exceptional,
+u32 buildExceptionMap(const build_info &args, ReportListCache &reports_cache,
+ const unordered_set<NFAEdge> &exceptional,
map<ExceptionProto, vector<u32>> &exceptionMap,
vector<ReportID> &reportList) {
const NGHolder &h = args.h;
const u32 num_states = args.num_states;
u32 exceptionCount = 0;
- ue2::unordered_map<NFAVertex, u32> pos_trigger;
- ue2::unordered_map<NFAVertex, u32> tug_trigger;
+ unordered_map<NFAVertex, u32> pos_trigger;
+ unordered_map<NFAVertex, u32> tug_trigger;
for (u32 i = 0; i < args.repeats.size(); i++) {
const BoundedRepeatData &br = args.repeats[i];
static
void findExceptionalTransitions(const build_info &args,
- ue2::unordered_set<NFAEdge> &exceptional,
+ unordered_set<NFAEdge> &exceptional,
u32 maxShift) {
const NGHolder &h = args.h;
// We track report lists that have already been written into the global
// list in case we can reuse them.
- unordered_map<vector<ReportID>, u32> reports_cache;
+ ReportListCache reports_cache;
- ue2::unordered_set<NFAEdge> exceptional;
+ unordered_set<NFAEdge> exceptional;
u32 shiftCount = findBestNumOfVarShifts(args);
assert(shiftCount);
u32 maxShift = findMaxVarShift(args, shiftCount);
// Some sanity tests, called by an assertion in generate().
static UNUSED
bool isSane(const NGHolder &h, const map<u32, set<NFAVertex>> &tops,
- const ue2::unordered_map<NFAVertex, u32> &state_ids,
+ const unordered_map<NFAVertex, u32> &state_ids,
u32 num_states) {
- ue2::unordered_set<u32> seen;
- ue2::unordered_set<NFAVertex> top_starts;
+ unordered_set<u32> seen;
+ unordered_set<NFAVertex> top_starts;
for (const auto &vv : tops | map_values) {
insert(&top_starts, vv);
}
#endif // NDEBUG
static
-u32 max_state(const ue2::unordered_map<NFAVertex, u32> &state_ids) {
+u32 max_state(const unordered_map<NFAVertex, u32> &state_ids) {
u32 rv = 0;
for (const auto &m : state_ids) {
DEBUG_PRINTF("state %u\n", m.second);
}
bytecode_ptr<NFA> generate(NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> &states,
+ const unordered_map<NFAVertex, u32> &states,
const vector<BoundedRepeatData> &repeats,
const map<NFAVertex, NFAStateSet> &reportSquashMap,
const map<NFAVertex, NFAStateSet> &squashMap,
}
u32 countAccelStates(NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> &states,
+ const unordered_map<NFAVertex, u32> &states,
const vector<BoundedRepeatData> &repeats,
const map<NFAVertex, NFAStateSet> &reportSquashMap,
const map<NFAVertex, NFAStateSet> &squashMap,
#ifndef LIMEX_COMPILE_H
#define LIMEX_COMPILE_H
-#include <map>
-#include <memory>
-#include <vector>
-
#include "nfagraph/ng_holder.h"
#include "nfagraph/ng_squash.h" // for NFAStateSet
#include "ue2common.h"
#include "util/bytecode_ptr.h"
-#include "util/ue2_containers.h"
+
+#include <set>
+#include <map>
+#include <memory>
+#include <unordered_map>
+#include <vector>
struct NFA;
* graph.
*/
bytecode_ptr<NFA> generate(NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &states,
+ const std::unordered_map<NFAVertex, u32> &states,
const std::vector<BoundedRepeatData> &repeats,
const std::map<NFAVertex, NFAStateSet> &reportSquashMap,
const std::map<NFAVertex, NFAStateSet> &squashMap,
* implementable.
*/
u32 countAccelStates(NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> &states,
+ const std::unordered_map<NFAVertex, u32> &states,
const std::vector<BoundedRepeatData> &repeats,
const std::map<NFAVertex, NFAStateSet> &reportSquashMap,
const std::map<NFAVertex, NFAStateSet> &squashMap,
#include "util/make_unique.h"
#include "util/order_check.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/unaligned.h"
#include "util/verify_types.h"
#include "rdfa.h"
#include "ue2common.h"
#include "util/bytecode_ptr.h"
-#include "util/ue2_containers.h"
#include <memory>
#include <vector>
#include "rdfa.h"
#include "util/container.h"
-#include "util/ue2_containers.h"
+#include "util/hash.h"
#include "ue2common.h"
#include <deque>
-
-#include <boost/functional/hash/hash.hpp>
+#include <map>
using namespace std;
}
size_t hash_dfa_no_reports(const raw_dfa &rdfa) {
- using boost::hash_combine;
- using boost::hash_range;
-
size_t v = 0;
hash_combine(v, rdfa.alpha_size);
- hash_combine(v, hash_range(begin(rdfa.alpha_remap), end(rdfa.alpha_remap)));
+ hash_combine(v, rdfa.alpha_remap);
for (const auto &ds : rdfa.states) {
- hash_combine(v, hash_range(begin(ds.next), end(ds.next)));
+ hash_combine(v, ds.next);
}
return v;
}
size_t hash_dfa(const raw_dfa &rdfa) {
- using boost::hash_combine;
size_t v = 0;
hash_combine(v, hash_dfa_no_reports(rdfa));
hash_combine(v, all_reports(rdfa));
#include "util/compare.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
#include "util/order_check.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include "util/unaligned.h"
+#include "util/unordered.h"
#include "util/verify_types.h"
#include <algorithm>
#define MAX_SHENG_STATES 16
#define MAX_SHENG_LEAKINESS 0.05
+using LeakinessCache = ue2_unordered_map<pair<RdfaVertex, u32>, double>;
+
/**
* Returns the proportion of strings of length 'depth' which will leave the
* sheng region when starting at state 'u'.
static
double leakiness(const RdfaGraph &g, dfa_info &info,
const flat_set<RdfaVertex> &sheng_states, RdfaVertex u,
- u32 depth,
- unordered_map<pair<RdfaVertex, u32>, double> &cache) {
+ u32 depth, LeakinessCache &cache) {
double rv = 0;
if (contains(cache, make_pair(u, depth))) {
return cache[make_pair(u, depth)];
static
double leakiness(const RdfaGraph &g, dfa_info &info,
const flat_set<RdfaVertex> &sheng_states, RdfaVertex u) {
- unordered_map<pair<RdfaVertex, u32>, double> cache;
+ LeakinessCache cache;
double rv = leakiness(g, info, sheng_states, u, 8, cache);
return rv;
}
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "nfa_kind.h"
#include "ue2common.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <array>
#include <vector>
#include "nfagraph/ng_mcclellan_internal.h"
#include "util/container.h"
#include "util/determinise.h"
+#include "util/flat_containers.h"
#include "util/make_unique.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <algorithm>
#include <queue>
class Automaton_Merge {
public:
using StateSet = vector<u16>;
- using StateMap = unordered_map<StateSet, dstate_id_t>;
+ using StateMap = ue2_unordered_map<StateSet, dstate_id_t>;
Automaton_Merge(const raw_dfa *rdfa1, const raw_dfa *rdfa2,
const ReportManager *rm_in, const Grey &grey_in)
#include "rdfa.h"
#include "util/bytecode_ptr.h"
#include "util/charreach.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
+
+#include <memory>
+#include <set>
struct NFA;
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ue2common.h"
#include "util/charreach.h"
#include "util/container.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <array>
#include <cassert>
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ue2common.h"
#include "util/charreach.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <utility>
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* truffle is always able to represent an entire character class, providing a
* backstop to other acceleration engines.
*/
+
#include "trufflecompile.h"
+
#include "ue2common.h"
#include "util/charreach.h"
+#include "util/dump_mask.h"
#include "util/simd_types.h"
-#include "util/dump_mask.h"
+#include <cstring>
using namespace std;
#include "util/graph.h"
#include "util/noncopyable.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include <deque>
#include <map>
return;
}
- ue2::unordered_map<NFAVertex, NFAUndirectedVertex> old2new;
+ unordered_map<NFAVertex, NFAUndirectedVertex> old2new;
auto ug = createUnGraph(*g, true, true, old2new);
// Construct reverse mapping.
- ue2::unordered_map<NFAUndirectedVertex, NFAVertex> new2old;
+ unordered_map<NFAUndirectedVertex, NFAVertex> new2old;
for (const auto &m : old2new) {
new2old.emplace(m.second, m.first);
}
DEBUG_PRINTF("vertex %zu is in comp %u\n", (*g)[v].index, c);
}
- ue2::unordered_map<NFAVertex, NFAVertex> v_map; // temp map for fillHolder
+ unordered_map<NFAVertex, NFAVertex> v_map; // temp map for fillHolder
for (auto &vv : verts) {
// Shells are in every component.
vv.insert(vv.end(), begin(head_shell), end(head_shell));
#include "ng_prune.h"
#include "ng_util.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <boost/graph/depth_first_search.hpp>
#include "ue2common.h"
#include "ng_holder.h"
#include "ng_util.h"
-#include "util/ue2_containers.h"
#include <boost-patched/graph/dominator_tree.hpp> // locally patched version
#include <boost-patched/graph/reverse_graph.hpp>
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#define NG_DOMINATORS_H
#include "ng_holder.h"
-#include "util/ue2_containers.h"
-namespace ue2 {
+#include <unordered_map>
-class NGHolder;
+namespace ue2 {
-ue2::unordered_map<NFAVertex, NFAVertex> findDominators(const NGHolder &g);
+std::unordered_map<NFAVertex, NFAVertex> findDominators(const NGHolder &g);
-ue2::unordered_map<NFAVertex, NFAVertex> findPostDominators(const NGHolder &g);
+std::unordered_map<NFAVertex, NFAVertex> findPostDominators(const NGHolder &g);
} // namespace ue2
: g(g_in), rm(&rm_in) {}
NFAWriter(const GraphT &g_in,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map_in)
+ const unordered_map<NFAVertex, u32> ®ion_map_in)
: g(g_in), region_map(®ion_map_in) {}
void operator()(ostream& os, const VertexT& v) const {
private:
const GraphT &g;
const ReportManager *rm = nullptr;
- const ue2::unordered_map<NFAVertex, u32> *region_map = nullptr;
+ const unordered_map<NFAVertex, u32> *region_map = nullptr;
};
}
template <typename GraphT>
void dumpGraphImpl(const char *name, const GraphT &g,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const unordered_map<NFAVertex, u32> ®ion_map) {
typedef typename boost::graph_traits<GraphT>::vertex_descriptor VertexT;
typedef typename boost::graph_traits<GraphT>::edge_descriptor EdgeT;
ofstream os(name);
}
void dumpHolderImpl(const NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
unsigned int stageNumber, const char *stageName,
const Grey &grey) {
if (grey.dumpFlags & Grey::DUMP_INT_GRAPH) {
#include "grey.h"
#include "ng_holder.h" // for graph types
#include "ue2common.h"
-#include "util/ue2_containers.h"
+
+#include <unordered_map>
#ifdef DUMP_SUPPORT
#include <fstream>
// Variant that takes a region map as well.
void dumpHolderImpl(const NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const std::unordered_map<NFAVertex, u32> ®ion_map,
unsigned int stageNumber, const char *stageName,
const Grey &grey);
UNUSED static inline
void dumpHolder(UNUSED const NGHolder &h,
- UNUSED const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ UNUSED const std::unordered_map<NFAVertex, u32> ®ion_map,
UNUSED unsigned int stageNumber, UNUSED const char *name,
UNUSED const Grey &grey) {
#ifdef DUMP_SUPPORT
#include "parser/position.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
#include <set>
#include <vector>
#include "ng_holder.h"
#include "ng_util.h"
#include "util/compile_context.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <algorithm>
#include <memory>
vertex_flags == b.vertex_flags && rs == b.rs;
}
- friend size_t hash_value(const ClassInfo &c) {
- size_t val = 0;
- boost::hash_combine(val, c.rs);
- boost::hash_combine(val, c.vertex_flags);
- boost::hash_combine(val, c.cr);
- boost::hash_combine(val, c.adjacent_cr);
- boost::hash_combine(val, c.node_type);
- boost::hash_combine(val, c.depth.d1);
- boost::hash_combine(val, c.depth.d2);
- return val;
+ size_t hash() const {
+ return hash_all(rs, vertex_flags, cr, adjacent_cr, node_type, depth.d1,
+ depth.d2);
}
private:
const size_t num_verts = infos.size();
vector<VertexInfoSet> classes;
- unordered_map<ClassInfo, unsigned> classinfomap;
+ ue2_unordered_map<ClassInfo, unsigned> classinfomap;
// assume we will have lots of classes, so we don't waste time resizing
// these structures.
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#define NG_EXECUTE_H
#include "ng_holder.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <vector>
#include "util/bitfield.h"
#include "util/container.h"
#include "util/determinise.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/hash_dynamic_bitset.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <algorithm>
#include <functional>
struct Graph_Traits {
using StateSet = bitfield<NFA_STATE_LIMIT>;
- using StateMap = ue2::unordered_map<StateSet, dstate_id_t>;
+ using StateMap = unordered_map<StateSet, dstate_id_t>;
static StateSet init_states(UNUSED u32 num) {
assert(num <= NFA_STATE_LIMIT);
class Automaton_Haig_Merge {
public:
using StateSet = vector<u16>;
- using StateMap = unordered_map<StateSet, dstate_id_t>;
+ using StateMap = ue2_unordered_map<StateSet, dstate_id_t>;
explicit Automaton_Haig_Merge(const vector<const raw_som_dfa *> &in)
: nfas(in.begin(), in.end()), dead(in.size()) {
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ue2common.h"
#include "nfa/nfa_kind.h"
#include "util/charreach.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/ue2_graph.h"
namespace ue2 {
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_util.h"
#include "ue2common.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
-
-#include <set>
-
-#include <boost/functional/hash/hash.hpp>
using namespace std;
size_t rv = 0;
for (auto v : vertices_range(g)) {
- boost::hash_combine(rv, g[v].index);
- boost::hash_combine(rv, g[v].char_reach);
+ hash_combine(rv, g[v].index);
+ hash_combine(rv, g[v].char_reach);
for (auto w : adjacent_vertices_range(v, g)) {
- boost::hash_combine(rv, g[w].index);
+ hash_combine(rv, g[w].index);
}
}
#include "util/container.h"
#include "util/graph_range.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/verify_types.h"
#include <algorithm>
#include <map>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include <boost/range/adaptor/map.hpp>
// Only used in assertions.
static
bool sanityCheckGraph(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &state_ids) {
- ue2::unordered_set<u32> seen_states;
+ const unordered_map<NFAVertex, u32> &state_ids) {
+ unordered_set<u32> seen_states;
for (auto v : vertices_range(g)) {
// Non-specials should have non-empty reachability.
static
set<NFAVertex> findZombies(const NGHolder &h,
const map<NFAVertex, BoundedRepeatSummary> &br_cyclic,
- const ue2::unordered_map<NFAVertex, u32> &state_ids,
+ const unordered_map<NFAVertex, u32> &state_ids,
const CompileContext &cc) {
set<NFAVertex> zombies;
if (!cc.grey.allowZombies) {
}
static
-void reverseStateOrdering(ue2::unordered_map<NFAVertex, u32> &state_ids) {
+void reverseStateOrdering(unordered_map<NFAVertex, u32> &state_ids) {
vector<NFAVertex> ordering;
for (auto &e : state_ids) {
if (e.second == NO_STATE) {
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
bool impl_test_only, const CompileContext &cc,
- ue2::unordered_map<NFAVertex, u32> &state_ids,
+ unordered_map<NFAVertex, u32> &state_ids,
vector<BoundedRepeatData> &repeats,
map<u32, set<NFAVertex>> &tops) {
assert(is_triggered(h_in) || fixed_depth_tops.empty());
assert(rm);
}
- ue2::unordered_map<NFAVertex, u32> state_ids;
+ unordered_map<NFAVertex, u32> state_ids;
vector<BoundedRepeatData> repeats;
map<u32, set<NFAVertex>> tops;
unique_ptr<NGHolder> h
* resultant NGHolder has <= NFA_MAX_STATES. If it does, we know we can
* implement it as an NFA. */
- ue2::unordered_map<NFAVertex, u32> state_ids;
+ unordered_map<NFAVertex, u32> state_ids;
vector<BoundedRepeatData> repeats;
map<u32, set<NFAVertex>> tops;
unique_ptr<NGHolder> h
const map<u32, u32> fixed_depth_tops; // empty
const map<u32, vector<vector<CharReach>>> triggers; // empty
- ue2::unordered_map<NFAVertex, u32> state_ids;
+ unordered_map<NFAVertex, u32> state_ids;
vector<BoundedRepeatData> repeats;
map<u32, set<NFAVertex>> tops;
unique_ptr<NGHolder> h
#include "nfa/accelcompile.h"
#include "util/accel_scheme.h"
#include "util/charreach.h"
+#include "util/flat_containers.h"
#include "util/order_check.h"
-#include "util/ue2_containers.h"
#include <map>
#include <vector>
}
assert(u != g.startDs);
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
vector<NFAVertex> pivots = make_vector_from(adjacent_vertices(u, g));
splitRHS(g, pivots, rhs, &rhs_map);
#include "util/graph_range.h"
#include "util/ue2string.h"
+#include <unordered_set>
+
using namespace std;
namespace ue2 {
bool changed = false;
set<NFAVertex> dead;
- ue2::unordered_set<NFAVertex> unanchored; // for faster lookup.
+ unordered_set<NFAVertex> unanchored; // for faster lookup.
insert(&unanchored, adjacent_vertices(g.startDs, g));
// Anchored literals.
#include "ue2common.h"
#include "util/bitfield.h"
#include "util/determinise.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/hash.h"
#include "util/hash_dynamic_bitset.h"
#include "util/make_unique.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <functional>
#include <map>
#include <set>
+#include <unordered_map>
#include <vector>
#include <boost/dynamic_bitset.hpp>
struct Graph_Traits {
using StateSet = bitfield<NFA_STATE_LIMIT>;
- using StateMap = ue2::unordered_map<StateSet, dstate_id_t>;
+ using StateMap = unordered_map<StateSet, dstate_id_t>;
static StateSet init_states(UNUSED u32 num) {
assert(num <= NFA_STATE_LIMIT);
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "nfagraph/ng_holder.h"
#include "util/charreach.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <boost/dynamic_bitset.hpp>
#include "util/container.h"
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "ue2common.h"
#include <boost/dynamic_bitset.hpp>
#include "util/compile_context.h"
#include "util/container.h"
#include "util/dump_charclass.h"
-#include "util/ue2_containers.h"
#include "util/graph_range.h"
#include <queue>
+#include <unordered_map>
+#include <unordered_set>
#include <boost/range/adaptor/map.hpp>
static
void findWidths(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
RegionInfo &ri) {
NGHolder rg;
- ue2::unordered_map<NFAVertex, NFAVertex> mapping;
+ unordered_map<NFAVertex, NFAVertex> mapping;
fillHolder(&rg, g, ri.vertices, &mapping);
// Wire our entries to start and our exits to accept.
// acc can be either h.accept or h.acceptEod.
static
void markBoundaryRegions(const NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
map<u32, RegionInfo> ®ions, NFAVertex acc) {
for (auto v : inv_adjacent_vertices_range(acc, h)) {
if (is_special(v, h)) {
static
map<u32, RegionInfo> findRegionInfo(const NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const unordered_map<NFAVertex, u32> ®ion_map) {
map<u32, RegionInfo> regions;
for (auto v : vertices_range(h)) {
if (is_special(v, h)) {
static
bool isDominatedByReporter(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, NFAVertex> &dom,
+ const unordered_map<NFAVertex, NFAVertex> &dom,
NFAVertex v, ReportID report_id) {
for (auto it = dom.find(v); it != end(dom); it = dom.find(v)) {
NFAVertex u = it->second;
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_util.h"
#include "ue2common.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <cassert>
static
void findCyclicDom(NGHolder &g, vector<bool> &cyclic,
set<NFAEdge> &dead, som_type som) {
- ue2::unordered_map<NFAVertex, NFAVertex> dominators = findDominators(g);
+ auto dominators = findDominators(g);
for (auto v : vertices_range(g)) {
if (is_special(v, g)) {
static
void findCyclicPostDom(NGHolder &g, vector<bool> &cyclic,
set<NFAEdge> &dead) {
- ue2::unordered_map<NFAVertex, NFAVertex> postdominators =
- findPostDominators(g);
+ auto postdominators = findPostDominators(g);
for (auto v : vertices_range(g)) {
if (is_special(v, g)) {
#include "ng_util.h"
#include "ue2common.h"
#include "util/container.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_holder.h"
#include "util/container.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+#include <unordered_map>
#include <vector>
namespace ue2 {
/** \brief Assign a region ID to every vertex in the graph. */
-ue2::unordered_map<NFAVertex, u32> assignRegions(const NGHolder &g);
+std::unordered_map<NFAVertex, u32> assignRegions(const NGHolder &g);
/** \brief True if vertices \p a and \p b are in the same region. */
template <class Graph>
bool inSameRegion(const Graph &g, NFAVertex a, NFAVertex b,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
assert(contains(region_map, a) && contains(region_map, b));
return region_map.at(a) == region_map.at(b) &&
/** \brief True if vertex \p b is in a later region than vertex \p a. */
template <class Graph>
bool inLaterRegion(const Graph &g, NFAVertex a, NFAVertex b,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
assert(contains(region_map, a) && contains(region_map, b));
u32 aa = g[a].index;
/** \brief True if vertex \p b is in an earlier region than vertex \p a. */
template <class Graph>
bool inEarlierRegion(const Graph &g, NFAVertex a, NFAVertex b,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
assert(contains(region_map, a) && contains(region_map, b));
u32 aa = g[a].index;
/** \brief True if vertex \p v is an entry vertex for its region. */
template <class Graph>
bool isRegionEntry(const Graph &g, NFAVertex v,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
// Note that some graph types do not have inv_adjacent_vertices, so we must
// use in_edges here.
for (const auto &e : in_edges_range(v, g)) {
/** \brief True if vertex \p v is an exit vertex for its region. */
template <class Graph>
bool isRegionExit(const Graph &g, NFAVertex v,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
for (auto w : adjacent_vertices_range(v, g)) {
if (!inSameRegion(g, v, w, region_map)) {
return true;
/** \brief True if vertex \p v is in a region all on its own. */
template <class Graph>
bool isSingletonRegion(const Graph &g, NFAVertex v,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
for (const auto &e : in_edges_range(v, g)) {
auto u = source(e, g);
if (u != v && inSameRegion(g, v, u, region_map)) {
*/
template <class Graph>
bool isOptionalRegion(const Graph &g, NFAVertex v,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const std::unordered_map<NFAVertex, u32> ®ion_map) {
assert(isRegionEntry(g, v, region_map));
DEBUG_PRINTF("check if r%u is optional (inspecting v%zu)\n",
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
static
bool regionHasUnexpectedAccept(const NGHolder &g, const u32 region,
const flat_set<ReportID> &expected_reports,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const unordered_map<NFAVertex, u32> ®ion_map) {
/* TODO: only check vertices connected to accept/acceptEOD */
for (auto v : vertices_range(g)) {
if (region != region_map.at(v)) {
static
void processCyclicStateForward(NGHolder &h, NFAVertex cyc,
const map<u32, RegionInfo> &info,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
set<u32> &deadRegions) {
u32 region = region_map.at(cyc);
CharReach cr = h[cyc].char_reach;
static
void processCyclicStateReverse(NGHolder &h, NFAVertex cyc,
const map<u32, RegionInfo> &info,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
set<u32> &deadRegions) {
u32 region = region_map.at(cyc);
CharReach cr = h[cyc].char_reach;
static
map<u32, RegionInfo> buildRegionInfoMap(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map) {
+ const unordered_map<NFAVertex, u32> ®ion_map) {
map<u32, RegionInfo> info;
for (auto v : vertices_range(g)) {
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
#include "util/report_manager.h"
+#include "util/unordered.h"
#include <algorithm>
#include <map>
#include <queue>
+#include <unordered_map>
+#include <unordered_set>
#include <boost/graph/connected_components.hpp>
#include <boost/graph/depth_first_search.hpp>
using namespace std;
using boost::depth_first_search;
using boost::depth_first_visit;
+using boost::make_assoc_property_map;
namespace ue2 {
static
void findInitDepths(const NGHolder &g,
- ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths) {
+ unordered_map<NFAVertex, NFAVertexDepth> &depths) {
auto d = calcDepths(g);
for (auto v : vertices_range(g)) {
/* Note: RepeatGraph is a filtered version of NGHolder and still has
* NFAVertex as its vertex descriptor */
- typedef ue2::unordered_set<NFAEdge> EdgeSet;
+ typedef unordered_set<NFAEdge> EdgeSet;
EdgeSet deadEdges;
// We don't have indices spanning [0,N] on our filtered graph, so we
// provide a colour map.
- ue2::unordered_map<NFAVertex, boost::default_color_type> colours;
+ unordered_map<NFAVertex, boost::default_color_type> colours;
depth_first_search(g, visitor(BackEdges<EdgeSet>(deadEdges)).
color_map(make_assoc_property_map(colours)));
static
void proper_pred(const NGHolder &g, NFAVertex v,
- ue2::unordered_set<NFAVertex> &p) {
+ unordered_set<NFAVertex> &p) {
pred(g, v, &p);
p.erase(v); // self-loops
}
static
void proper_succ(const NGHolder &g, NFAVertex v,
- ue2::unordered_set<NFAVertex> &s) {
+ unordered_set<NFAVertex> &s) {
succ(g, v, &s);
s.erase(v); // self-loops
}
static
bool roguePredecessor(const NGHolder &g, NFAVertex v,
- const ue2::unordered_set<NFAVertex> &involved,
- const ue2::unordered_set<NFAVertex> &pred) {
+ const unordered_set<NFAVertex> &involved,
+ const unordered_set<NFAVertex> &pred) {
u32 seen = 0;
for (auto u : inv_adjacent_vertices_range(v, g)) {
static
bool rogueSuccessor(const NGHolder &g, NFAVertex v,
- const ue2::unordered_set<NFAVertex> &involved,
- const ue2::unordered_set<NFAVertex> &succ) {
+ const unordered_set<NFAVertex> &involved,
+ const unordered_set<NFAVertex> &succ) {
u32 seen = 0;
for (auto w : adjacent_vertices_range(v, g)) {
if (contains(involved, w)) {
static
bool vertexIsBad(const NGHolder &g, NFAVertex v,
- const ue2::unordered_set<NFAVertex> &involved,
- const ue2::unordered_set<NFAVertex> &tail,
- const ue2::unordered_set<NFAVertex> &pred,
- const ue2::unordered_set<NFAVertex> &succ,
+ const unordered_set<NFAVertex> &involved,
+ const unordered_set<NFAVertex> &tail,
+ const unordered_set<NFAVertex> &pred,
+ const unordered_set<NFAVertex> &succ,
const flat_set<ReportID> &reports) {
DEBUG_PRINTF("check vertex %zu\n", g[v].index);
// We construct a copy of the graph using just the vertices we want, rather
// than using a filtered_graph -- this way is faster.
NGHolder verts_g;
- ue2::unordered_map<NFAVertex, NFAVertex> verts_map; // in g -> in verts_g
+ unordered_map<NFAVertex, NFAVertex> verts_map; // in g -> in verts_g
fillHolder(&verts_g, g, verts, &verts_map);
- ue2::unordered_map<NFAVertex, NFAUndirectedVertex> old2new;
+ unordered_map<NFAVertex, NFAUndirectedVertex> old2new;
auto ug = createUnGraph(verts_g, true, true, old2new);
- ue2::unordered_map<NFAUndirectedVertex, u32> repeatMap;
+ unordered_map<NFAUndirectedVertex, u32> repeatMap;
size_t num = connected_components(ug, make_assoc_property_map(repeatMap));
DEBUG_PRINTF("found %zu connected repeat components\n", num);
continue;
}
- ue2::unordered_set<NFAVertex> involved(rsi.vertices.begin(),
- rsi.vertices.end());
- ue2::unordered_set<NFAVertex> tail(involved); // to look for back-edges.
- ue2::unordered_set<NFAVertex> pred, succ;
+ unordered_set<NFAVertex> involved(rsi.vertices.begin(),
+ rsi.vertices.end());
+ unordered_set<NFAVertex> tail(involved); // to look for back-edges.
+ unordered_set<NFAVertex> pred, succ;
proper_pred(g, rsi.vertices.front(), pred);
proper_succ(g, rsi.vertices.back(), succ);
NFAVertex first = rsi.vertices.front();
NFAVertex last = rsi.vertices.back();
- typedef ue2::unordered_map<NFAVertex, DistanceSet> DistanceMap;
+ typedef unordered_map<NFAVertex, DistanceSet> DistanceMap;
DistanceMap dist;
// Initial distance sets.
static
bool allPredsInSubgraph(NFAVertex v, const NGHolder &g,
- const ue2::unordered_set<NFAVertex> &involved) {
+ const unordered_set<NFAVertex> &involved) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
if (!contains(involved, u)) {
return false;
static
void buildTugTrigger(NGHolder &g, NFAVertex cyclic, NFAVertex v,
- const ue2::unordered_set<NFAVertex> &involved,
- ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ const unordered_set<NFAVertex> &involved,
+ unordered_map<NFAVertex, NFAVertexDepth> &depths,
vector<NFAVertex> &tugs) {
if (allPredsInSubgraph(v, g, involved)) {
// We can transform this vertex into a tug trigger in-place.
static
void unpeelNearEnd(NGHolder &g, ReachSubgraph &rsi,
- ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ unordered_map<NFAVertex, NFAVertexDepth> &depths,
vector<NFAVertex> *succs) {
u32 unpeel = unpeelAmount(g, rsi);
DEBUG_PRINTF("unpeeling %u vertices\n", unpeel);
static
void replaceSubgraphWithSpecial(NGHolder &g, ReachSubgraph &rsi,
vector<BoundedRepeatData> *repeats,
- ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- ue2::unordered_set<NFAVertex> &created) {
+ unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ unordered_set<NFAVertex> &created) {
assert(!rsi.bad);
assert(rsi.repeatMin > depth(0));
assert(rsi.repeatMax >= rsi.repeatMin);
DEBUG_PRINTF("entry\n");
- const ue2::unordered_set<NFAVertex> involved(rsi.vertices.begin(),
+ const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
rsi.vertices.end());
vector<NFAVertex> succs;
getSuccessors(g, rsi, &succs);
static
void replaceSubgraphWithLazySpecial(NGHolder &g, ReachSubgraph &rsi,
vector<BoundedRepeatData> *repeats,
- ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- ue2::unordered_set<NFAVertex> &created) {
+ unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ unordered_set<NFAVertex> &created) {
assert(!rsi.bad);
assert(rsi.repeatMin);
assert(rsi.repeatMax >= rsi.repeatMin);
DEBUG_PRINTF("entry\n");
- const ue2::unordered_set<NFAVertex> involved(rsi.vertices.begin(),
- rsi.vertices.end());
+ const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
+ rsi.vertices.end());
vector<NFAVertex> succs;
getSuccessors(g, rsi, &succs);
* involved in other repeats as a result of earlier repeat transformations. */
static
bool peelSubgraph(const NGHolder &g, const Grey &grey, ReachSubgraph &rsi,
- const ue2::unordered_set<NFAVertex> &created) {
+ const unordered_set<NFAVertex> &created) {
assert(!rsi.bad);
if (created.empty()) {
* idea to extend to cyclic states, too. */
static
void peelStartDotStar(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- const Grey &grey, ReachSubgraph &rsi) {
+ const unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ const Grey &grey, ReachSubgraph &rsi) {
if (rsi.vertices.size() < 1) {
return;
}
/* depth info is valid as calculated at entry */
static
bool entered_at_fixed_offset(NFAVertex v, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- const ue2::unordered_set<NFAVertex> &reached_by_fixed_tops) {
+ const unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ const unordered_set<NFAVertex> &reached_by_fixed_tops) {
DEBUG_PRINTF("|reached_by_fixed_tops| %zu\n",
reached_by_fixed_tops.size());
if (is_triggered(g) && !contains(reached_by_fixed_tops, v)) {
*/
static
void filterMap(const NGHolder &subg,
- ue2::unordered_map<NFAVertex, NFAVertex> &vmap) {
+ unordered_map<NFAVertex, NFAVertex> &vmap) {
NGHolder::vertex_iterator vi, ve;
tie(vi, ve) = vertices(subg);
- const ue2::unordered_set<NFAVertex> remaining_verts(vi, ve);
+ const unordered_set<NFAVertex> remaining_verts(vi, ve);
- ue2::unordered_map<NFAVertex, NFAVertex> fmap; // filtered map
+ unordered_map<NFAVertex, NFAVertex> fmap; // filtered map
for (const auto &m : vmap) {
if (contains(remaining_verts, m.second)) {
* the bounded repeat. */
static
void buildRepeatGraph(NGHolder &rg,
- ue2::unordered_map<NFAVertex, NFAVertex> &rg_map,
+ unordered_map<NFAVertex, NFAVertex> &rg_map,
const NGHolder &g, const ReachSubgraph &rsi,
const map<u32, vector<vector<CharReach>>> &triggers) {
cloneHolder(rg, g, &rg_map);
add_edge(rg.accept, rg.acceptEod, rg);
// Find the set of vertices in rg involved in the repeat.
- ue2::unordered_set<NFAVertex> rg_involved;
+ unordered_set<NFAVertex> rg_involved;
for (const auto &v : rsi.vertices) {
assert(contains(rg_map, v));
rg_involved.insert(rg_map.at(v));
*/
static
void buildInputGraph(NGHolder &lhs,
- ue2::unordered_map<NFAVertex, NFAVertex> &lhs_map,
+ unordered_map<NFAVertex, NFAVertex> &lhs_map,
const NGHolder &g, const NFAVertex first,
const map<u32, vector<vector<CharReach>>> &triggers) {
DEBUG_PRINTF("building lhs with first=%zu\n", g[first].index);
* single offset at runtime. See UE-1361. */
static
bool hasSoleEntry(const NGHolder &g, const ReachSubgraph &rsi,
- const ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- const ue2::unordered_set<NFAVertex> &reached_by_fixed_tops,
+ const unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ const unordered_set<NFAVertex> &reached_by_fixed_tops,
const map<u32, vector<vector<CharReach>>> &triggers) {
DEBUG_PRINTF("checking repeat {%s,%s}\n", rsi.repeatMin.str().c_str(),
rsi.repeatMax.str().c_str());
}
NGHolder rg;
- ue2::unordered_map<NFAVertex, NFAVertex> rg_map;
+ unordered_map<NFAVertex, NFAVertex> rg_map;
buildRepeatGraph(rg, rg_map, g, rsi, triggers);
assert(rg.kind == g.kind);
NGHolder lhs;
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
buildInputGraph(lhs, lhs_map, g, first, triggers);
assert(lhs.kind == g.kind);
// are in one region, vertices in the bounded repeat are in another.
const u32 lhs_region = 1;
const u32 repeat_region = 2;
- ue2::unordered_map<NFAVertex, u32> region_map;
+ unordered_map<NFAVertex, u32> region_map;
for (const auto &v : rsi.vertices) {
assert(!is_special(v, g)); // no specials in repeats
NFAVertex walk(NFAVertex v, vector<NFAVertex> &straw) const {
DEBUG_PRINTF("walk from %zu\n", g[v].index);
- ue2::unordered_set<NFAVertex> visited;
+ unordered_set<NFAVertex> visited;
straw.clear();
while (!is_special(v, g)) {
assert(!done.empty());
// Convert our path list into a set of unique triggers.
- ue2::unordered_set<vector<CharReach>> unique_triggers;
+ ue2_unordered_set<vector<CharReach>> unique_triggers;
for (const auto &path : done) {
vector<CharReach> reach_path;
for (auto jt = path.rbegin(), jte = path.rend(); jt != jte; ++jt) {
void
selectHistoryScheme(const NGHolder &g, const ReportManager *rm,
ReachSubgraph &rsi,
- const ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths,
- const ue2::unordered_set<NFAVertex> &reached_by_fixed_tops,
+ const unordered_map<NFAVertex, NFAVertexDepth> &depths,
+ const unordered_set<NFAVertex> &reached_by_fixed_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
const vector<BoundedRepeatData> &all_repeats,
const bool simple_model_selection) {
static
void buildFeeder(NGHolder &g, const BoundedRepeatData &rd,
- ue2::unordered_set<NFAVertex> &created,
+ unordered_set<NFAVertex> &created,
const vector<NFAVertex> &straw) {
if (!g[rd.cyclic].char_reach.all()) {
// Create another cyclic feeder state with flipped reach. It has an
*/
static
bool improveLeadingRepeat(NGHolder &g, BoundedRepeatData &rd,
- ue2::unordered_set<NFAVertex> &created,
+ unordered_set<NFAVertex> &created,
const vector<BoundedRepeatData> &all_repeats) {
assert(edge(g.startDs, g.startDs, g).second);
*/
static
bool improveLeadingRepeatOutfix(NGHolder &g, BoundedRepeatData &rd,
- ue2::unordered_set<NFAVertex> &created,
+ unordered_set<NFAVertex> &created,
const vector<BoundedRepeatData> &all_repeats) {
assert(g.kind == NFA_OUTFIX);
namespace {
class pfti_visitor : public boost::default_dfs_visitor {
public:
- pfti_visitor(ue2::unordered_map<NFAVertex, depth> &top_depths_in,
+ pfti_visitor(unordered_map<NFAVertex, depth> &top_depths_in,
const depth &our_depth_in)
: top_depths(top_depths_in), our_depth(our_depth_in) {}
top_depths[v] = our_depth;
}
}
- ue2::unordered_map<NFAVertex, depth> &top_depths;
+ unordered_map<NFAVertex, depth> &top_depths;
const depth &our_depth;
};
} // namespace
}
assert(!proper_out_degree(g.startDs, g));
- ue2::unordered_map<NFAVertex, depth> top_depths;
+ unordered_map<NFAVertex, depth> top_depths;
auto colours = make_small_color_map(g);
for (const auto &e : out_edges_range(g.start, g)) {
static
bool hasOverlappingRepeats(UNUSED const NGHolder &g,
const vector<BoundedRepeatData> &repeats) {
- ue2::unordered_set<NFAVertex> involved;
+ unordered_set<NFAVertex> involved;
for (const auto &br : repeats) {
if (contains(involved, br.cyclic)) {
*/
static
bool repeatIsNasty(const NGHolder &g, const ReachSubgraph &rsi,
- const ue2::unordered_map<NFAVertex, NFAVertexDepth> &depths) {
+ const unordered_map<NFAVertex, NFAVertexDepth> &depths) {
if (num_vertices(g) > NFA_MAX_STATES) {
// We may have no choice but to implement this repeat to get the graph
// down to a tractable number of vertices.
// Later on, we're (a little bit) dependent on depth information for
// unpeeling and so forth. Note that these depths MUST be maintained when
// new vertices are added.
- ue2::unordered_map<NFAVertex, NFAVertexDepth> depths;
+ unordered_map<NFAVertex, NFAVertexDepth> depths;
findInitDepths(g, depths);
// Construct our list of subgraphs with the same reach using BGL magic.
// could make this unnecessary?
const unique_ptr<const NGHolder> orig_g(cloneHolder(g));
- ue2::unordered_set<NFAVertex> reached_by_fixed_tops;
+ unordered_set<NFAVertex> reached_by_fixed_tops;
if (is_triggered(g)) {
populateFixedTopInfo(fixed_depth_tops, g, &reached_by_fixed_tops);
}
// Go to town on the remaining acceptable subgraphs.
- ue2::unordered_set<NFAVertex> created;
+ unordered_set<NFAVertex> created;
for (auto &rsi : rs) {
DEBUG_PRINTF("subgraph (beginning vertex %zu) is a {%s,%s} repeat\n",
g[rsi.vertices.front()].index,
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ue2common.h"
#include "nfa/repeat_internal.h"
#include "util/depth.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <map>
#include <vector>
struct PureRepeat {
CharReach reach;
DepthMinMax bounds;
- ue2::flat_set<ReportID> reports;
+ flat_set<ReportID> reports;
bool operator==(const PureRepeat &a) const {
return reach == a.reach && bounds == a.bounds && reports == a.reports;
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
// Returns the number of states.
static
-ue2::unordered_map<NFAVertex, u32>
+unordered_map<NFAVertex, u32>
getStateIndices(const NGHolder &h, const vector<NFAVertex> &ordering) {
- ue2::unordered_map<NFAVertex, u32> states;
+ unordered_map<NFAVertex, u32> states;
for (const auto &v : vertices_range(h)) {
states[v] = NO_STATE;
}
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/** \file
* \brief State numbering and late graph restructuring code.
-
*/
+
#ifndef NG_RESTRUCTURING_H
#define NG_RESTRUCTURING_H
#include "ng_holder.h"
#include "ue2common.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
+
+#include <unordered_map>
namespace ue2 {
/**
* \brief Gives each participating vertex in the graph a unique state index.
*/
-unordered_map<NFAVertex, u32>
+std::unordered_map<NFAVertex, u32>
numberStates(NGHolder &h, const flat_set<NFAVertex> &tops);
/**
* \brief Counts the number of states (vertices with state indices) in the
* graph.
*/
-u32 countStates(const unordered_map<NFAVertex, u32> &state_ids);
+u32 countStates(const std::unordered_map<NFAVertex, u32> &state_ids);
} // namespace ue2
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "util/charreach.h"
#include "util/graph_range.h"
+#include <set>
+
using namespace std;
namespace ue2 {
#include <algorithm>
#include <map>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
using namespace std;
static
bool regionCanEstablishSom(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const u32 region, const vector<NFAVertex> &r_exits,
const vector<DepthMinMax> &depths) {
if (region == regions.at(g.accept) ||
static
void buildRegionMapping(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
map<u32, region_info> &info,
bool include_region_0 = false) {
for (auto v : vertices_range(g)) {
static
bool validateXSL(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const u32 region, const CharReach &escapes, u32 *bad_region) {
/* need to check that the escapes escape all of the graph past region */
u32 first_bad_region = ~0U;
static
bool validateEXSL(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const u32 region, const CharReach &escapes,
const NGHolder &prefix, u32 *bad_region) {
/* EXSL: To be a valid EXSL with escapes e, we require that all states
static
unique_ptr<NGHolder>
-makePrefix(const NGHolder &g, const ue2::unordered_map<NFAVertex, u32> ®ions,
+makePrefix(const NGHolder &g, const unordered_map<NFAVertex, u32> ®ions,
const region_info &curr, const region_info &next,
bool renumber = true) {
const vector<NFAVertex> &curr_exits = curr.exits;
deque<NFAVertex> lhs_verts;
insert(&lhs_verts, lhs_verts.end(), vertices(g));
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map; // g -> prefix
+ unordered_map<NFAVertex, NFAVertex> lhs_map; // g -> prefix
fillHolder(&prefix, g, lhs_verts, &lhs_map);
prefix.kind = NFA_OUTFIX;
// We need a reverse mapping to track regions.
- ue2::unordered_map<NFAVertex, NFAVertex> rev_map; // prefix -> g
+ unordered_map<NFAVertex, NFAVertex> rev_map; // prefix -> g
for (const auto &e : lhs_map) {
rev_map.emplace(e.second, e.first);
}
static
bool finalRegion(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
NFAVertex v) {
u32 region = regions.at(v);
for (auto w : adjacent_vertices_range(v, g)) {
static
void fillRoughMidfix(NGHolder *out, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
map<u32, region_info>::const_iterator picked) {
/* as we are not the first prefix, we are probably not acyclic. We need to
// (woot!); updates picked, plan and bad_region.
static
bool advancePlan(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const NGHolder &prefix, bool stuck,
map<u32, region_info>::const_iterator &picked,
const map<u32, region_info>::const_iterator furthest,
// Fetches the mappings of all preds of {accept, acceptEod} in this region.
static
void addMappedReporterVertices(const region_info &r, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, NFAVertex> &mapping,
+ const unordered_map<NFAVertex, NFAVertex> &mapping,
vector<NFAVertex> &reporters) {
for (auto v : r.exits) {
if (edge(v, g.accept, g).second || edge(v, g.acceptEod, g).second) {
DEBUG_PRINTF("adding v=%zu\n", g[v].index);
- ue2::unordered_map<NFAVertex, NFAVertex>::const_iterator it =
- mapping.find(v);
+ auto it = mapping.find(v);
assert(it != mapping.end());
reporters.push_back(it->second);
}
// from earlier regions.
static
void cloneGraphWithOneEntry(NGHolder &out, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
NFAVertex entry, const vector<NFAVertex> &enters,
- ue2::unordered_map<NFAVertex, NFAVertex> &orig_to_copy) {
+ unordered_map<NFAVertex, NFAVertex> &orig_to_copy) {
orig_to_copy.clear();
cloneHolder(out, g, &orig_to_copy);
}
static
-void expandGraph(NGHolder &g, ue2::unordered_map<NFAVertex, u32> ®ions,
+void expandGraph(NGHolder &g, unordered_map<NFAVertex, u32> ®ions,
vector<NFAVertex> &enters) {
assert(!enters.empty());
const u32 split_region = regions.at(enters.front());
static
bool doTreePlanningIntl(NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
map<u32, region_info>::const_iterator picked, u32 bad_region,
u32 parent_plan,
- const ue2::unordered_map<NFAVertex, NFAVertex> ©_to_orig,
+ const unordered_map<NFAVertex, NFAVertex> ©_to_orig,
vector<som_plan> &plan, const Grey &grey) {
assert(picked != info.end());
// regions.
NGHolder g_path;
- ue2::unordered_map<NFAVertex, NFAVertex> orig_to_copy;
+ unordered_map<NFAVertex, NFAVertex> orig_to_copy;
cloneGraphWithOneEntry(g_path, g, g_regions, v, enters, orig_to_copy);
auto regions = assignRegions(g_path);
dumpHolder(g_path, regions, 14, "som_treepath", grey);
}
// Construct reverse mapping from vertices in g_path to g.
- ue2::unordered_map<NFAVertex, NFAVertex> copy_to_orig;
+ unordered_map<NFAVertex, NFAVertex> copy_to_orig;
for (const auto &m : orig_to_copy) {
copy_to_orig.insert(make_pair(m.second, m.first));
}
static
bool doSomPlanning(NGHolder &g, bool stuck_in,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
map<u32, region_info>::const_iterator picked,
vector<som_plan> &plan,
static
bool attemptToBuildChainAfterSombe(SomSlotManager &ssm, NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
map<u32, region_info>::const_iterator picked,
const Grey &grey,
static
bool tryHaig(RoseBuild &rose, NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
som_type som, u32 somPrecision,
map<u32, region_info>::const_iterator picked,
shared_ptr<raw_som_dfa> *haig, shared_ptr<NGHolder> *haig_prefix,
static
sombe_rv doHaigLitSom(NG &ng, NGHolder &g, const ExpressionInfo &expr,
u32 comp_id, som_type som,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
map<u32, region_info>::const_iterator lower_bound) {
DEBUG_PRINTF("entry\n");
}
}
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
vector<NFAVertex> pivots;
insert(&pivots, pivots.end(), adj_term1);
splitRHS(g, pivots, rhs, &rhs_map);
static
void findBestLiteral(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
ue2_literal *lit_out, NFAVertex *v,
const CompileContext &cc) {
map<u32, region_info> info;
static
bool splitOffBestLiteral(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
ue2_literal *lit_out, NGHolder *lhs, NGHolder *rhs,
const CompileContext &cc) {
NFAVertex v = NGHolder::null_vertex();
DEBUG_PRINTF("literal is '%s'\n", dumpString(*lit_out).c_str());
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(g, v, lhs, &lhs_map, rhs, &rhs_map);
static
bool doHaigLitHaigSom(NG &ng, NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
som_type som) {
if (!ng.cc.grey.allowLitHaig) {
return false;
static
map<u32, region_info>::const_iterator pickInitialSomCut(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
const vector<DepthMinMax> &depths) {
map<u32, region_info>::const_iterator picked = info.end();
static
map<u32, region_info>::const_iterator tryForLaterRevNfaCut(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
const vector<DepthMinMax> &depths,
const map<u32, region_info>::const_iterator &orig,
static
unique_ptr<NGHolder> makePrefixForChain(NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ions,
+ const unordered_map<NFAVertex, u32> ®ions,
const map<u32, region_info> &info,
const map<u32, region_info>::const_iterator &picked,
vector<DepthMinMax> *depths, bool prefix_by_rev,
// We operate on a temporary copy of the original graph here, so we don't
// have to mutate the original.
NGHolder g;
- ue2::unordered_map<NFAVertex, NFAVertex> vmap; // vertex in g_orig to vertex in g
+ unordered_map<NFAVertex, NFAVertex> vmap; // vertex in g_orig to vertex in g
cloneHolder(g, g_orig, &vmap);
vector<NFAVertex> vstarts;
return false;
}
- ue2::flat_set<NFAVertex> states;
+ flat_set<NFAVertex> states;
/* turn on all states (except starts - avoid suffix matches) */
/* If we were doing (1) we would also except states leading to accepts -
avoid prefix matches */
}
bool somMayGoBackwards(NFAVertex u, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const unordered_map<NFAVertex, u32> &region_map,
smgb_cache &cache) {
/* Need to ensure all matches of the graph g up to u contain no infixes
* which are also matches of the graph to u.
}
}
- ue2::unordered_map<NFAVertex, NFAVertex> orig_to_copy;
+ unordered_map<NFAVertex, NFAVertex> orig_to_copy;
NGHolder c_g;
cloneHolder(c_g, g, &orig_to_copy);
}
bool sentClearsTail(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const unordered_map<NFAVertex, u32> &region_map,
const NGHolder &sent, u32 last_head_region,
u32 *bad_region) {
/* if a subsequent match from the prefix clears the rest of the pattern
*/
u32 first_bad_region = ~0U;
- ue2::flat_set<NFAVertex> states;
+ flat_set<NFAVertex> states;
/* turn on all states */
DEBUG_PRINTF("region %u is cutover\n", last_head_region);
for (auto v : vertices_range(g)) {
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_util.h"
#include "util/depth.h"
-#include "util/ue2_containers.h"
#include <map>
+#include <unordered_map>
#include <vector>
namespace ue2 {
};
bool somMayGoBackwards(NFAVertex u, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const std::unordered_map<NFAVertex, u32> &region_map,
smgb_cache &cache);
/**
* region ID associated with a tail state that is still on.
*/
bool sentClearsTail(const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const std::unordered_map<NFAVertex, u32> &region_map,
const NGHolder &sent, u32 last_head_region,
u32 *bad_region);
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "util/container.h"
#include "util/graph.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
#include <map>
#include <set>
}
static
-void filterSplitMap(const NGHolder &g, ue2::unordered_map<NFAVertex, NFAVertex> *out_map) {
- ue2::unordered_set<NFAVertex> verts;
+void filterSplitMap(const NGHolder &g,
+ unordered_map<NFAVertex, NFAVertex> *out_map) {
+ unordered_set<NFAVertex> verts;
insert(&verts, vertices(g));
- ue2::unordered_map<NFAVertex, NFAVertex>::iterator it = out_map->begin();
+ auto it = out_map->begin();
while (it != out_map->end()) {
- ue2::unordered_map<NFAVertex, NFAVertex>::iterator jt = it;
+ auto jt = it;
++it;
if (!contains(verts, jt->second)) {
out_map->erase(jt);
static
void splitLHS(const NGHolder &base, const vector<NFAVertex> &pivots,
- const vector<NFAVertex> &rhs_pivots,
- NGHolder *lhs, ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map) {
+ const vector<NFAVertex> &rhs_pivots, NGHolder *lhs,
+ unordered_map<NFAVertex, NFAVertex> *lhs_map) {
assert(lhs && lhs_map);
cloneHolder(*lhs, base, lhs_map);
}
void splitLHS(const NGHolder &base, NFAVertex pivot,
- NGHolder *lhs, ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map) {
+ NGHolder *lhs, unordered_map<NFAVertex, NFAVertex> *lhs_map) {
vector<NFAVertex> pivots(1, pivot);
vector<NFAVertex> rhs_pivots;
insert(&rhs_pivots, rhs_pivots.end(), adjacent_vertices(pivot, base));
}
void splitRHS(const NGHolder &base, const vector<NFAVertex> &pivots,
- NGHolder *rhs, ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map) {
+ NGHolder *rhs, unordered_map<NFAVertex, NFAVertex> *rhs_map) {
assert(rhs && rhs_map);
cloneHolder(*rhs, base, rhs_map);
}
void splitGraph(const NGHolder &base, const vector<NFAVertex> &pivots,
- NGHolder *lhs, ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map,
- NGHolder *rhs, ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map) {
+ NGHolder *lhs, unordered_map<NFAVertex, NFAVertex> *lhs_map,
+ NGHolder *rhs, unordered_map<NFAVertex, NFAVertex> *rhs_map) {
DEBUG_PRINTF("splitting graph at %zu vertices\n", pivots.size());
assert(!has_parallel_edge(base));
}
void splitGraph(const NGHolder &base, NFAVertex pivot,
- NGHolder *lhs, ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map,
- NGHolder *rhs, ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map) {
+ NGHolder *lhs, unordered_map<NFAVertex, NFAVertex> *lhs_map,
+ NGHolder *rhs, unordered_map<NFAVertex, NFAVertex> *rhs_map) {
vector<NFAVertex> pivots(1, pivot);
splitGraph(base, pivots, lhs, lhs_map, rhs, rhs_map);
}
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#ifndef NG_SPLIT_H
#define NG_SPLIT_H
-#include <vector>
-
#include "ng_holder.h"
-#include "util/ue2_containers.h"
+
+#include <unordered_map>
+#include <vector>
namespace ue2 {
* vertices which have an edge to every pivot
*/
void splitGraph(const NGHolder &base, NFAVertex pivot, NGHolder *lhs,
- ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map,
+ std::unordered_map<NFAVertex, NFAVertex> *lhs_map,
NGHolder *rhs,
- ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map);
+ std::unordered_map<NFAVertex, NFAVertex> *rhs_map);
void splitGraph(const NGHolder &base, const std::vector<NFAVertex> &pivots,
NGHolder *lhs,
- ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map,
+ std::unordered_map<NFAVertex, NFAVertex> *lhs_map,
NGHolder *rhs,
- ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map);
+ std::unordered_map<NFAVertex, NFAVertex> *rhs_map);
void splitLHS(const NGHolder &base, NFAVertex pivot, NGHolder *lhs,
- ue2::unordered_map<NFAVertex, NFAVertex> *lhs_map);
+ std::unordered_map<NFAVertex, NFAVertex> *lhs_map);
void splitRHS(const NGHolder &base, const std::vector<NFAVertex> &pivots,
- NGHolder *rhs, ue2::unordered_map<NFAVertex, NFAVertex> *rhs_map);
+ NGHolder *rhs, std::unordered_map<NFAVertex, NFAVertex> *rhs_map);
} // namespace ue2
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_region.h"
#include "ng_som_util.h"
#include "ng_util.h"
-#include "ng_util.h"
#include "util/container.h"
#include "util/graph_range.h"
#include "util/report_manager.h"
#include <deque>
#include <map>
+#include <unordered_map>
+#include <unordered_set>
#include <boost/graph/depth_first_search.hpp>
#include <boost/graph/reverse_graph.hpp>
namespace ue2 {
-typedef ue2::unordered_map<NFAVertex,
- ue2::unordered_set<NFAVertex> > PostDomTree;
+typedef unordered_map<NFAVertex, unordered_set<NFAVertex>> PostDomTree;
static
void buildPDomTree(const NGHolder &g, PostDomTree &tree) {
- ue2::unordered_map<NFAVertex, NFAVertex> postdominators =
- findPostDominators(g);
+ auto postdominators = findPostDominators(g);
for (auto v : vertices_range(g)) {
if (is_special(v, g)) {
const CharReach &cr, const NFAStateSet &init,
const vector<NFAVertex> &vByIndex, const PostDomTree &tree,
som_type som, const vector<DepthMinMax> &som_depths,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const unordered_map<NFAVertex, u32> &region_map,
smgb_cache &cache) {
DEBUG_PRINTF("build base squash mask for vertex %zu)\n", g[v].index);
const PostDomTree &pdom_tree, const NFAStateSet &init,
map<NFAVertex, NFAStateSet> *squash, som_type som,
const vector<DepthMinMax> &som_depths,
- const ue2::unordered_map<NFAVertex, u32> &region_map,
+ const unordered_map<NFAVertex, u32> &region_map,
smgb_cache &cache) {
deque<NFAVertex> remaining;
for (const auto &m : *squash) {
vector<NFAVertex> findUnreachable(const NGHolder &g) {
const boost::reverse_graph<NGHolder, const NGHolder &> revg(g);
- ue2::unordered_map<NFAVertex, boost::default_color_type> colours;
+ unordered_map<NFAVertex, boost::default_color_type> colours;
colours.reserve(num_vertices(g));
depth_first_visit(revg, g.acceptEod,
// cutting the appropriate out-edges to accept and seeing which
// vertices become unreachable.
- ue2::unordered_map<NFAVertex, NFAVertex> orig_to_copy;
+ unordered_map<NFAVertex, NFAVertex> orig_to_copy;
NGHolder h;
cloneHolder(h, g, &orig_to_copy);
removeEdgesToAccept(h, orig_to_copy[v]);
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ng_holder.h"
#include "som/som.h"
#include "ue2common.h"
-#include "util/ue2_containers.h"
#include <map>
#include <boost/dynamic_bitset.hpp>
class NGHolder;
class ReportManager;
-/** Dynamically-sized bitset, as an NFA can have an arbitrary number of states. */
+/**
+ * Dynamically-sized bitset, as an NFA can have an arbitrary number of states.
+ */
typedef boost::dynamic_bitset<> NFAStateSet;
/**
#include "ng_util.h"
#include "ue2common.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <vector>
NFAUndirectedGraph createUnGraph(const Graph &g,
bool excludeStarts,
bool excludeAccepts,
- unordered_map<typename Graph::vertex_descriptor,
- NFAUndirectedVertex> &old2new) {
+ std::unordered_map<typename Graph::vertex_descriptor,
+ NFAUndirectedVertex> &old2new) {
NFAUndirectedGraph ug;
size_t idx = 0;
// Track seen edges so that we don't insert parallel edges.
using Vertex = typename Graph::vertex_descriptor;
- unordered_set<std::pair<Vertex, Vertex>> seen;
+ ue2_unordered_set<std::pair<Vertex, Vertex>> seen;
seen.reserve(num_edges(g));
auto make_ordered_edge = [](Vertex a, Vertex b) {
return std::make_pair(std::min(a, b), std::max(a, b));
#include <limits>
#include <map>
#include <set>
+#include <unordered_map>
+#include <unordered_set>
+
#include <boost/graph/filtered_graph.hpp>
#include <boost/graph/topological_sort.hpp>
#include <boost/range/adaptor/map.hpp>
// having to reallocate it, etc.
auto colors = make_small_color_map(g);
- using EdgeSet = ue2::unordered_set<NFAEdge>;
+ using EdgeSet = unordered_set<NFAEdge>;
EdgeSet backEdges;
BackEdges<EdgeSet> be(backEdges);
void clearReports(NGHolder &g) {
DEBUG_PRINTF("clearing reports without an accept edge\n");
- ue2::unordered_set<NFAVertex> allow;
+ unordered_set<NFAVertex> allow;
insert(&allow, inv_adjacent_vertices(g.accept, g));
insert(&allow, inv_adjacent_vertices(g.acceptEod, g));
allow.erase(g.accept); // due to stylised edge.
static
void fillHolderOutEdges(NGHolder &out, const NGHolder &in,
- const ue2::unordered_map<NFAVertex, NFAVertex> &v_map,
+ const unordered_map<NFAVertex, NFAVertex> &v_map,
NFAVertex u) {
NFAVertex u_new = v_map.at(u);
}
void fillHolder(NGHolder *outp, const NGHolder &in, const deque<NFAVertex> &vv,
- ue2::unordered_map<NFAVertex, NFAVertex> *v_map_out) {
+ unordered_map<NFAVertex, NFAVertex> *v_map_out) {
NGHolder &out = *outp;
- ue2::unordered_map<NFAVertex, NFAVertex> &v_map = *v_map_out;
+ unordered_map<NFAVertex, NFAVertex> &v_map = *v_map_out;
out.kind = in.kind;
}
void cloneHolder(NGHolder &out, const NGHolder &in,
- ue2::unordered_map<NFAVertex, NFAVertex> *mapping) {
+ unordered_map<NFAVertex, NFAVertex> *mapping) {
cloneHolder(out, in);
vector<NFAVertex> out_verts(num_vertices(in));
for (auto v : vertices_range(out)) {
void reverseHolder(const NGHolder &g_in, NGHolder &g) {
// Make the BGL do the grunt work.
- ue2::unordered_map<NFAVertex, NFAVertex> vertexMap;
+ unordered_map<NFAVertex, NFAVertex> vertexMap;
boost::transpose_graph(g_in, g,
orig_to_copy(boost::make_assoc_property_map(vertexMap)));
#ifndef NG_UTIL_H
#define NG_UTIL_H
-#include <map>
-#include <vector>
-
-#include <boost/graph/depth_first_search.hpp> // for default_dfs_visitor
-
#include "ng_holder.h"
#include "ue2common.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+
+#include <boost/graph/depth_first_search.hpp> // for default_dfs_visitor
+
+#include <map>
+#include <unordered_map>
+#include <vector>
namespace ue2 {
* \a in). A vertex mapping is returned in \a v_map_out. */
void fillHolder(NGHolder *outp, const NGHolder &in,
const std::deque<NFAVertex> &vv,
- unordered_map<NFAVertex, NFAVertex> *v_map_out);
+ std::unordered_map<NFAVertex, NFAVertex> *v_map_out);
/** \brief Clone the graph in \a in into graph \a out, returning a vertex
* mapping in \a v_map_out. */
void cloneHolder(NGHolder &out, const NGHolder &in,
- unordered_map<NFAVertex, NFAVertex> *v_map_out);
+ std::unordered_map<NFAVertex, NFAVertex> *v_map_out);
/** \brief Clone the graph in \a in into graph \a out. */
void cloneHolder(NGHolder &out, const NGHolder &in);
#include "util/compare.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
#include "util/order_check.h"
#include "util/target_info.h"
#include "util/ue2string.h"
-#include "util/ue2_containers.h"
#include <set>
#include <utility>
static
void getCandidatePivots(const NGHolder &g, set<NFAVertex> *cand,
set<NFAVertex> *cand_raw) {
- ue2::unordered_map<NFAVertex, NFAVertex> dominators = findDominators(g);
+ auto dominators = findDominators(g);
set<NFAVertex> accepts;
shared_ptr<NGHolder> lhs = make_shared<NGHolder>();
shared_ptr<NGHolder> rhs = make_shared<NGHolder>();
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(base_graph, splitters, lhs.get(), &lhs_map, rhs.get(), &rhs_map);
DEBUG_PRINTF("split %s:%zu into %s:%zu + %s:%zu\n",
NFAVertex pivot = target(e, h);
DEBUG_PRINTF("splitting on pivot %zu\n", h[pivot].index);
- ue2::unordered_map<NFAVertex, NFAVertex> temp_map;
+ unordered_map<NFAVertex, NFAVertex> temp_map;
shared_ptr<NGHolder> new_lhs = make_shared<NGHolder>();
splitLHS(h, pivot, new_lhs.get(), &temp_map);
effort */
if (!contains(done_rhs, adj)) {
- ue2::unordered_map<NFAVertex, NFAVertex> temp_map;
+ unordered_map<NFAVertex, NFAVertex> temp_map;
shared_ptr<NGHolder> new_rhs = make_shared<NGHolder>();
splitRHS(h, adj, new_rhs.get(), &temp_map);
remove_edge(new_rhs->start, new_rhs->accept, *new_rhs);
#include "parser/Parser.h"
#include "ue2common.h"
#include "util/compare.h"
+#include "util/flat_containers.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
#include "util/unicode_def.h"
#include "util/verify_types.h"
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "nfagraph/ng_builder.h"
#include "util/charreach.h"
#include "util/container.h"
+#include "util/flat_containers.h"
+#include "util/hash.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <algorithm>
#include <iterator>
* Scans through a list of positions and retains only the highest priority
* version of a given (position, flags) entry. */
void cleanupPositions(vector<PositionInfo> &a) {
- ue2::unordered_set<pair<Position, int>> seen; // track dupes
+ ue2_unordered_set<pair<Position, int>> seen;
vector<PositionInfo> out;
out.reserve(a.size()); // output should be close to input in size.
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ConstComponentVisitor.h"
#include "parse_error.h"
#include "util/container.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <sstream>
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Component tree analysis that checks that references (such as
* back-refs, conditionals) have valid referents.
*/
-#ifndef PARSER_CHECK_REFS_H_
-#define PARSER_CHECK_REFS_H_
-#include "util/ue2_containers.h"
+#ifndef PARSER_CHECK_REFS_H
+#define PARSER_CHECK_REFS_H
+
+#include "util/flat_containers.h"
#include <string>
class ComponentSequence;
void checkReferences(const Component &root, unsigned int groupIndices,
- const ue2::flat_set<std::string> &groupNames);
+ const flat_set<std::string> &groupNames);
} // namespace ue2
-#endif // PARSER_CHECK_REFS_H_
+#endif // PARSER_CHECK_REFS_H
#include "rose_in_graph.h"
#include "util/bytecode_ptr.h"
#include "util/charreach.h"
+#include "util/flat_containers.h"
#include "util/noncopyable.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
#include <memory>
/** \brief True if we can not establish that at most a single callback will
* be generated at a given offset from this set of reports. */
- virtual bool requiresDedupeSupport(const ue2::flat_set<ReportID> &reports)
+ virtual bool requiresDedupeSupport(const flat_set<ReportID> &reports)
const = 0;
};
/** \brief Adds a single literal. */
virtual void add(bool anchored, bool eod, const ue2_literal &lit,
- const ue2::flat_set<ReportID> &ids) = 0;
+ const flat_set<ReportID> &ids) = 0;
virtual bool addRose(const RoseInGraph &ig, bool prefilter) = 0;
virtual bool addSombeRose(const RoseInGraph &ig) = 0;
/** \brief Returns true if we were able to add it as a mask. */
virtual bool add(bool anchored, const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports) = 0;
+ const flat_set<ReportID> &reports) = 0;
/** \brief Attempts to add the graph to the anchored acyclic table. Returns
* true on success. */
virtual bool addAnchoredAcyclic(const NGHolder &graph) = 0;
virtual bool validateMask(const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports,
+ const flat_set<ReportID> &reports,
bool anchored, bool eod) const = 0;
virtual void addMask(const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports, bool anchored,
+ const flat_set<ReportID> &reports, bool anchored,
bool eod) = 0;
/** \brief Construct a runtime implementation. */
/** Edges we've transformed (in \ref transformAnchoredLiteralOverlap) which
* require ANCH history to prevent overlap. */
- ue2::unordered_set<RoseInEdge> anch_history_edges;
+ unordered_set<RoseInEdge> anch_history_edges;
/** True if we're tracking Start of Match. */
bool som;
#include "util/compile_error.h"
#include "util/container.h"
#include "util/determinise.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
#include "util/order_check.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
+#include "util/unordered.h"
#include "util/verify_types.h"
#include <map>
bool operator==(const Holder_StateSet &b) const {
return wdelay == b.wdelay && wrap_state == b.wrap_state;
}
-};
-size_t hash_value(const Holder_StateSet &s) {
- size_t val = 0;
- boost::hash_combine(val, s.wrap_state);
- boost::hash_combine(val, s.wdelay);
- return val;
-}
+ size_t hash() const {
+ return hash_all(wrap_state, wdelay);
+ }
+};
class Automaton_Holder {
public:
using StateSet = Holder_StateSet;
- using StateMap = unordered_map<StateSet, dstate_id_t>;
+ using StateMap = ue2_unordered_map<StateSet, dstate_id_t>;
explicit Automaton_Holder(const NGHolder &g_in) : g(g_in) {
for (auto v : vertices_range(g)) {
private:
const NGHolder &g;
- ue2::unordered_map<NFAVertex, u32> vertexToIndex;
+ unordered_map<NFAVertex, u32> vertexToIndex;
vector<NFAVertex> indexToVertex;
vector<CharReach> cr_by_index;
StateSet init;
static
void setReports(NGHolder &h, const map<NFAVertex, set<u32>> &reportMap,
- const ue2::unordered_map<NFAVertex, NFAVertex> &orig_to_copy) {
+ const unordered_map<NFAVertex, NFAVertex> &orig_to_copy) {
for (const auto &m : reportMap) {
NFAVertex t = orig_to_copy.at(m.first);
assert(!m.second.empty());
int addAnchoredNFA(RoseBuildImpl &build, const NGHolder &wrapper,
const map<NFAVertex, set<u32>> &reportMap) {
NGHolder h;
- ue2::unordered_map<NFAVertex, NFAVertex> orig_to_copy;
+ unordered_map<NFAVertex, NFAVertex> orig_to_copy;
cloneHolder(h, wrapper, &orig_to_copy);
clear_in_edges(h.accept, h);
clear_in_edges(h.acceptEod, h);
/** \brief Simple cache of programs written to engine blob, used for
* deduplication. */
- ue2::unordered_map<RoseProgram, u32, RoseProgramHash,
- RoseProgramEquivalence> program_cache;
+ unordered_map<RoseProgram, u32, RoseProgramHash,
+ RoseProgramEquivalence> program_cache;
/** \brief State indices, for those roles that have them.
* Each vertex present has a unique state index in the range
/** \brief Mapping from queue index to bytecode offset for built engines
* that have already been pushed into the engine_blob. */
- ue2::unordered_map<u32, u32> engineOffsets;
+ unordered_map<u32, u32> engineOffsets;
/** \brief List of long literals (ones with CHECK_LONG_LIT instructions)
* that need hash table support. */
map<left_id, set<PredTopPair> > infixTriggers;
vector<left_id> order;
- unordered_map<left_id, vector<RoseVertex> > succs;
+ unordered_map<left_id, vector<RoseVertex>> succs;
findInfixTriggers(tbi, &infixTriggers);
if (cc.grey.allowTamarama && cc.streaming && !do_prefix) {
}
static
-bool anyEndfixMpvTriggers(const RoseBuildImpl &tbi) {
- const RoseGraph &g = tbi.g;
- ue2::unordered_set<suffix_id> done;
+bool anyEndfixMpvTriggers(const RoseBuildImpl &build) {
+ const RoseGraph &g = build.g;
+ unordered_set<suffix_id> done;
/* suffixes */
for (auto v : vertices_range(g)) {
}
done.insert(g[v].suffix);
- if (hasMpvTrigger(all_reports(g[v].suffix), tbi.rm)) {
+ if (hasMpvTrigger(all_reports(g[v].suffix), build.rm)) {
return true;
}
}
/* outfixes */
- for (const auto &out : tbi.outfixes) {
- if (hasMpvTrigger(all_reports(out), tbi.rm)) {
+ for (const auto &out : build.outfixes) {
+ if (hasMpvTrigger(all_reports(out), build.rm)) {
return true;
}
}
const RoseGraph &g = tbi.g;
const CompileContext &cc = tbi.cc;
- ue2::unordered_set<u32> done_core;
+ unordered_set<u32> done_core;
leftTable.resize(leftfixCount);
#include "util/container.h"
#include "util/dump_charclass.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
#include <map>
static
void makeCastle(LeftEngInfo &left,
- unordered_map<const NGHolder *, shared_ptr<CastleProto>> &cache) {
+ unordered_map<const NGHolder *, shared_ptr<CastleProto>> &cache) {
if (left.dfa || left.haig || left.castle) {
return;
}
static
void makeCastleSuffix(RoseBuildImpl &tbi, RoseVertex v,
- ue2::unordered_map<const NGHolder *, shared_ptr<CastleProto> > &cache) {
+ unordered_map<const NGHolder *, shared_ptr<CastleProto>> &cache) {
RoseSuffixInfo &suffix = tbi.g[v].suffix;
if (!suffix.graph) {
return;
}
void remapCastleTops(RoseBuildImpl &tbi) {
- ue2::unordered_map<CastleProto *, vector<RoseVertex> > rose_castles;
- ue2::unordered_map<CastleProto *, vector<RoseVertex> > suffix_castles;
+ unordered_map<CastleProto *, vector<RoseVertex>> rose_castles;
+ unordered_map<CastleProto *, vector<RoseVertex>> suffix_castles;
RoseGraph &g = tbi.g;
for (auto v : vertices_range(g)) {
#include "util/compile_context.h"
#include "util/container.h"
#include "util/dump_charclass.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/order_check.h"
#include "util/report_manager.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
#include "util/verify_types.h"
bool danglingVertexRef(RoseBuildImpl &tbi) {
RoseGraph::vertex_iterator vi, ve;
tie(vi, ve) = vertices(tbi.g);
- const ue2::unordered_set<RoseVertex> valid_vertices(vi, ve);
+ const unordered_set<RoseVertex> valid_vertices(vi, ve);
if (!contains(valid_vertices, tbi.anchored_root)) {
DEBUG_PRINTF("anchored root vertex %zu not in graph\n",
#include <queue>
#include <set>
#include <string>
-#include <vector>
+#include <unordered_map>
#include <utility>
+#include <vector>
#include <boost/range/adaptor/map.hpp>
DEBUG_PRINTF("woot?\n");
shared_ptr<NGHolder> h_new = make_shared<NGHolder>();
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
vector<NFAVertex> exits_vec;
insert(&exits_vec, exits_vec.end(), exits);
splitRHS(h, exits_vec, h_new.get(), &rhs_map);
#include "util/bytecode_ptr.h"
#include "util/charreach.h"
#include "util/container.h"
+#include "util/hash.h"
#include "util/multibit_build.h"
#include "util/noncopyable.h"
-#include "util/ue2_containers.h"
#include "util/verify_types.h"
+#include "util/unordered.h"
-#include <vector>
#include <type_traits>
+#include <vector>
namespace ue2 {
u32 get_offset_of(const std::vector<s8> &look, RoseEngineBlob &blob);
private:
- unordered_map<std::vector<std::vector<CharReach>>, u32> multi_cache;
- unordered_map<std::vector<s8>, u32> lcache;
- unordered_map<std::vector<CharReach>, u32> rcache;
+ using Path = std::vector<CharReach>;
+ ue2_unordered_map<std::vector<Path>, u32> multi_cache;
+ ue2_unordered_map<std::vector<s8>, u32> lcache;
+ ue2_unordered_map<Path, u32> rcache;
};
class RoseEngineBlob : noncopyable {
}
/** \brief Cache of previously-written sparse iterators. */
- unordered_map<std::vector<mmbit_sparse_iter>, u32> cached_iters;
+ ue2_unordered_map<std::vector<mmbit_sparse_iter>, u32> cached_iters;
/**
* \brief Contents of the Rose bytecode immediately following the
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/* add prefix literals to engine graph */
static
-bool addPrefixLiterals(NGHolder &h, ue2::unordered_set<u32> &tailId,
+bool addPrefixLiterals(NGHolder &h, unordered_set<u32> &tailId,
const vector<vector<CharReach>> &triggers) {
DEBUG_PRINTF("add literals to graph\n");
template<typename role_id>
static
bool isExclusive(const NGHolder &h,
- const u32 num, ue2::unordered_set<u32> &tailId,
- map<u32, ue2::unordered_set<u32>> &skipList,
+ const u32 num, unordered_set<u32> &tailId,
+ map<u32, unordered_set<u32>> &skipList,
const RoleInfo<role_id> &role1,
const RoleInfo<role_id> &role2) {
const u32 id1 = role1.id;
template<typename role_id>
static
-ue2::unordered_set<u32> checkExclusivity(const NGHolder &h,
- const u32 num, ue2::unordered_set<u32> &tailId,
- map<u32, ue2::unordered_set<u32>> &skipList,
- const RoleInfo<role_id> &role1,
- const RoleChunk<role_id> &roleChunk) {
- ue2::unordered_set<u32> info;
+unordered_set<u32> checkExclusivity(const NGHolder &h,
+ const u32 num, unordered_set<u32> &tailId,
+ map<u32, unordered_set<u32>> &skipList,
+ const RoleInfo<role_id> &role1,
+ const RoleChunk<role_id> &roleChunk) {
+ unordered_set<u32> info;
const u32 id1 = role1.id;
for (const auto &role2 : roleChunk.roles) {
const u32 id2 = role2.id;
static
map<u32, set<u32>> findExclusiveGroups(const RoseBuildImpl &build,
- const map<u32, ue2::unordered_set<u32>> &exclusiveInfo,
+ const map<u32, unordered_set<u32>> &exclusiveInfo,
const map<u32, vector<RoseVertex>> &vertex_map,
const bool is_infix) {
map<u32, set<u32>> exclusiveGroups;
vector<vector<u32>> &exclusive_roles, const bool is_infix) {
const auto &chunks = divideIntoChunks(build, roleInfoSet);
DEBUG_PRINTF("Exclusivity analysis entry\n");
- map<u32, ue2::unordered_set<u32>> exclusiveInfo;
+ map<u32, unordered_set<u32>> exclusiveInfo;
for (const auto &roleChunk : chunks) {
- map<u32, ue2::unordered_set<u32>> skipList;
+ map<u32, unordered_set<u32>> skipList;
for (const auto &role1 : roleChunk.roles) {
const u32 id1 = role1.id;
const role_id &s1 = role1.role;
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#define ROSE_BUILD_GROUPS_H
#include "rose_build_impl.h"
-#include "util/ue2_containers.h"
+
+#include <unordered_map>
namespace ue2 {
-unordered_map<RoseVertex, rose_group>
+std::unordered_map<RoseVertex, rose_group>
getVertexGroupMap(const RoseBuildImpl &build);
rose_group getSquashableGroups(const RoseBuildImpl &build);
#include "nfagraph/ng_holder.h"
#include "nfagraph/ng_revacc.h"
#include "util/bytecode_ptr.h"
+#include "util/flat_containers.h"
#include "util/hash.h"
#include "util/order_check.h"
#include "util/queue_index_factory.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
+#include "util/unordered.h"
#include "util/verify_types.h"
#include <deque>
depth findMaxWidth(const suffix_id &s);
depth findMinWidth(const suffix_id &s, u32 top);
depth findMaxWidth(const suffix_id &s, u32 top);
-size_t hash_value(const suffix_id &s);
/** \brief represents an engine to the left of a rose role */
struct left_id {
depth findMinWidth(const left_id &r);
depth findMaxWidth(const left_id &r);
u32 num_tops(const left_id &r);
-size_t hash_value(const left_id &r);
struct rose_literal_info {
- ue2::flat_set<u32> delayed_ids;
- ue2::flat_set<RoseVertex> vertices;
+ flat_set<u32> delayed_ids;
+ flat_set<RoseVertex> vertices;
rose_group group_mask = 0;
u32 undelayed_id = MO_INVALID_IDX;
bool squash_group = false;
return s == b.s && msk == b.msk && cmp == b.cmp && table == b.table &&
delay == b.delay && distinctiveness == b.distinctiveness;
}
+
+ size_t hash() const {
+ return hash_all(s, msk, cmp, table, delay, distinctiveness);
+ }
};
static inline
return 0;
}
-inline
-size_t hash_value(const rose_literal_id &lit) {
- return hash_all(lit.s, lit.msk, lit.cmp, lit.table, lit.delay,
- lit.distinctiveness);
-}
-
class RoseLiteralMap {
/**
* \brief Main storage for literals.
std::deque<rose_literal_id> lits;
/** \brief Quick-lookup index from literal -> index in lits. */
- unordered_map<rose_literal_id, u32> lits_index;
+ ue2_unordered_map<rose_literal_id, u32> lits_index;
public:
std::pair<u32, bool> insert(const rose_literal_id &lit) {
// Adds a single literal.
void add(bool anchored, bool eod, const ue2_literal &lit,
- const ue2::flat_set<ReportID> &ids) override;
+ const flat_set<ReportID> &ids) override;
bool addRose(const RoseInGraph &ig, bool prefilter) override;
bool addSombeRose(const RoseInGraph &ig) override;
// Returns true if we were able to add it as a mask
bool add(bool anchored, const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports) override;
+ const flat_set<ReportID> &reports) override;
bool addAnchoredAcyclic(const NGHolder &graph) override;
bool validateMask(const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports, bool anchored,
+ const flat_set<ReportID> &reports, bool anchored,
bool eod) const override;
void addMask(const std::vector<CharReach> &mask,
- const ue2::flat_set<ReportID> &reports, bool anchored,
+ const flat_set<ReportID> &reports, bool anchored,
bool eod) override;
// Construct a runtime implementation.
* overlap calculation in history assignment. */
std::map<u32, rose_literal_id> anchoredLitSuffix;
- unordered_set<left_id> transient;
- unordered_map<left_id, rose_group> rose_squash_masks;
+ ue2_unordered_set<left_id> transient;
+ ue2_unordered_map<left_id, rose_group> rose_squash_masks;
std::vector<OutfixInfo> outfixes;
} // namespace ue2
+namespace std {
+
+/** \brief std::hash specialisation for left_id so it can be used as a key in
+ * std::unordered_set/map; delegates to the member hash(). */
+template<>
+struct hash<ue2::left_id> {
+ size_t operator()(const ue2::left_id &l) const {
+ return l.hash();
+ }
+};
+
+/** \brief std::hash specialisation for suffix_id; delegates to the member
+ * hash(). */
+template<>
+struct hash<ue2::suffix_id> {
+ size_t operator()(const ue2::suffix_id &s) const {
+ return s.hash();
+ }
+};
+
+} // namespace std
+
#endif /* ROSE_BUILD_IMPL_H */
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "rose/rose_build_impl.h"
#include "util/container.h"
#include "util/dump_charclass.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
#include "util/graph.h"
-#include "util/ue2_containers.h"
+#include "util/hash.h"
#include "util/ue2string.h"
+#include "util/unordered.h"
#include <algorithm>
#include <set>
static
bool couldEndLiteral(const ue2_literal &s, NFAVertex initial,
const NGHolder &h) {
- ue2::flat_set<NFAVertex> curr, next;
+ flat_set<NFAVertex> curr, next;
curr.insert(initial);
for (auto it = s.rbegin(), ite = s.rend(); it != ite; ++it) {
return true;
}
+using EdgeCache = ue2_unordered_set<pair<NFAVertex, NFAVertex>>;
+
static
-void contractVertex(NGHolder &g, NFAVertex v,
- ue2::unordered_set<pair<NFAVertex, NFAVertex>> &all_edges) {
+void contractVertex(NGHolder &g, NFAVertex v, EdgeCache &all_edges) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
if (u == v) {
continue; // self-edge
cloneHolder(g, h);
vector<NFAVertex> dead;
- // The set of all edges in the graph is used for existence checks in contractVertex.
- ue2::unordered_set<pair<NFAVertex, NFAVertex>> all_edges;
+ // The set of all edges in the graph is used for existence checks in
+ // contractVertex.
+ EdgeCache all_edges;
for (const auto &e : edges_range(g)) {
all_edges.emplace(source(e, g), target(e, g));
}
#include "rose_build_lookaround.h"
#include "rose_build_program.h"
+#include "util/hash.h"
#include "util/verify_types.h"
namespace ue2 {
/** \brief Length of the bytecode instruction in bytes. */
virtual size_t byte_length() const = 0;
- using OffsetMap = unordered_map<const RoseInstruction *, u32>;
+ using OffsetMap = std::unordered_map<const RoseInstruction *, u32>;
/**
* \brief Writes a concrete implementation of this instruction.
}
};
+// Out-of-line definition of the static constexpr member. Required if opcode
+// is odr-used (e.g. bound to a const reference, as in hash_all(opcode, ...));
+// prior to C++17 the in-class initializer alone is only a declaration.
+template<RoseInstructionCode Opcode, class ImplType, class RoseInstrType>
+constexpr RoseInstructionCode
+ RoseInstrBase<Opcode, ImplType, RoseInstrType>::opcode;
+
/**
* \brief Refinement of RoseInstrBase to use for instructions that have
* just a single target member, called "target".
virtual bool operator==(const RoseInstrType &) const { return true; }
size_t hash() const override {
- return boost::hash_value(static_cast<int>(Opcode));
+ return hash_all(Opcode);
}
bool equiv_to(const RoseInstrType &, const RoseInstruction::OffsetMap &,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), groups, anch_id);
+ return hash_all(opcode, groups, anch_id);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), min_offset);
+ return hash_all(opcode, min_offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), groups);
+ return hash_all(opcode, groups);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return boost::hash_value(static_cast<int>(opcode));
+ return hash_all(opcode);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), min_bound, max_bound);
+ return hash_all(opcode, min_bound, max_bound);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), key);
+ return hash_all(opcode, key);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), offset, reach);
+ return hash_all(opcode, offset, reach);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), look);
+ return hash_all(opcode, look);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), and_mask, cmp_mask, neg_mask,
- offset);
+ return hash_all(opcode, and_mask, cmp_mask, neg_mask, offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), and_mask, cmp_mask, neg_mask,
- offset);
+ return hash_all(opcode, and_mask, cmp_mask, neg_mask, offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), and_mask, cmp_mask, negation,
- offset);
+ return hash_all(opcode, and_mask, cmp_mask, negation, offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), nib_mask,
- bucket_select_mask, neg_mask, offset);
+ return hash_all(opcode, nib_mask, bucket_select_mask, neg_mask, offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask, neg_mask, offset);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask, neg_mask,
+ offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask, neg_mask, offset);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask, neg_mask,
+ offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask_hi, bucket_select_mask_lo,
- neg_mask, offset);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask_hi,
+ bucket_select_mask_lo, neg_mask, offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), queue, lag, report);
+ return hash_all(opcode, queue, lag, report);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), queue, lag, report);
+ return hash_all(opcode, queue, lag, report);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), delay, index);
+ return hash_all(opcode, delay, index);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), distance);
+ return hash_all(opcode, distance);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), queue, lag);
+ return hash_all(opcode, queue, lag);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), som.type, som.onmatch);
+ return hash_all(opcode, som.type, som.onmatch);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), cancel, queue, event);
+ return hash_all(opcode, cancel, queue, event);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), queue, event);
+ return hash_all(opcode, queue, event);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), quash_som, dkey,
- offset_adjust);
+ return hash_all(opcode, quash_som, dkey, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), quash_som, dkey,
- offset_adjust);
+ return hash_all(opcode, quash_som, dkey, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), event, top_squash_distance);
+ return hash_all(opcode, event, top_squash_distance);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), som.type, som.onmatch);
+ return hash_all(opcode, som.type, som.onmatch);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), som.type, som.onmatch);
+ return hash_all(opcode, som.type, som.onmatch);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), onmatch, offset_adjust);
+ return hash_all(opcode, onmatch, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), onmatch, offset_adjust, ekey);
+ return hash_all(opcode, onmatch, offset_adjust, ekey);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), onmatch, offset_adjust);
+ return hash_all(opcode, onmatch, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), onmatch, offset_adjust, ekey);
+ return hash_all(opcode, onmatch, offset_adjust, ekey);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), quash_som, dkey, onmatch,
- offset_adjust);
+ return hash_all(opcode, quash_som, dkey, onmatch, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), onmatch, offset_adjust);
+ return hash_all(opcode, onmatch, offset_adjust);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), ekey);
+ return hash_all(opcode, ekey);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), end_adj, min_length);
+ return hash_all(opcode, end_adj, min_length);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), index);
+ return hash_all(opcode, index);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), groups);
+ return hash_all(opcode, groups);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), groups);
+ return hash_all(opcode, groups);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), index);
+ return hash_all(opcode, index);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- size_t v = hash_all(static_cast<int>(opcode), num_keys);
+ size_t v = hash_all(opcode, num_keys);
for (const u32 &key : jump_table | boost::adaptors::map_keys) {
- boost::hash_combine(v, key);
+ hash_combine(v, key);
}
return v;
}
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), state);
+ return hash_all(opcode, state);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), num_keys, keys);
+ return hash_all(opcode, num_keys, keys);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), iter_offset);
+ return hash_all(opcode, iter_offset);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), literal);
+ return hash_all(opcode, literal);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), literal);
+ return hash_all(opcode, literal);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), literal);
+ return hash_all(opcode, literal);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), literal);
+ return hash_all(opcode, literal);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), multi_look, last_start,
- start_mask);
+ return hash_all(opcode, multi_look, last_start, start_mask);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), nib_mask,
- bucket_select_mask, data_select_mask, hi_bits_mask,
- lo_bits_mask, neg_mask, base_offset, last_start);
+ return hash_all(opcode, nib_mask, bucket_select_mask, data_select_mask,
+ hi_bits_mask, lo_bits_mask, neg_mask, base_offset,
+ last_start);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask, data_select_mask, hi_bits_mask,
- lo_bits_mask, neg_mask, base_offset, last_start);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask,
+ data_select_mask, hi_bits_mask, lo_bits_mask, neg_mask,
+ base_offset, last_start);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask_hi, bucket_select_mask_lo,
- data_select_mask, hi_bits_mask, lo_bits_mask, neg_mask,
- base_offset, last_start);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask_hi,
+ bucket_select_mask_lo, data_select_mask, hi_bits_mask,
+ lo_bits_mask, neg_mask, base_offset, last_start);
}
void write(void *dest, RoseEngineBlob &blob,
}
size_t hash() const override {
- return hash_all(static_cast<int>(opcode), hi_mask, lo_mask,
- bucket_select_mask, data_select_mask, hi_bits_mask,
- lo_bits_mask, neg_mask, base_offset, last_start);
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask,
+ data_select_mask, hi_bits_mask, lo_bits_mask, neg_mask,
+ base_offset, last_start);
}
void write(void *dest, RoseEngineBlob &blob,
#include "util/container.h"
#include "util/dump_charclass.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/verify_types.h"
#include <cstdlib>
static
void getForwardReach(const NGHolder &g, u32 top, map<s32, CharReach> &look) {
- ue2::flat_set<NFAVertex> curr, next;
+ flat_set<NFAVertex> curr, next;
// Consider only successors of start with the required top.
for (const auto &e : out_edges_range(g.start, g)) {
static
void getBackwardReach(const NGHolder &g, ReportID report, u32 lag,
map<s32, CharReach> &look) {
- ue2::flat_set<NFAVertex> curr, next;
+ flat_set<NFAVertex> curr, next;
for (auto v : inv_adjacent_vertices_range(g.accept, g)) {
if (contains(g[v].reports, report)) {
return;
}
- ue2::flat_set<dstate_id_t> curr, next;
+ flat_set<dstate_id_t> curr, next;
curr.insert(rdfa.start_anchored);
for (u32 i = 0; i < MAX_FWD_LEN && !curr.empty(); i++) {
}
// Don't merge lookarounds at offsets we already have entries for.
- ue2::flat_set<s8> offsets;
+ flat_set<s8> offsets;
for (const auto &e : lookaround) {
offsets.insert(e.offset);
}
#define ROSE_ROSE_BUILD_LOOKAROUND_H
#include "rose_graph.h"
+#include "util/hash.h"
#include <vector>
}
};
-static inline
-size_t hash_value(const LookEntry &l) {
- size_t val = 0;
- boost::hash_combine(val, l.offset);
- boost::hash_combine(val, l.reach);
- return val;
-}
-
void findLookaroundMasks(const RoseBuildImpl &tbi, const RoseVertex v,
std::vector<LookEntry> &look_more);
} // namespace ue2
+namespace std {
+
+/** \brief std::hash specialisation for LookEntry (offset + reach), allowing
+ * LookEntry keys in std::unordered containers; replaces the old Boost-style
+ * hash_value free function. */
+template<>
+struct hash<ue2::LookEntry> {
+ size_t operator()(const ue2::LookEntry &l) const {
+ return ue2::hash_all(l.offset, l.reach);
+ }
+};
+
+} // namespace std
+
#endif // ROSE_ROSE_BUILD_LOOKAROUND_H
#include "util/container.h"
#include "util/dump_charclass.h"
#include "util/graph_range.h"
+#include "util/hash.h"
#include "util/order_check.h"
#include "util/report_manager.h"
#include "util/ue2string.h"
+#include "util/unordered.h"
#include <algorithm>
#include <functional>
#include <vector>
#include <utility>
-#include <boost/functional/hash/hash_fwd.hpp>
#include <boost/range/adaptor/map.hpp>
using namespace std;
using boost::adaptors::map_values;
-using boost::hash_combine;
namespace ue2 {
const RoseGraph &g = tbi.g;
vector<RoseVertex> suffix_vertices; // vertices with suffix graphs
- ue2::unordered_map<const NGHolder *, u32> fcount; // ref count per graph
+ unordered_map<const NGHolder *, u32> fcount; // ref count per graph
for (auto v : vertices_range(g)) {
if (g[v].suffix) {
for (deque<RoseVertex> &verts : roses | map_values) {
DEBUG_PRINTF("group has %zu vertices\n", verts.size());
- ue2::unordered_set<left_id> seen;
+ unordered_set<left_id> seen;
for (auto jt = verts.begin(), jte = verts.end(); jt != jte; ++jt) {
RoseVertex v = *jt;
void dedupeSuffixes(RoseBuildImpl &tbi) {
DEBUG_PRINTF("deduping suffixes\n");
- ue2::unordered_map<suffix_id, set<RoseVertex>> suffix_map;
+ unordered_map<suffix_id, set<RoseVertex>> suffix_map;
map<pair<size_t, set<ReportID>>, vector<suffix_id>> part;
// Collect suffixes into groups.
class Bouquet {
private:
list<EngineRef> ordering; // Unique list in insert order.
- typedef ue2::unordered_map<EngineRef, deque<RoseVertex> > BouquetMap;
+ using BouquetMap = ue2_unordered_map<EngineRef, deque<RoseVertex>>;
BouquetMap bouquet;
public:
void insert(const EngineRef &h, RoseVertex v) {
static
void processMergeQueue(RoseBuildImpl &tbi, RoseBouquet &roses,
priority_queue<RoseMergeCandidate> &pq) {
- ue2::unordered_set<left_id> dead;
+ unordered_set<left_id> dead;
DEBUG_PRINTF("merge queue has %zu entries\n", pq.size());
// We track the number of accelerable states for each graph in a map and
// only recompute them when the graph is modified.
- ue2::unordered_map<left_id, u32> accel_count;
+ unordered_map<left_id, u32> accel_count;
for (const auto &rose : roses) {
assert(rose.graph()->kind == NFA_INFIX);
accel_count[rose] = estimatedAccelStates(tbi, *rose.graph());
// If this isn't an acyclic case, we track the number of accelerable states
// for each graph in a map and only recompute them when the graph is
// modified.
- ue2::unordered_map<suffix_id, u32> accel_count;
+ unordered_map<suffix_id, u32> accel_count;
if (!acyclic) {
for (const auto &suffix : suffixes) {
assert(suffix.graph() && suffix.graph()->kind == NFA_SUFFIX);
template<class RawDfa, class MergeFunctor>
static
void pairwiseDfaMerge(vector<RawDfa *> &dfas,
- ue2::unordered_map<RawDfa *, size_t> &dfa_mapping,
+ unordered_map<RawDfa *, size_t> &dfa_mapping,
vector<OutfixInfo> &outfixes,
MergeFunctor merge_func) {
DEBUG_PRINTF("merging group of size %zu\n", dfas.size());
template<class RawDfa, class MergeFunctor>
static
void chunkedDfaMerge(vector<RawDfa *> &dfas,
- ue2::unordered_map<RawDfa *, size_t> &dfa_mapping,
+ unordered_map<RawDfa *, size_t> &dfa_mapping,
vector<OutfixInfo> &outfixes,
MergeFunctor merge_func) {
DEBUG_PRINTF("begin merge of %zu dfas\n", dfas.size());
/* key is index into outfix array as iterators, etc may be invalidated by
* element addition. */
- ue2::unordered_map<raw_dfa *, size_t> dfa_mapping;
+ unordered_map<raw_dfa *, size_t> dfa_mapping;
for (size_t i = 0; i < outfixes.size(); i++) {
auto *rdfa = outfixes[i].rdfa();
if (rdfa) {
/* key is index into outfix array as iterators, etc may be invalidated by
* element addition. */
size_t new_dfas = 0;
- ue2::unordered_map<raw_dfa *, size_t> dfa_mapping;
+ unordered_map<raw_dfa *, size_t> dfa_mapping;
vector<raw_dfa *> dfas;
for (auto it = tbi.outfixes.begin(); it != tbi.outfixes.end(); ++it) {
vector<OutfixInfo> &outfixes = tbi.outfixes;
- ue2::unordered_map<raw_som_dfa *, size_t> dfa_mapping;
+ unordered_map<raw_som_dfa *, size_t> dfa_mapping;
for (size_t i = 0; i < outfixes.size(); i++) {
auto *haig = outfixes[i].haig();
if (haig) {
#include "ue2common.h"
#include "grey.h"
-#include <boost/functional/hash/hash_fwd.hpp>
#include <boost/graph/breadth_first_search.hpp>
using namespace std;
-using boost::hash_combine;
namespace ue2 {
}
size_t suffix_id::hash() const {
- size_t val = 0;
- hash_combine(val, g);
- hash_combine(val, c);
- hash_combine(val, d);
- hash_combine(val, h);
- return val;
-}
-
-size_t hash_value(const suffix_id &s) {
- return s.hash();
+ return hash_all(g, c, d, h);
}
bool isAnchored(const left_id &r) {
}
size_t left_id::hash() const {
- size_t val = 0;
- hash_combine(val, g);
- hash_combine(val, c);
- hash_combine(val, d);
- hash_combine(val, h);
- return val;
-}
-
-size_t hash_value(const left_id &r) {
- return r.hash();
+ return hash_all(g, c, d, h);
}
u64a findMaxOffset(const set<ReportID> &reports, const ReportManager &rm) {
bool hasOrphanedTops(const RoseBuildImpl &build) {
const RoseGraph &g = build.g;
- ue2::unordered_map<left_id, set<u32> > roses;
- ue2::unordered_map<suffix_id, set<u32> > suffixes;
+ unordered_map<left_id, set<u32>> roses;
+ unordered_map<suffix_id, set<u32>> suffixes;
for (auto v : vertices_range(g)) {
if (g[v].left) {
#include "util/compile_context.h"
#include "util/compile_error.h"
#include "util/report_manager.h"
+#include "util/unordered.h"
#include "util/verify_types.h"
#include <boost/range/adaptor/map.hpp>
size_t v = 0;
for (const auto &ri : program) {
assert(ri);
- boost::hash_combine(v, ri->hash());
+ hash_combine(v, ri->hash());
}
return v;
}
namespace {
struct ProgKey {
- ProgKey(const RoseProgram &p) : prog(&p) { }
+ ProgKey(const RoseProgram &p) : prog(&p) {}
bool operator==(const ProgKey &b) const {
return RoseProgramEquivalence()(*prog, *b.prog);
}
- friend size_t hash_value(const ProgKey &a) {
- return RoseProgramHash()(*a.prog);
+ size_t hash() const {
+ return RoseProgramHash()(*prog);
}
private:
const RoseProgram *prog;
vector<RoseProgram> blocks;
blocks.reserve(blocks_in.size()); /* to ensure stable reference for seen */
- unordered_set<ProgKey> seen;
+ ue2_unordered_set<ProgKey> seen;
for (auto &block : blocks_in) {
if (contains(seen, block)) {
continue;
#include "util/bytecode_ptr.h"
#include "util/hash.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
+#include <unordered_map>
#include <vector>
#include <boost/range/adaptor/map.hpp>
/** \brief Mapping from vertex to key, for vertices with a
* CHECK_NOT_HANDLED instruction. */
- ue2::unordered_map<RoseVertex, u32> handledKeys;
+ std::unordered_map<RoseVertex, u32> handledKeys;
/** \brief Mapping from Rose literal ID to anchored program index. */
std::map<u32, u32> anchored_programs;
/** \brief Mapping from every vertex to the groups that must be on for that
* vertex to be reached. */
- ue2::unordered_map<RoseVertex, rose_group> vertex_group_map;
+ std::unordered_map<RoseVertex, rose_group> vertex_group_map;
/** \brief Global bitmap of groups that can be squashed. */
rose_group squashable_groups = 0;
RoseProgram assembleProgramBlocks(std::vector<RoseProgram> &&blocks);
RoseProgram makeLiteralProgram(const RoseBuildImpl &build,
- const std::map<RoseVertex, left_build_info> &leftfix_info,
- const std::map<suffix_id, u32> &suffixes,
- const std::map<u32, engine_info> &engine_info_by_queue,
- const unordered_map<RoseVertex, u32> &roleStateIndices,
- ProgramBuild &prog_build, u32 lit_id,
- const std::vector<RoseEdge> &lit_edges,
- bool is_anchored_replay_program);
+ const std::map<RoseVertex, left_build_info> &leftfix_info,
+ const std::map<suffix_id, u32> &suffixes,
+ const std::map<u32, engine_info> &engine_info_by_queue,
+ const std::unordered_map<RoseVertex, u32> &roleStateIndices,
+ ProgramBuild &prog_build, u32 lit_id,
+ const std::vector<RoseEdge> &lit_edges,
+ bool is_anchored_replay_program);
RoseProgram makeDelayRebuildProgram(const RoseBuildImpl &build,
ProgramBuild &prog_build,
#include "util/bitutils.h"
#include "util/compile_context.h"
#include "util/container.h"
+#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
#include "util/hash.h"
#include "util/order_check.h"
-#include "util/ue2_containers.h"
#include <algorithm>
#include <numeric>
#include <vector>
-#include <boost/functional/hash/hash.hpp>
#include <boost/graph/adjacency_iterator.hpp>
#include <boost/range/adaptor/map.hpp>
private:
/* if a vertex is worth storing, it is worth storing twice */
set<RoseVertex> main_cont; /* deterministic iterator */
- ue2::unordered_set<RoseVertex> hash_cont; /* member checks */
+ unordered_set<RoseVertex> hash_cont; /* member checks */
};
struct RoseAliasingInfo {
}
/** \brief Mapping from leftfix to vertices. */
- ue2::unordered_map<left_id, set<RoseVertex>> rev_leftfix;
+ unordered_map<left_id, set<RoseVertex>> rev_leftfix;
/** \brief Mapping from undelayed ghost to delayed vertices. */
- ue2::unordered_map<RoseVertex, set<RoseVertex>> rev_ghost;
+ unordered_map<RoseVertex, set<RoseVertex>> rev_ghost;
};
} // namespace
static
void pruneUnusedTops(CastleProto &castle, const RoseGraph &g,
const set<RoseVertex> &verts) {
- ue2::unordered_set<u32> used_tops;
+ unordered_set<u32> used_tops;
for (auto v : verts) {
assert(g[v].left.castle.get() == &castle);
}
assert(isCorrectlyTopped(h));
DEBUG_PRINTF("pruning unused tops\n");
- ue2::flat_set<u32> used_tops;
+ flat_set<u32> used_tops;
for (auto v : verts) {
assert(g[v].left.graph.get() == &h);
static
void buildInvBucketMap(const vector<vector<RoseVertex>> &buckets,
- ue2::unordered_map<RoseVertex, size_t> &inv) {
+ unordered_map<RoseVertex, size_t> &inv) {
inv.clear();
for (size_t i = 0; i < buckets.size(); i++) {
for (auto v : buckets[i]) {
vector<vector<RoseVertex>> &buckets) {
// Split by report set and suffix info.
auto make_split_key = [&g](RoseVertex v) {
- return hash_all(g[v].reports, g[v].suffix);
+ return hash_all(g[v].reports, suffix_id(g[v].suffix));
};
splitAndFilterBuckets(buckets, make_split_key);
}
auto make_split_key = [&](RoseVertex v) {
const auto &lits = g[v].literals;
assert(!lits.empty());
- return build.literals.at(*lits.begin()).table;
+ auto table = build.literals.at(*lits.begin()).table;
+ return std::underlying_type<decltype(table)>::type(table);
};
splitAndFilterBuckets(buckets, make_split_key);
}
static
void splitByNeighbour(const RoseGraph &g, vector<vector<RoseVertex>> &buckets,
- ue2::unordered_map<RoseVertex, size_t> &inv, bool succ) {
+ unordered_map<RoseVertex, size_t> &inv, bool succ) {
vector<vector<RoseVertex>> extras;
map<size_t, vector<RoseVertex>> neighbours_by_bucket;
set<RoseVertex> picked;
}
// Neighbour splits require inverse map.
- ue2::unordered_map<RoseVertex, size_t> inv;
+ unordered_map<RoseVertex, size_t> inv;
buildInvBucketMap(buckets, inv);
splitByNeighbour(g, buckets, inv, true);
#include "nfa/nfa_internal.h" // for MO_INVALID_IDX
#include "util/charreach.h"
#include "util/depth.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/ue2_graph.h"
#include <memory>
#include "ue2common.h"
#include "rose/rose_common.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "util/ue2_graph.h"
#include "util/ue2string.h"
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "util/container.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
#include <vector>
#include "nfagraph/ng_som_util.h"
#include "nfagraph/ng_region.h"
#include "util/charreach.h"
+#include "util/hash.h"
#include "util/make_unique.h"
#include "util/dump_charclass.h"
#include "util/verify_types.h"
#include <deque>
#include <utility>
-#include <boost/functional/hash/hash.hpp>
-
using namespace std;
namespace ue2 {
size_t SlotEntryHasher::operator()(const SlotCacheEntry &e) const {
assert(e.prefix);
- using boost::hash_combine;
-
- size_t v = 0;
- hash_combine(v, hash_holder(*e.prefix));
- hash_combine(v, e.parent_slot);
- hash_combine(v, e.is_reset);
- hash_combine(v, e.escapes.hash());
+ size_t v = hash_all(hash_holder(*e.prefix), e.parent_slot,
+ e.is_reset, e.escapes);
DEBUG_PRINTF("%zu vertices, parent_slot=%u, escapes=%s, is_reset=%d "
"hashes to %zx\n", num_vertices(*e.prefix), e.parent_slot,
u32 SomSlotManager::getInitialResetSomSlot(const NGHolder &prefix,
const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const unordered_map<NFAVertex, u32> ®ion_map,
u32 last_sent_region, bool *prefix_already_implemented) {
DEBUG_PRINTF("getting initial reset; last sent region %u\n",
last_sent_region);
// Clone a copy of g (and its region map) that we will be able to store
// later on.
shared_ptr<NGHolder> gg = make_shared<NGHolder>();
- ue2::unordered_map<NFAVertex, NFAVertex> orig_to_copy;
+ unordered_map<NFAVertex, NFAVertex> orig_to_copy;
cloneHolder(*gg, g, &orig_to_copy);
- ue2::unordered_map<NFAVertex, u32> gg_region_map;
+ unordered_map<NFAVertex, u32> gg_region_map;
for (const auto &m : region_map) {
assert(contains(region_map, m.first));
gg_region_map.emplace(orig_to_copy.at(m.first), m.second);
#include "nfagraph/ng_holder.h"
#include "util/bytecode_ptr.h"
#include "util/noncopyable.h"
-#include "util/ue2_containers.h"
#include <deque>
#include <memory>
+#include <unordered_map>
struct NFA;
/** prefix must be acting as a resetting sentinel and should be a dag (if
* not how are we establish som?) */
u32 getInitialResetSomSlot(const NGHolder &prefix, const NGHolder &g,
- const ue2::unordered_map<NFAVertex, u32> ®ion_map,
+ const std::unordered_map<NFAVertex, u32> ®ion_map,
u32 last_sent_region,
bool *prefix_already_implemented);
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "nfagraph/ng.h"
#include "nfagraph/ng_is_equal.h"
#include "util/charreach.h"
-#include "util/ue2_containers.h"
#include "ue2common.h"
#include <memory>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
namespace ue2 {
struct InitialResetEntry {
InitialResetEntry(std::shared_ptr<const NGHolder> sent_in,
std::shared_ptr<const NGHolder> body_in,
- const ue2::unordered_map<NFAVertex, u32> &body_regions_in,
+ const std::unordered_map<NFAVertex, u32> &body_regions_in,
u32 sent_region_in, u32 first_bad_region_in)
: sent(sent_in), body(body_in), body_regions(body_regions_in),
sent_region(sent_region_in), first_bad_region(first_bad_region_in) {}
std::shared_ptr<const NGHolder> sent;
std::shared_ptr<const NGHolder> body;
- ue2::unordered_map<NFAVertex, u32> body_regions;
+ std::unordered_map<NFAVertex, u32> body_regions;
u32 sent_region;
u32 first_bad_region; /* ~0U if it must cover the whole g */
};
};
struct SlotCache {
- typedef ue2::unordered_set<SlotCacheEntry, SlotEntryHasher,
+ typedef std::unordered_set<SlotCacheEntry, SlotEntryHasher,
SlotEntryEqual> CacheStore;
void insert(const NGHolder &prefix, const CharReach &escapes,
CacheStore store;
- ue2::unordered_set<std::shared_ptr<const NGHolder>, NGHolderHasher,
- NGHolderEqual> initial_prefixes;
+ std::unordered_set<std::shared_ptr<const NGHolder>, NGHolderHasher,
+ NGHolderEqual> initial_prefixes;
std::vector<InitialResetInfo> initial_resets;
};
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#define ACCEL_SCHEME_H
#include "util/charreach.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include <utility>
#define MAX_ACCEL_DEPTH 4
struct AccelScheme {
- flat_set<std::pair<u8, u8> > double_byte;
+ flat_set<std::pair<u8, u8>> double_byte;
CharReach cr = CharReach::dot();
CharReach double_cr;
u32 offset = MAX_ACCEL_DEPTH + 1;
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "ue2common.h"
#include "popcount.h"
#include "util/bitutils.h"
+#include "util/hash.h"
#include <array>
#include <cassert>
#include <boost/dynamic_bitset.hpp>
-#include <boost/functional/hash/hash.hpp>
namespace ue2 {
/// Simple hash.
size_t hash() const {
- return boost::hash_range(std::begin(bits), std::end(bits));
+ return ue2_hasher()(bits);
}
/// Sentinel value meaning "no more bits", used by find_first and
std::array<block_type, num_blocks> bits;
};
-/** \brief Boost-style hash free function. */
+} // namespace ue2
+
+namespace std {
+
template<size_t requested_size>
-size_t hash_value(const bitfield<requested_size> &b) {
- return b.hash();
-}
+struct hash<ue2::bitfield<requested_size>> {
+ size_t operator()(const ue2::bitfield<requested_size> &b) const {
+ return b.hash();
+ }
+};
-} // namespace ue2
+} // namespace std
#endif // BITFIELD_H
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
bool isutf8ascii(const CharReach &cr);
bool isutf8start(const CharReach &cr);
-/** \brief Boost-style hash free function. */
-static really_inline
-size_t hash_value(const CharReach &cr) {
- return cr.hash();
-}
-
} // namespace ue2
+namespace std {
+
+template<>
+struct hash<ue2::CharReach> {
+ size_t operator()(const ue2::CharReach &cr) const {
+ return cr.hash();
+ }
+};
+
+} // namespace std
+
#endif // NG_CHARREACH_H
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "container.h"
#include "graph_range.h"
#include "make_unique.h"
-#include "ue2_containers.h"
#include <map>
#include <set>
std::string str() const;
#endif
- friend size_t hash_value(const depth &d) {
- return d.val;
+ size_t hash() const {
+ return val;
}
private:
};
-inline size_t hash_value(const DepthMinMax &d) {
- return hash_all(d.min, d.max);
-}
-
/**
* \brief Merge two DepthMinMax values together to produce their union.
*/
} // namespace ue2
+namespace std {
+
+template<>
+struct hash<ue2::depth> {
+ size_t operator()(const ue2::depth &d) const {
+ return d.hash();
+ }
+};
+
+template<>
+struct hash<ue2::DepthMinMax> {
+ size_t operator()(const ue2::DepthMinMax &d) const {
+ return hash_all(d.min, d.max);
+ }
+};
+
+} // namespace std
+
#endif // DEPTH_H
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef UTIL_UE2_CONTAINERS_H_
-#define UTIL_UE2_CONTAINERS_H_
+#ifndef UTIL_FLAT_CONTAINERS_H
+#define UTIL_FLAT_CONTAINERS_H
#include "ue2common.h"
+#include "util/hash.h"
#include "util/operators.h"
#include "util/small_vector.h"
#include <type_traits>
#include <utility>
-#include <boost/functional/hash/hash_fwd.hpp>
#include <boost/iterator/iterator_facade.hpp>
-#include <boost/unordered/unordered_map.hpp>
-#include <boost/unordered/unordered_set.hpp>
namespace ue2 {
-/** \brief Unordered set container implemented internally as a hash table. */
-using boost::unordered_set;
-
-/** \brief Unordered map container implemented internally as a hash table. */
-using boost::unordered_map;
-
namespace flat_detail {
// Iterator facade that wraps an underlying iterator, so that we get our
friend void swap(flat_set &a, flat_set &b) {
a.swap(b);
}
-
- // Free hash function.
- friend size_t hash_value(const flat_set &a) {
- return boost::hash_range(a.begin(), a.end());
- }
};
/**
friend void swap(flat_map &a, flat_map &b) {
a.swap(b);
}
+};
+
+} // namespace ue2
+
+namespace std {
+
+template<typename T, typename Compare, typename Allocator>
+struct hash<ue2::flat_set<T, Compare, Allocator>> {
+    size_t operator()(const ue2::flat_set<T, Compare, Allocator> &f) const {
+ return ue2::ue2_hasher()(f);
+ }
+};
- // Free hash function.
- friend size_t hash_value(const flat_map &a) {
- return boost::hash_range(a.begin(), a.end());
+template<typename Key, typename T, typename Compare, typename Allocator>
+struct hash<ue2::flat_map<Key, T, Compare, Allocator>> {
+    size_t operator()(const ue2::flat_map<Key, T, Compare, Allocator> &f) const {
+ return ue2::ue2_hasher()(f);
}
};
-} // namespace
+} // namespace std
-#endif // UTIL_UE2_CONTAINERS_H_
+#endif // UTIL_FLAT_CONTAINERS_H
#include "container.h"
#include "ue2common.h"
+#include "util/flat_containers.h"
#include "util/graph_range.h"
-#include "util/ue2_containers.h"
+#include "util/unordered.h"
#include <boost/graph/depth_first_search.hpp>
#include <boost/graph/strong_components.hpp>
template<class Graph, class SourceCont, class OutCont>
void find_reachable(const Graph &g, const SourceCont &sources, OutCont *out) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- ue2::unordered_map<vertex_descriptor, boost::default_color_type> colours;
+ std::unordered_map<vertex_descriptor, boost::default_color_type> colours;
for (auto v : sources) {
boost::depth_first_visit(g, v,
template<class Graph, class SourceCont, class OutCont>
void find_unreachable(const Graph &g, const SourceCont &sources, OutCont *out) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- ue2::unordered_set<vertex_descriptor> reachable;
+ std::unordered_set<vertex_descriptor> reachable;
find_reachable(g, sources, &reachable);
template <class Graph>
bool has_parallel_edge(const Graph &g) {
using vertex_descriptor = typename Graph::vertex_descriptor;
- ue2::unordered_set<std::pair<vertex_descriptor, vertex_descriptor>> seen;
+ ue2_unordered_set<std::pair<vertex_descriptor, vertex_descriptor>> seen;
+
for (const auto &e : edges_range(g)) {
auto u = source(e, g);
auto v = target(e, g);
#ifndef UTIL_HASH_H
#define UTIL_HASH_H
-#include <iterator>
-#include <boost/functional/hash/hash_fwd.hpp>
+#include <functional>
+#include <type_traits>
+#include <utility>
namespace ue2 {
namespace hash_detail {
+inline
+void hash_combine_impl(size_t &seed, size_t value) {
+ // Note: constants explicitly truncated on 32-bit platforms.
+ const size_t a = (size_t)0x0b4e0ef37bc32127ULL;
+ const size_t b = (size_t)0x318f07b0c8eb9be9ULL;
+ seed ^= value * a;
+ seed += b;
+}
+
+/** \brief Helper that determines whether std::begin() exists for T. */
+template<typename T>
+struct is_container_check {
+private:
+ template<typename C>
+ static auto has_begin_function(const C &obj) -> decltype(std::begin(obj)) {
+ return std::begin(obj);
+ }
+ static void has_begin_function(...) {
+ return;
+ }
+ using has_begin_type = decltype(has_begin_function(std::declval<T>()));
+
+public:
+ static const bool value = !std::is_void<has_begin_type>::value;
+};
+
+/** \brief Type trait to enable on whether T is a container. */
+template<typename T>
+struct is_container
+ : public ::std::integral_constant<bool, is_container_check<T>::value> {};
+
+/** \brief Helper that determines whether T::hash() exists. */
+template<typename T>
+struct has_hash_member_check {
+private:
+ template<typename C>
+ static auto has_hash_member_function(const C &obj) -> decltype(obj.hash()) {
+ return obj.hash();
+ }
+ static void has_hash_member_function(...) {
+ return;
+ }
+ using has_hash = decltype(has_hash_member_function(std::declval<T>()));
+
+public:
+ static const bool value = !std::is_void<has_hash>::value;
+};
+
+/** \brief Type trait to enable on whether T::hash() exists. */
+template<typename T>
+struct has_hash_member
+ : public ::std::integral_constant<bool, has_hash_member_check<T>::value> {};
+
+/** \brief Default hash: falls back to std::hash. */
+template<typename T, typename Enable = void>
+struct ue2_hash {
+ using decayed_type = typename std::decay<T>::type;
+ size_t operator()(const T &obj) const {
+ return std::hash<decayed_type>()(obj);
+ }
+};
+
+/** \brief Hash for std::pair. */
+template<typename A, typename B>
+struct ue2_hash<std::pair<A, B>, void> {
+ size_t operator()(const std::pair<A, B> &p) const {
+ size_t v = 0;
+ hash_combine_impl(v, ue2_hash<A>()(p.first));
+ hash_combine_impl(v, ue2_hash<B>()(p.second));
+ return v;
+ }
+};
+
+/** \brief Hash for any type that has a hash() member function. */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<has_hash_member<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ return obj.hash();
+ }
+};
+
+/** \brief Hash for any container type that supports std::begin(). */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<is_container<T>::value &&
+ !has_hash_member<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ size_t v = 0;
+ for (const auto &elem : obj) {
+ using element_type = typename std::decay<decltype(elem)>::type;
+ hash_combine_impl(v, ue2_hash<element_type>()(elem));
+ }
+ return v;
+ }
+};
+
+/** \brief Hash for enum types. */
+template<typename T>
+struct ue2_hash<T, typename std::enable_if<std::is_enum<T>::value>::type> {
+ size_t operator()(const T &obj) const {
+ using utype = typename std::underlying_type<T>::type;
+ return ue2_hash<utype>()(static_cast<utype>(obj));
+ }
+};
+
+template<typename T>
+void hash_combine(size_t &seed, const T &obj) {
+ hash_combine_impl(seed, ue2_hash<T>()(obj));
+}
+
template<typename T>
void hash_build(size_t &v, const T &obj) {
- boost::hash_combine(v, obj);
+ hash_combine(v, obj);
}
template<typename T, typename... Args>
} // namespace hash_detail
+using hash_detail::hash_combine;
+
+/**
+ * \brief Hasher for general use.
+ *
+ * Provides operators for most standard containers and falls back to
+ * std::hash<T>.
+ */
+struct ue2_hasher {
+ template<typename T>
+ size_t operator()(const T &obj) const {
+ return hash_detail::ue2_hash<T>()(obj);
+ }
+};
+
/**
* \brief Computes the combined hash of all its arguments.
*
return v;
}
-/**
- * \brief Compute the hash of all the elements of any range on which we can
- * call std::begin() and std::end().
- */
-template<typename Range>
-size_t hash_range(const Range &r) {
- return boost::hash_range(std::begin(r), std::end(r));
-}
-
} // namespace ue2
#endif // UTIL_HASH_H
#ifndef UTIL_HASH_DYNAMIC_BITSET_H
#define UTIL_HASH_DYNAMIC_BITSET_H
+#include "hash.h"
+
#include <boost/dynamic_bitset.hpp>
-#include <boost/functional/hash/hash.hpp>
#include <iterator>
template<typename T>
void operator=(const T &val) const {
- boost::hash_combine(*out, val);
+ hash_combine(*out, val);
}
private:
return a.mask == b.mask && a.val == b.val;
}
-inline
-size_t hash_value(const mmbit_sparse_iter &iter) {
- return ue2::hash_all(iter.mask, iter.val);
-}
+namespace std {
+
+template<>
+struct hash<mmbit_sparse_iter> {
+ size_t operator()(const mmbit_sparse_iter &iter) const {
+ return ue2::hash_all(iter.mask, iter.val);
+ }
+};
+
+} // namespace std
namespace ue2 {
#include "container.h"
#include "noncopyable.h"
-#include "ue2_containers.h"
+#include "flat_containers.h"
#include "ue2common.h"
#include <algorithm>
a.topSquashDistance == b.topSquashDistance;
}
-inline
-size_t hash_value(const Report &r) {
- return hash_all(r.type, r.quashSom, r.minOffset, r.maxOffset, r.minLength,
- r.ekey, r.offsetAdjust, r.onmatch, r.revNfaIndex,
- r.somDistance, r.topSquashDistance);
-}
-
static inline
Report makeECallback(u32 report, s32 offsetAdjust, u32 ekey) {
Report ir(EXTERNAL_CALLBACK, report);
return true;
}
-} // namespace
+} // namespace ue2
+
+namespace std {
+
+template<>
+struct hash<ue2::Report> {
+ std::size_t operator()(const ue2::Report &r) const {
+ return ue2::hash_all(r.type, r.quashSom, r.minOffset, r.maxOffset,
+ r.minLength, r.ekey, r.offsetAdjust, r.onmatch,
+ r.revNfaIndex, r.somDistance, r.topSquashDistance);
+ }
+};
+
+} // namespace std
#endif // UTIL_REPORT_H
#include "util/compile_error.h"
#include "util/noncopyable.h"
#include "util/report.h"
-#include "util/ue2_containers.h"
#include <map>
#include <set>
+#include <unordered_map>
#include <vector>
namespace ue2 {
/** \brief Mapping from Report to ID (inverse of \ref reportIds
* vector). */
- unordered_map<Report, size_t> reportIdToInternalMap;
+ std::unordered_map<Report, size_t> reportIdToInternalMap;
/** \brief Mapping from ReportID to dedupe key. */
- unordered_map<ReportID, u32> reportIdToDedupeKey;
+ std::unordered_map<ReportID, u32> reportIdToDedupeKey;
/** \brief Mapping from ReportID to Rose program offset in bytecode. */
- unordered_map<ReportID, u32> reportIdToProgramOffset;
+ std::unordered_map<ReportID, u32> reportIdToProgramOffset;
/** \brief Mapping from external match ids to information about that
* id. */
- unordered_map<ReportID, external_report_info> externalIdMap;
+ std::unordered_map<ReportID, external_report_info> externalIdMap;
/** \brief Mapping from expression index to exhaustion key. */
std::map<s64a, u32> toExhaustibleKeyMap;
#include "util/noncopyable.h"
#include "util/operators.h"
-#include <boost/functional/hash.hpp>
#include <boost/graph/properties.hpp> /* vertex_index_t, ... */
#include <boost/pending/property.hpp> /* no_property */
#include <boost/property_map/property_map.hpp>
#include <boost/iterator/iterator_adaptor.hpp>
#include <boost/iterator/iterator_facade.hpp>
+#include <functional> /* hash */
#include <tuple> /* tie */
+#include <type_traits> /* is_same, etc */
#include <utility> /* pair, declval */
/*
}
bool operator==(const vertex_descriptor b) const { return p == b.p; }
- friend size_t hash_value(vertex_descriptor v) {
- using boost::hash_value;
- return hash_value(v.serial);
+ size_t hash() const {
+ return std::hash<u64a>()(serial);
}
private:
}
bool operator==(const edge_descriptor b) const { return p == b.p; }
- friend size_t hash_value(edge_descriptor e) {
- using boost::hash_value;
- return hash_value(e.serial);
+ size_t hash() const {
+ return std::hash<u64a>()(serial);
}
private:
using boost::vertex_index;
using boost::edge_index;
-}
+} // namespace ue2
namespace boost {
std::declval<const Graph &>())) const_type;
};
-}
+} // namespace boost
+
+namespace std {
+
+/* Specialization of std::hash so that vertex_descriptor can be used in
+ * unordered containers. */
+template<typename Graph>
+struct hash<ue2::graph_detail::vertex_descriptor<Graph>> {
+ using vertex_descriptor = ue2::graph_detail::vertex_descriptor<Graph>;
+ std::size_t operator()(const vertex_descriptor &v) const {
+ return v.hash();
+ }
+};
+
+/* Specialization of std::hash so that edge_descriptor can be used in
+ * unordered containers. */
+template<typename Graph>
+struct hash<ue2::graph_detail::edge_descriptor<Graph>> {
+ using edge_descriptor = ue2::graph_detail::edge_descriptor<Graph>;
+ std::size_t operator()(const edge_descriptor &e) const {
+ return e.hash();
+ }
+};
+
+} // namespace std
#endif
/** \file
* \brief Tools for string manipulation, ue2_literal definition.
*/
+
+#include "ue2string.h"
+
#include "charreach.h"
#include "compare.h"
-#include "ue2string.h"
#include <algorithm>
+#include <cstring>
#include <iomanip>
#include <sstream>
#include <string>
std::vector<bool> nocase; /* for trolling value */
};
-inline
-size_t hash_value(const ue2_literal::elem &elem) {
- return hash_all(elem.c, elem.nocase);
-}
-
-inline
-size_t hash_value(const ue2_literal &lit) { return hash_range(lit); }
-
/// Return a reversed copy of this literal.
ue2_literal reverse_literal(const ue2_literal &in);
} // namespace ue2
+namespace std {
+
+template<>
+struct hash<ue2::ue2_literal::elem> {
+ size_t operator()(const ue2::ue2_literal::elem &elem) const {
+ return ue2::hash_all(elem.c, elem.nocase);
+ }
+};
+
+template<>
+struct hash<ue2::ue2_literal> {
+ size_t operator()(const ue2::ue2_literal &lit) const {
+ return ue2::ue2_hasher()(lit);
+ }
+};
+
+} // namespace std
+
#endif
--- /dev/null
+/*
+ * Copyright (c) 2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_UNORDERED_H
+#define UTIL_UNORDERED_H
+
+/**
+ * \file
+ * \brief Unordered set and map containers that default to using our own hasher.
+ */
+
+#include "hash.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+namespace ue2 {
+
+template<class Key, class Hash = ue2_hasher>
+using ue2_unordered_set = std::unordered_set<Key, Hash>;
+
+template<class Key, class T, class Hash = ue2_hasher>
+using ue2_unordered_map = std::unordered_map<Key, T, Hash>;
+
+} // namespace ue2
+
+#endif // UTIL_UNORDERED_H
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "gtest/gtest.h"
#include "util/bitfield.h"
-#include "util/ue2_containers.h"
#include <algorithm>
+#include <unordered_set>
+using namespace std;
using namespace ue2;
template<size_t N>
TYPED_TEST(BitfieldTest, unordered_set) {
const size_t size = TypeParam::size();
- // Exercise the hash_value free function by adding bitfields to an
+ // Exercise the hash specialisation by adding bitfields to an
// unordered_set.
- ue2::unordered_set<TypeParam> s;
+ unordered_set<TypeParam> s;
s.reserve(size);
for (size_t i = 0; i < size; ++i) {
#include "config.h"
#include "util/depth.h"
-#include "util/ue2_containers.h"
#include "gtest/gtest.h"
+#include <unordered_set>
+
+using namespace std;
using namespace ue2;
static UNUSED
}
TEST(depth, unordered_set) {
- ue2::unordered_set<depth> depths;
+ unordered_set<depth> depths;
for (const auto &val : finite_values) {
depths.emplace(val);
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "config.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "ue2common.h"
#include "gtest/gtest.h"
ASSERT_LE(1ULL << 24, f.max_size());
}
+template<typename FlatMap>
+size_t hash_value(const FlatMap &f) {
+ return std::hash<FlatMap>()(f);
+}
+
TEST(flat_map, hash_value) {
const vector<pair<u32, u32>> input = {
{0, 0}, {3, 1}, {76, 2}, {132, 3}, {77, 4}, {99999, 5}, {100, 6}};
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "config.h"
-#include "util/ue2_containers.h"
+#include "util/flat_containers.h"
#include "ue2common.h"
#include "gtest/gtest.h"
ASSERT_LE(1ULL << 24, f.max_size());
}
+template<typename FlatSet>
+size_t hash_value(const FlatSet &f) {
+ return std::hash<FlatSet>()(f);
+}
+
TEST(flat_set, hash_value) {
const vector<u32> input = {0, 15, 3, 1, 20, 32768,
24000000, 17, 100, 101, 104, 99999};
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
NFAVertex pivot = c;
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(src, pivot, &lhs, &lhs_map, &rhs, &rhs_map);
NFAVertex pivot = c;
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(src, pivot, &lhs, &lhs_map, &rhs, &rhs_map);
pivots.push_back(d);
pivots.push_back(g);
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(src, pivots, &lhs, &lhs_map, &rhs, &rhs_map);
pivots.push_back(d);
pivots.push_back(g);
- ue2::unordered_map<NFAVertex, NFAVertex> lhs_map;
- ue2::unordered_map<NFAVertex, NFAVertex> rhs_map;
+ unordered_map<NFAVertex, NFAVertex> lhs_map;
+ unordered_map<NFAVertex, NFAVertex> rhs_map;
splitGraph(src, pivots, &lhs, &lhs_map, &rhs, &rhs_map);
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "smallwrite/smallwrite_build.h"
#include "som/slot_manager.h"
-using std::vector;
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+using namespace std;
using namespace ue2;
static
static
size_t numUniqueSuffixGraphs(const RoseGraph &g) {
- ue2::unordered_set<const NGHolder *> seen;
+ unordered_set<const NGHolder *> seen;
for (const auto &v : vertices_range(g)) {
if (g[v].suffix) {
#include "util/container.h"
#include "util/graph_range.h"
#include "util/make_unique.h"
-#include "util/ue2_containers.h"
#include "util/ue2string.h"
#include "util/unicode_def.h"
#include "util/unicode_set.h"
#include <memory>
#include <set>
#include <sstream>
+#include <unordered_set>
#include <vector>
#include <boost/utility.hpp>
vector<unique_ptr<VertexPath>> open;
open.push_back(ue2::make_unique<VertexPath>(1, g.start));
- ue2::unordered_set<NFAVertex> one_way_in;
+ unordered_set<NFAVertex> one_way_in;
for (const auto &v : vertices_range(g)) {
if (in_degree(v, g) <= 1) {
one_way_in.insert(v);
#include "util/compare.h"
#include "util/report.h"
#include "util/report_manager.h"
+#include "util/unordered.h"
#include <algorithm>
}
private:
- unordered_map<pair<NFAVertex, NFAVertex>, NFAEdge> cache;
+ ue2_unordered_map<pair<NFAVertex, NFAVertex>, NFAEdge> cache;
};
struct fmstate {