* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Castle: multi-tenant repeat engine, compiler code.
*/
+
#include "castlecompile.h"
#include "castle_internal.h"
}
}
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
buildCastle(const CastleProto &proto,
const map<u32, vector<vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm) {
total_size = ROUNDUP_N(total_size, alignof(mmbit_sparse_iter));
total_size += byte_length(stale_iter); // stale sparse iter
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
nfa->type = verify_u8(CASTLE_NFA);
nfa->length = verify_u32(total_size);
nfa->nPositions = verify_u32(subs.size());
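Nearly every hunk in this patch follows the pattern above: an aligned_zmalloc_unique<NFA>(size) allocation becomes make_bytecode_ptr<NFA>(size). For orientation, below is a minimal sketch of the behaviour these call sites assume from bytecode_ptr — an owning pointer to a variable-length, aligned, zero-initialised block. It is an illustration under stated assumptions, not the real util/bytecode_ptr.h; names carry a _sketch suffix to make that explicit.

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <new>
    #include <utility>

    // Sketch only: what the call sites in this patch rely on.
    template <typename T>
    class bytecode_ptr_sketch {
    public:
        bytecode_ptr_sketch() = default;
        explicit bytecode_ptr_sketch(std::size_t bytes,
                                     std::size_t align = alignof(T)) {
            if (align < sizeof(void *)) {
                align = sizeof(void *); // posix_memalign (POSIX) minimum
            }
            void *mem = nullptr;
            if (posix_memalign(&mem, align, bytes) != 0) {
                throw std::bad_alloc();
            }
            std::memset(mem, 0, bytes); // call sites rely on zeroed storage
            ptr = static_cast<T *>(mem);
        }
        bytecode_ptr_sketch(bytecode_ptr_sketch &&other) noexcept
            : ptr(std::exchange(other.ptr, nullptr)) {}
        bytecode_ptr_sketch &operator=(bytecode_ptr_sketch &&other) noexcept {
            std::swap(ptr, other.ptr);
            return *this;
        }
        ~bytecode_ptr_sketch() { std::free(ptr); }

        T *get() const { return ptr; }
        T &operator*() const { return *ptr; }
        T *operator->() const { return ptr; }
        explicit operator bool() const { return ptr != nullptr; }

    private:
        T *ptr = nullptr;
    };

    template <typename T>
    bytecode_ptr_sketch<T> make_bytecode_ptr_sketch(std::size_t bytes,
                                                    std::size_t align = alignof(T)) {
        return bytecode_ptr_sketch<T>(bytes, align);
    }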
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Castle: multi-tenant repeat engine, compiler code.
*/
#include "nfa_kind.h"
#include "ue2common.h"
#include "nfagraph/ng_repeat.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include "util/depth.h"
#include "util/ue2_containers.h"
* NOTE: Tops must be contiguous, i.e. \ref remapCastleTops must have been run
* first.
*/
-ue2::aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
buildCastle(const CastleProto &proto,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm);
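Per the NOTE above, tops must be made contiguous before buildCastle runs. A hypothetical call sequence follows; the exact remapCastleTops signature is an assumption here, not quoted from castlecompile.h.

    #include "castlecompile.h" // path assumed

    #include <map>
    #include <vector>

    using namespace ue2;

    // Hypothetical wrapper showing the required ordering.
    bytecode_ptr<NFA>
    buildCastleContiguous(CastleProto &proto,
                          const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
                          const CompileContext &cc, const ReportManager &rm) {
        std::map<u32, u32> top_map;
        remapCastleTops(proto, top_map); // tops become 0, 1, 2, ... (assumed)
        return buildCastle(proto, triggers, cc, rm);
    }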
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "grey.h"
#include "mcclellancompile.h"
#include "nfa_internal.h"
-#include "util/alloc.h"
#include "util/compile_context.h"
#include "util/container.h"
#include "util/graph_range.h"
}
}
-aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
- const CompileContext &cc,
- const ReportManager &rm) {
+bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
+ const CompileContext &cc,
+ const ReportManager &rm) {
assert(somPrecision == 2 || somPrecision == 4 || somPrecision == 8
|| !cc.streaming);
map<dstate_id_t, gough_accel_state_info> accel_allowed;
find_allowed_accel_states(*cfg, blocks, &accel_allowed);
gough_build_strat gbs(raw, *cfg, rm, accel_allowed);
- aligned_unique_ptr<NFA> basic_dfa = mcclellanCompile_i(raw, gbs, cc);
+ auto basic_dfa = mcclellanCompile_i(raw, gbs, cc);
assert(basic_dfa);
if (!basic_dfa) {
return nullptr;
gi.stream_som_loc_width = somPrecision;
u32 gough_size = ROUNDUP_N(curr_offset, 16);
- aligned_unique_ptr<NFA> gough_dfa = aligned_zmalloc_unique<NFA>(gough_size);
+ auto gough_dfa = make_bytecode_ptr<NFA>(gough_size);
memcpy(gough_dfa.get(), basic_dfa.get(), basic_dfa->length);
memcpy((char *)gough_dfa.get() + haig_offset, &gi, sizeof(gi));
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "mcclellancompile.h"
#include "nfa_kind.h"
#include "ue2common.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
#include "util/order_check.h"
* som */
};
-aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
- const CompileContext &cc,
- const ReportManager &rm);
+bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
+ const CompileContext &cc,
+ const ReportManager &rm);
} // namespace ue2
-#endif
+#endif // GOUGHCOMPILE_H
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Main NFA build code.
*/
+
#include "limex_compile.h"
#include "accel.h"
}
static
- aligned_unique_ptr<NFA> generateNfa(const build_info &args) {
+ bytecode_ptr<NFA> generateNfa(const build_info &args) {
if (args.num_states > NFATraits<dtype>::maxStates) {
return nullptr;
}
size_t nfaSize = sizeof(NFA) + offset;
DEBUG_PRINTF("nfa size %zu\n", nfaSize);
- auto nfa = aligned_zmalloc_unique<NFA>(nfaSize);
+ auto nfa = make_bytecode_ptr<NFA>(nfaSize);
assert(nfa); // otherwise we would have thrown std::bad_alloc
implNFA_t *limex = (implNFA_t *)getMutableImplNfa(nfa.get());
template<NFAEngineType dtype>
struct generateNfa {
- static aligned_unique_ptr<NFA> call(const build_info &args) {
+ static bytecode_ptr<NFA> call(const build_info &args) {
return Factory<dtype>::generateNfa(args);
}
};
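The generateNfa functor above exists so that a runtime NFAEngineType value can select among the compile-time Factory<dtype> specialisations. A self-contained sketch of that dispatch shape follows; the enumerators and names are hypothetical, not Hyperscan's actual dispatch machinery.

    // Sketch: map a runtime engine type onto compile-time
    // specialisations, as the generateNfa functor above enables.
    enum EngineTypeSketch { SKETCH_LIMEX_32, SKETCH_LIMEX_64 };

    template <EngineTypeSketch t>
    struct FactorySketch {
        // stand-in for Factory<dtype>::generateNfa(args)
        static int generateNfa(int numStates) {
            return numStates + static_cast<int>(t);
        }
    };

    template <EngineTypeSketch t>
    struct generateSketch {
        static int call(int numStates) {
            return FactorySketch<t>::generateNfa(numStates);
        }
    };

    int dispatchSketch(EngineTypeSketch t, int numStates) {
        switch (t) {
        case SKETCH_LIMEX_32:
            return generateSketch<SKETCH_LIMEX_32>::call(numStates);
        case SKETCH_LIMEX_64:
            return generateSketch<SKETCH_LIMEX_64>::call(numStates);
        }
        return 0;
    }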
return rv;
}
-aligned_unique_ptr<NFA> generate(NGHolder &h,
- const ue2::unordered_map<NFAVertex, u32> &states,
- const vector<BoundedRepeatData> &repeats,
- const map<NFAVertex, NFAStateSet> &reportSquashMap,
- const map<NFAVertex, NFAStateSet> &squashMap,
- const map<u32, set<NFAVertex>> &tops,
- const set<NFAVertex> &zombies,
- bool do_accel,
- bool stateCompression,
- u32 hint,
- const CompileContext &cc) {
+bytecode_ptr<NFA> generate(NGHolder &h,
+ const ue2::unordered_map<NFAVertex, u32> &states,
+ const vector<BoundedRepeatData> &repeats,
+ const map<NFAVertex, NFAStateSet> &reportSquashMap,
+ const map<NFAVertex, NFAStateSet> &squashMap,
+ const map<u32, set<NFAVertex>> &tops,
+ const set<NFAVertex> &zombies, bool do_accel,
+ bool stateCompression, u32 hint,
+ const CompileContext &cc) {
const u32 num_states = max_state(states) + 1;
DEBUG_PRINTF("total states: %u\n", num_states);
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Main NFA build code.
*/
#include <memory>
#include <vector>
-#include "ue2common.h"
#include "nfagraph/ng_holder.h"
#include "nfagraph/ng_squash.h" // for NFAStateSet
-#include "util/alloc.h"
+#include "ue2common.h"
+#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
struct NFA;
struct BoundedRepeatData;
struct CompileContext;
-/** \brief Construct a LimEx NFA from an NGHolder.
+/**
+ * \brief Construct a LimEx NFA from an NGHolder.
*
* \param g Input NFA graph. Must have state IDs assigned.
* \param repeats Bounded repeat information, if any.
* \return a built NFA, or nullptr if no NFA could be constructed for this
* graph.
*/
-aligned_unique_ptr<NFA> generate(NGHolder &g,
+bytecode_ptr<NFA> generate(NGHolder &g,
const ue2::unordered_map<NFAVertex, u32> &states,
const std::vector<BoundedRepeatData> &repeats,
const std::map<NFAVertex, NFAStateSet> &reportSquashMap,
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
}
static
-aligned_unique_ptr<NFA> mcclellanCompile16(dfa_info &info,
- const CompileContext &cc,
- set<dstate_id_t> *accel_states) {
+bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
+ set<dstate_id_t> *accel_states) {
DEBUG_PRINTF("building mcclellan 16\n");
vector<u32> reports; /* index in ri for the appropriate report list */
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
char *nfa_base = (char *)nfa.get();
populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
}
static
-aligned_unique_ptr<NFA> mcclellanCompile8(dfa_info &info,
- const CompileContext &cc,
- set<dstate_id_t> *accel_states) {
+bytecode_ptr<NFA> mcclellanCompile8(dfa_info &info, const CompileContext &cc,
+ set<dstate_id_t> *accel_states) {
DEBUG_PRINTF("building mcclellan 8\n");
vector<u32> reports;
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
char *nfa_base = (char *)nfa.get();
mcclellan *m = (mcclellan *)getMutableImplNfa(nfa.get());
- allocateFSN8(info, accel_escape_info, &m->accel_limit_8, &m->accept_limit_8);
+ allocateFSN8(info, accel_escape_info, &m->accel_limit_8,
+ &m->accept_limit_8);
populateBasicInfo(sizeof(u8), info, total_size, aux_offset, accel_offset,
accel_escape_info.size(), arb, single, nfa.get());
return false;
}
-aligned_unique_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
- const CompileContext &cc,
- set<dstate_id_t> *accel_states) {
+bytecode_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
+ const CompileContext &cc,
+ set<dstate_id_t> *accel_states) {
u16 total_daddy = 0;
dfa_info info(strat);
bool using8bit = cc.grey.allowMcClellan8 && info.size() <= 256;
info.size() * info.impl_alpha_size, info.size(),
info.impl_alpha_size);
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
if (!using8bit) {
nfa = mcclellanCompile16(info, cc, accel_states);
} else {
return nfa;
}
-aligned_unique_ptr<NFA> mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm,
- set<dstate_id_t> *accel_states) {
+bytecode_ptr<NFA> mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm,
+ set<dstate_id_t> *accel_states) {
mcclellan_build_strat mbs(raw, rm);
return mcclellanCompile_i(raw, mbs, cc, accel_states);
}
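Note that mcclellanCompile is now a thin wrapper: the build strategy is injected into mcclellanCompile_i, which is how Gough reuses the same path with its gough_build_strat (see the goughCompile hunk earlier). A hedged sketch of plugging in a custom strategy; the subclass name is hypothetical.

    #include "mcclellancompile.h"

    using namespace ue2;

    // Hypothetical strategy subclass driving the shared compile path,
    // as gough_build_strat does in goughCompile above.
    class my_build_strat : public mcclellan_build_strat {
    public:
        using mcclellan_build_strat::mcclellan_build_strat;
        // ...override accel hooks (accelSize, max_stop_char, ...) as needed.
    };

    bytecode_ptr<NFA> compileWithStrat(raw_dfa &raw, const CompileContext &cc,
                                       const ReportManager &rm) {
        my_build_strat strat(raw, rm); // ctor shape as in the wrapper above
        return mcclellanCompile_i(raw, strat, cc);
    }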
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "accel_dfa_build_strat.h"
#include "rdfa.h"
#include "ue2common.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
#include <memory>
std::vector<u32> &reports /* out */,
std::vector<u32> &reports_eod /* out */,
u8 *isSingleReport /* out */,
- ReportID *arbReport /* out */) const override;
+ ReportID *arbReport /* out */) const override;
size_t accelSize(void) const override;
u32 max_allowed_offset_accel() const override;
u32 max_stop_char() const override;
/* accel_states: (optional) on success, is filled with the set of accelerable
* states */
-ue2::aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
std::set<dstate_id_t> *accel_states = nullptr);
/* used internally by mcclellan/haig/gough compile process */
-ue2::aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
const CompileContext &cc,
std::set<dstate_id_t> *accel_states = nullptr);
} // namespace ue2
-#endif
+#endif // MCCLELLANCOMPILE_H
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
}
static
-aligned_unique_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
+bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
const map<dstate_id_t, AccelScheme> &accel_escape_info,
const Grey &grey) {
DEBUG_PRINTF("building mcsheng 16\n");
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
}
static
-aligned_unique_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
+bytecode_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
const map<dstate_id_t, AccelScheme> &accel_escape_info) {
DEBUG_PRINTF("building mcsheng 8\n");
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
return nfa;
}
-aligned_unique_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm) {
+bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm) {
if (!cc.grey.allowMcSheng) {
return nullptr;
}
return nullptr;
}
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
if (!using8bit) {
nfa = mcshengCompile16(info, sheng_end, accel_escape_info, cc.grey);
} else {
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#ifndef MCSHENGCOMPILE_H
#define MCSHENGCOMPILE_H
-#include "accel_dfa_build_strat.h"
-#include "rdfa.h"
#include "ue2common.h"
-#include "util/alloc.h"
-#include "util/ue2_containers.h"
-
-#include <memory>
+#include "util/bytecode_ptr.h"
struct NFA;
class ReportManager;
struct CompileContext;
+struct raw_dfa;
-ue2::aligned_unique_ptr<NFA>
-mcshengCompile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm);
+bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm);
bool has_accel_mcsheng(const NFA *nfa);
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
return true; /* consider the sheng region as accelerated */
}
-aligned_unique_ptr<NFA> shengCompile(raw_dfa &raw,
- const CompileContext &cc,
- const ReportManager &rm,
- set<dstate_id_t> *accel_states) {
+bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm,
+ set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
return nullptr;
DEBUG_PRINTF("NFA: %u, aux: %u, reports: %u, accel: %u, total: %u\n",
nfa_size, total_aux, total_reports, total_accel, total_size);
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
populateBasicInfo(nfa.get(), info, accelInfo, nfa_size, reports_offset,
accel_offset, total_size, total_size - sizeof(NFA));
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SHENGCOMPILE_H_
-#define SHENGCOMPILE_H_
+#ifndef SHENGCOMPILE_H
+#define SHENGCOMPILE_H
#include "accel_dfa_build_strat.h"
#include "rdfa.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include "util/charreach.h"
#include "util/ue2_containers.h"
raw_dfa &rdfa;
};
-aligned_unique_ptr<NFA>
-shengCompile(raw_dfa &raw, const CompileContext &cc, const ReportManager &rm,
- std::set<dstate_id_t> *accel_states = nullptr);
+bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm,
+ std::set<dstate_id_t> *accel_states = nullptr);
struct sheng_escape_info {
CharReach outs;
} // namespace ue2
-#endif /* SHENGCOMPILE_H_ */
+#endif /* SHENGCOMPILE_H */
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
- * \brief Tamarama: container engine for exclusive engines,
- * compiler code.
+/**
+ * \file
+ * \brief Tamarama: container engine for exclusive engines, compiler code.
*/
#include "config.h"
- * returns via out_top_remap, a mapping indicating how tops in the subengines in
- * relate to the tamarama's tops.
+ * returns via out_top_remap, a mapping indicating how tops in the subengines
+ * relate to the tamarama's tops.
*/
-aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
- map<pair<const NFA *, u32>, u32> &out_top_remap) {
+bytecode_ptr<NFA>
+buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
+ map<pair<const NFA *, u32>, u32> &out_top_remap) {
vector<u32> top_base;
remapTops(tamaInfo, top_base, out_top_remap);
// use subSize as a sentinel value for no active subengines,
// so add one to subSize here
u32 activeIdxSize = calcPackedBytes(subSize + 1);
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
+ auto nfa = make_bytecode_ptr<NFA>(total_size);
nfa->type = verify_u8(TAMARAMA_NFA);
nfa->length = verify_u32(total_size);
nfa->queueIndex = queue;
copy_bytes(ptr, top_base);
ptr += byte_length(top_base);
- u32 *offsets = (u32*)ptr;
+ u32 *offsets = (u32 *)ptr;
char *sub_nfa_offset = ptr + sizeof(u32) * subSize;
copyInSubnfas(base_offset, *nfa, tamaInfo, offsets, sub_nfa_offset,
activeIdxSize);
/*
- * Copyright (c) 2016, Intel Corporation
+ * Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
- * \brief Tamarama: container engine for exclusive engines, compiler code.
+/**
+ * \file
+ * \brief Tamarama: container engine for exclusive engines, compiler code.
*/
#ifndef NFA_TAMARAMACOMPILE_H
#define NFA_TAMARAMACOMPILE_H
#include "ue2common.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include <map>
#include <set>
namespace ue2 {
/**
- * \brief A TamaProto that contains top remapping and reports info
+ * \brief A TamaProto that contains top remapping and reports info.
*/
struct TamaProto {
void add(const NFA *n, const u32 id, const u32 top,
};
/**
- * \brief Contruction info for a Tamarama engine:
+ * \brief Construction info for a Tamarama engine:
* contains at least two subengines.
*
* A TamaInfo is converted into a single NFA, with each top triggering a
static constexpr size_t max_occupancy = 65536; // arbitrary limit
/** \brief Add a new subengine. */
- void add(NFA* sub, const std::set<u32> &top);
+ void add(NFA *sub, const std::set<u32> &top);
/** \brief All the subengines */
std::vector<NFA *> subengines;
- * returns via out_top_remap, a mapping indicating how tops in the subengines in
- * relate to the tamarama's tops.
+ * returns via out_top_remap, a mapping indicating how tops in the subengines
+ * relate to the tamarama's tops.
*/
-ue2::aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo,
- const u32 queue,
- std::map<std::pair<const NFA *, u32>, u32> &out_top_remap);
+bytecode_ptr<NFA>
+buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
+ std::map<std::pair<const NFA *, u32>, u32> &out_top_remap);
+
} // namespace ue2
#endif // NFA_TAMARAMACOMPILE_H
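To make the remapping contract concrete, here is a hypothetical use of the API declared above; subengine construction and queue numbering are elided, and the include path is assumed.

    #include "tamaramacompile.h" // path assumed

    #include <map>
    #include <set>
    #include <utility>

    using namespace ue2;

    // Hypothetical usage: pack two exclusive subengines into one
    // Tamarama container NFA.
    bytecode_ptr<NFA> packExclusive(NFA *subA, NFA *subB, u32 queue) {
        TamaInfo info;
        info.add(subA, std::set<u32>{0});    // tops handled by subengine A
        info.add(subB, std::set<u32>{0, 1}); // tops handled by subengine B

        std::map<std::pair<const NFA *, u32>, u32> top_remap;
        auto tama = buildTamarama(info, queue, top_remap);
        // top_remap now maps each (subengine, original top) pair to the
        // container top that must be used to trigger it.
        return tama;
    }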
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Large Bounded Repeat (LBR) engine build code.
*/
}
template <class LbrStruct> static
-aligned_unique_ptr<NFA> makeLbrNfa(NFAEngineType nfa_type,
- enum RepeatType rtype,
- const depth &repeatMax) {
+bytecode_ptr<NFA> makeLbrNfa(NFAEngineType nfa_type, enum RepeatType rtype,
+ const depth &repeatMax) {
size_t tableLen = 0;
if (rtype == REPEAT_SPARSE_OPTIMAL_P) {
tableLen = sizeof(u64a) * (repeatMax + 1);
}
size_t len = sizeof(NFA) + sizeof(LbrStruct) + sizeof(RepeatInfo) +
tableLen + sizeof(u64a);
- aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(len);
+ auto nfa = make_bytecode_ptr<NFA>(len);
nfa->type = verify_u8(nfa_type);
nfa->length = verify_u32(len);
return nfa;
}
static
-aligned_unique_ptr<NFA> buildLbrDot(const CharReach &cr, const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> buildLbrDot(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
if (!cr.all()) {
return nullptr;
}
}
static
-aligned_unique_ptr<NFA> buildLbrVerm(const CharReach &cr,
- const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> buildLbrVerm(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
const CharReach escapes(~cr);
if (escapes.count() != 1) {
}
static
-aligned_unique_ptr<NFA> buildLbrNVerm(const CharReach &cr,
- const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> buildLbrNVerm(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
const CharReach escapes(cr);
if (escapes.count() != 1) {
}
static
-aligned_unique_ptr<NFA> buildLbrShuf(const CharReach &cr,
- const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> buildLbrShuf(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
is_reset);
auto nfa = makeLbrNfa<lbr_shuf>(LBR_NFA_SHUF, rtype, repeatMax);
}
static
-aligned_unique_ptr<NFA> buildLbrTruf(const CharReach &cr,
- const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> buildLbrTruf(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
is_reset);
auto nfa = makeLbrNfa<lbr_truf>(LBR_NFA_TRUF, rtype, repeatMax);
}
static
-aligned_unique_ptr<NFA> constructLBR(const CharReach &cr,
- const depth &repeatMin,
- const depth &repeatMax, u32 minPeriod,
- bool is_reset, ReportID report) {
+bytecode_ptr<NFA> constructLBR(const CharReach &cr, const depth &repeatMin,
+ const depth &repeatMax, u32 minPeriod,
+ bool is_reset, ReportID report) {
DEBUG_PRINTF("bounds={%s,%s}, cr=%s (count %zu), report=%u\n",
repeatMin.str().c_str(), repeatMax.str().c_str(),
describeClass(cr, 20, CC_OUT_TEXT).c_str(), cr.count(),
assert(repeatMin <= repeatMax);
assert(repeatMax.is_reachable());
- aligned_unique_ptr<NFA> nfa
- = buildLbrDot(cr, repeatMin, repeatMax, minPeriod, is_reset, report);
+ auto nfa =
+ buildLbrDot(cr, repeatMin, repeatMax, minPeriod, is_reset, report);
if (!nfa) {
nfa = buildLbrVerm(cr, repeatMin, repeatMax, minPeriod, is_reset,
return nfa;
}
-aligned_unique_ptr<NFA> constructLBR(const CastleProto &proto,
- const vector<vector<CharReach>> &triggers,
- const CompileContext &cc,
- const ReportManager &rm) {
+bytecode_ptr<NFA> constructLBR(const CastleProto &proto,
+ const vector<vector<CharReach>> &triggers,
+ const CompileContext &cc,
+ const ReportManager &rm) {
if (!cc.grey.allowLbr) {
return nullptr;
}
}
/** \brief Construct an LBR engine from the given graph \p g. */
-aligned_unique_ptr<NFA> constructLBR(const NGHolder &g,
- const vector<vector<CharReach>> &triggers,
- const CompileContext &cc,
- const ReportManager &rm) {
+bytecode_ptr<NFA> constructLBR(const NGHolder &g,
+ const vector<vector<CharReach>> &triggers,
+ const CompileContext &cc,
+ const ReportManager &rm) {
if (!cc.grey.allowLbr) {
return nullptr;
}
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Large Bounded Repeat (LBR) engine build code.
*/
#define NG_LBR_H
#include "ue2common.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include <memory>
#include <vector>
struct Grey;
/** \brief Construct an LBR engine from the given graph \p g. */
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructLBR(const NGHolder &g,
const std::vector<std::vector<CharReach>> &triggers,
const CompileContext &cc, const ReportManager &rm);
-/** \brief Construct an LBR engine from the given CastleProto, which should
- * contain only one repeat. */
-aligned_unique_ptr<NFA>
+/**
+ * \brief Construct an LBR engine from the given CastleProto, which should
+ * contain only one repeat.
+ */
+bytecode_ptr<NFA>
constructLBR(const CastleProto &proto,
const std::vector<std::vector<CharReach>> &triggers,
const CompileContext &cc, const ReportManager &rm);
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Limex NFA construction code.
*/
+
#include "ng_limex.h"
#include "grey.h"
}
static
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
zombies, do_accel, compress_state, hint, cc);
}
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
#ifndef RELEASE_BUILD
// Variant that allows a hint to be specified.
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
#endif // RELEASE_BUILD
static
-aligned_unique_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
- const CompileContext &cc) {
+bytecode_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
+ const CompileContext &cc) {
// Make a mutable copy of the graph that we can renumber etc.
NGHolder h;
cloneHolder(h, h_in);
zombies, false, false, hint, cc);
}
-aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h_in,
- const CompileContext &cc) {
+bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h_in,
+ const CompileContext &cc) {
u32 hint = INVALID_NFA; // no hint
return constructReversedNFA_i(h_in, hint, cc);
}
#ifndef RELEASE_BUILD
// Variant that allows a hint to be specified.
-aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h_in, u32 hint,
- const CompileContext &cc) {
+bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h_in, u32 hint,
+ const CompileContext &cc) {
return constructReversedNFA_i(h_in, hint, cc);
}
#endif // RELEASE_BUILD
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief Limex NFA construction code.
*/
#include "ue2common.h"
#include "som/som.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include <map>
#include <memory>
class ReportManager;
struct CompileContext;
-/** \brief Determine if the given graph is implementable as an NFA.
+/**
+ * \brief Determine if the given graph is implementable as an NFA.
*
* Returns zero if the NFA is not implementable (usually because it has too
* many states for any of our models). Otherwise returns the number of states.
u32 isImplementableNFA(const NGHolder &g, const ReportManager *rm,
const CompileContext &cc);
-/** \brief Late-stage graph reductions.
+/**
+ * \brief Late-stage graph reductions.
*
* This will call \ref removeRedundancy and apply its changes to the given
- * holder only if it is implementable afterwards. */
-void reduceImplementableGraph(NGHolder &g, som_type som, const ReportManager *rm,
+ * holder only if it is implementable afterwards.
+ */
+void reduceImplementableGraph(NGHolder &g, som_type som,
+ const ReportManager *rm,
const CompileContext &cc);
/**
u32 countAccelStates(const NGHolder &g, const ReportManager *rm,
const CompileContext &cc);
-/** \brief Construct an NFA from the given NFAGraph.
+/**
+ * \brief Construct an NFA from the given graph.
*
- * Returns zero if the NFA is not implementable (usually because it has too
- * many states for any of our models). Otherwise returns the number of states.
- * Note: this variant of the function allows a model to be specified with the
- * \a hint parameter.
+ * Returns nullptr if the NFA is not implementable (usually because it has
+ * too many states for any of our models); otherwise returns the built NFA.
*/
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
bool compress_state, const CompileContext &cc);
-/** \brief Build a reverse NFA from the graph given, which should have already
+/**
+ * \brief Build a reverse NFA from the graph given, which should have already
* been reversed.
*
* Used for reverse NFAs used in SOM mode.
*/
-aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h,
- const CompileContext &cc);
+bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h,
+ const CompileContext &cc);
#ifndef RELEASE_BUILD
-/** \brief Construct an NFA (with model type hint) from the given NFAGraph.
+/**
+ * \brief Construct an NFA (with model type hint) from the given graph.
*
- * Returns zero if the NFA is not implementable (usually because it has too
- * many states for any of our models). Otherwise returns the number of states.
+ * Returns nullptr if the NFA is not implementable (usually because it has
+ * too many states for any of our models); otherwise returns the built NFA.
* Note: this variant of the function allows a model to be specified with the
* \a hint parameter.
*/
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
bool compress_state, u32 hint, const CompileContext &cc);
-/** \brief Build a reverse NFA (with model type hint) from the graph given,
+/**
+ * \brief Build a reverse NFA (with model type hint) from the graph given,
* which should have already been reversed.
*
* Used for reverse NFAs used in SOM mode.
*/
-aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h, u32 hint,
- const CompileContext &cc);
+bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h, u32 hint,
+ const CompileContext &cc);
#endif // RELEASE_BUILD
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief SOM ("Start of Match") analysis.
*/
namespace {
struct SomRevNfa {
- SomRevNfa(NFAVertex s, ReportID r, aligned_unique_ptr<NFA> n)
+ SomRevNfa(NFAVertex s, ReportID r, bytecode_ptr<NFA> n)
: sink(s), report(r), nfa(move(n)) {}
SomRevNfa(SomRevNfa&& s) // MSVC2013 needs this for emplace
: sink(s.sink), report(s.report), nfa(move(s.nfa)) {}
NFAVertex sink;
ReportID report;
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
};
}
static
-aligned_unique_ptr<NFA> makeBareSomRevNfa(const NGHolder &g,
- const CompileContext &cc) {
+bytecode_ptr<NFA> makeBareSomRevNfa(const NGHolder &g,
+ const CompileContext &cc) {
// Create a reversed anchored version of this NFA which fires a zero report
// ID on accept.
NGHolder g_rev;
DEBUG_PRINTF("building a rev NFA with %zu vertices\n", num_vertices(g_rev));
- aligned_unique_ptr<NFA> nfa = constructReversedNFA(g_rev, cc);
+ auto nfa = constructReversedNFA(g_rev, cc);
if (!nfa) {
return nfa;
}
renumber_vertices(g2); // for findMinWidth, findMaxWidth.
- aligned_unique_ptr<NFA> nfa = makeBareSomRevNfa(g2, cc);
+ auto nfa = makeBareSomRevNfa(g2, cc);
if (!nfa) {
DEBUG_PRINTF("couldn't build rev nfa\n");
return false;
*/
static
size_t buildNfas(vector<raw_dfa> &anchored_dfas,
- vector<aligned_unique_ptr<NFA>> *nfas,
+ vector<bytecode_ptr<NFA>> *nfas,
vector<u32> *start_offset, const CompileContext &cc,
const ReportManager &rm) {
const size_t num_dfas = anchored_dfas.size();
remapIdsToPrograms(fragments, rdfa);
}
- vector<aligned_unique_ptr<NFA>> nfas;
+ vector<bytecode_ptr<NFA>> nfas;
vector<u32> start_offset; // start offset for each dfa (dots removed)
size_t total_size = buildNfas(dfas, &nfas, &start_offset, cc, build.rm);
#include "nfagraph/ng_width.h"
#include "smallwrite/smallwrite_build.h"
#include "som/slot_manager.h"
-#include "util/alloc.h"
#include "util/bitutils.h"
#include "util/boundary_reports.h"
#include "util/charreach.h"
/** \brief subengine info including built engine and
* corresponding triggering rose vertices */
struct ExclusiveSubengine {
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
vector<RoseVertex> vertices;
};
* engine.
*/
static
-aligned_unique_ptr<NFA> pickImpl(aligned_unique_ptr<NFA> dfa_impl,
- aligned_unique_ptr<NFA> nfa_impl) {
+bytecode_ptr<NFA> pickImpl(bytecode_ptr<NFA> dfa_impl,
+ bytecode_ptr<NFA> nfa_impl) {
assert(nfa_impl);
assert(dfa_impl);
assert(isDfaType(dfa_impl->type));
* otherwise a Castle.
*/
static
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
buildRepeatEngine(const CastleProto &proto,
const map<u32, vector<vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm) {
}
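The body of buildRepeatEngine is elided from this diff; consistent with its doc comment (a single repeat becomes an LBR, anything else a Castle), a sketch of the decision might read as follows — hedged, not the verbatim body.

    // Sketch of the decision only; member and helper names are taken
    // from elsewhere in this patch.
    bytecode_ptr<NFA> nfa;
    if (proto.repeats.size() == 1) {
        // A lone repeat gets the lighter LBR engine.
        nfa = constructLBR(proto, triggers.at(0), cc, rm);
    }
    if (!nfa) {
        // Multiple repeats (or LBR refusal) fall back to a Castle.
        nfa = buildCastle(proto, triggers, cc, rm);
    }
    return nfa;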
static
-aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
+bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
const CompileContext &cc,
const ReportManager &rm) {
// Unleash the Sheng!!
/* builds suffix nfas */
static
-aligned_unique_ptr<NFA>
+bytecode_ptr<NFA>
buildSuffix(const ReportManager &rm, const SomSlotManager &ssm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
}
}
-static aligned_unique_ptr<NFA>
-makeLeftNfa(const RoseBuildImpl &tbi, left_id &left,
- const bool is_prefix, const bool is_transient,
- const map<left_id, set<PredTopPair> > &infixTriggers,
+static
+bytecode_ptr<NFA>
+makeLeftNfa(const RoseBuildImpl &tbi, left_id &left, const bool is_prefix,
+ const bool is_transient,
+ const map<left_id, set<PredTopPair>> &infixTriggers,
const CompileContext &cc) {
const ReportManager &rm = tbi.rm;
- aligned_unique_ptr<NFA> n;
+ bytecode_ptr<NFA> n;
// Should compress state if this rose is non-transient and we're in
// streaming mode.
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), succs);
}
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
// Need to build NFA, which is either predestined to be a Haig (in SOM mode)
// or could be all manner of things.
if (leftfix.haig()) {
}
namespace {
-class OutfixBuilder : public boost::static_visitor<aligned_unique_ptr<NFA>> {
+class OutfixBuilder : public boost::static_visitor<bytecode_ptr<NFA>> {
public:
explicit OutfixBuilder(const RoseBuildImpl &build_in) : build(build_in) {}
- aligned_unique_ptr<NFA> operator()(boost::blank&) const {
+ bytecode_ptr<NFA> operator()(boost::blank&) const {
return nullptr;
};
- aligned_unique_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
+ bytecode_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
// Unleash the mighty DFA!
return getDfa(*rdfa, false, build.cc, build.rm);
}
- aligned_unique_ptr<NFA> operator()(unique_ptr<raw_som_dfa> &haig) const {
+ bytecode_ptr<NFA> operator()(unique_ptr<raw_som_dfa> &haig) const {
// Unleash the Goughfish!
return goughCompile(*haig, build.ssm.somPrecision(), build.cc,
build.rm);
}
- aligned_unique_ptr<NFA> operator()(unique_ptr<NGHolder> &holder) const {
+ bytecode_ptr<NFA> operator()(unique_ptr<NGHolder> &holder) const {
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
return n;
}
- aligned_unique_ptr<NFA> operator()(UNUSED MpvProto &mpv) const {
+ bytecode_ptr<NFA> operator()(UNUSED MpvProto &mpv) const {
// MPV construction handled separately.
assert(mpv.puffettes.empty());
return nullptr;
}
static
-aligned_unique_ptr<NFA> buildOutfix(const RoseBuildImpl &build, OutfixInfo &outfix) {
+bytecode_ptr<NFA> buildOutfix(const RoseBuildImpl &build, OutfixInfo &outfix) {
assert(!outfix.is_dead()); // should not be marked dead.
auto n = boost::apply_visitor(OutfixBuilder(build), outfix.proto);
}
static
-aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
- const ReportManager &rm,
- set<dstate_id_t> &accel_states) {
- aligned_unique_ptr<NFA> dfa = nullptr;
+bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
+ const ReportManager &rm,
+ set<dstate_id_t> &accel_states) {
+ bytecode_ptr<NFA> dfa = nullptr;
if (cc.grey.allowSmallWriteSheng) {
dfa = shengCompile(rdfa, cc, rm, &accel_states);
}
}
static
-aligned_unique_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
- const CompileContext &cc,
- const ReportManager &rm, u32 *start_offset,
- u32 *small_region) {
+bytecode_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
+ const CompileContext &cc, const ReportManager &rm,
+ u32 *start_offset, u32 *small_region) {
*start_offset = remove_leading_dots(rdfa);
// Unleash the McClellan!
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief SOM Slot Manager.
*/
+
#include "slot_manager.h"
#include "slot_manager_internal.h"
return nextSomSlot;
}
-u32 SomSlotManager::addRevNfa(aligned_unique_ptr<NFA> nfa, u32 maxWidth) {
+u32 SomSlotManager::addRevNfa(bytecode_ptr<NFA> nfa, u32 maxWidth) {
u32 rv = verify_u32(rev_nfas.size());
rev_nfas.push_back(move(nfa));
/*
- * Copyright (c) 2015-2016, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* POSSIBILITY OF SUCH DAMAGE.
*/
-/** \file
+/**
+ * \file
* \brief SOM Slot Manager.
*/
#include "ue2common.h"
#include "nfagraph/ng_holder.h"
-#include "util/alloc.h"
+#include "util/bytecode_ptr.h"
#include "util/noncopyable.h"
#include "util/ue2_containers.h"
u32 numSomSlots() const;
- const std::deque<aligned_unique_ptr<NFA>> &getRevNfas() const {
+ const std::deque<bytecode_ptr<NFA>> &getRevNfas() const {
return rev_nfas;
}
- u32 addRevNfa(aligned_unique_ptr<NFA> nfa, u32 maxWidth);
+ u32 addRevNfa(bytecode_ptr<NFA> nfa, u32 maxWidth);
u32 somHistoryRequired() const { return historyRequired; }
std::unique_ptr<SlotCache> cache;
/** \brief Reverse NFAs used for SOM support. */
- std::deque<aligned_unique_ptr<NFA>> rev_nfas;
+ std::deque<bytecode_ptr<NFA>> rev_nfas;
/** \brief In streaming mode, the amount of history we've committed to
* using for SOM rev NFAs. */
bytecode_ptr(std::nullptr_t) {}
- T *get() { return ptr.get(); };
- const T *get() const { return ptr.get(); };
+ T *get() const { return ptr.get(); }
T &operator*() { return *ptr; }
const T &operator*() const { return *ptr; }
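This hunk const-qualifies get() while still returning a mutable T*: as with std::unique_ptr, constness of the handle does not propagate to the owned bytes. A small illustration, assuming the bytecode_ptr<NFA> interface shown in this patch:

    // Assumes the bytecode_ptr<NFA> interface shown in this patch.
    void touch(const bytecode_ptr<NFA> &p) {
        NFA *raw = p.get();      // ok: get() is const-qualified on the handle
        if (raw) {
            raw->queueIndex = 0; // the pointee itself remains mutable
        }
    }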
unsigned matches;
// Compiled NFA structure.
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;
unsigned matches;
// Compiled NFA structure.
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;
// Expand state into a new copy and check that it matches the original
// uncompressed state.
- aligned_unique_ptr<char> state_copy =
- aligned_zmalloc_unique<char>(nfa->scratchStateSize);
+ auto state_copy = aligned_zmalloc_unique<char>(nfa->scratchStateSize);
char *dest = state_copy.get();
memset(dest, 0xff, nfa->scratchStateSize);
nfaExpandState(nfa.get(), dest, q.streamState, q.offset,
unsigned matches;
// Compiled NFA structure.
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
};
INSTANTIATE_TEST_CASE_P(LimExReverse, LimExReverseTest,
unsigned matches;
// Compiled NFA structure.
- aligned_unique_ptr<NFA> nfa;
+ bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;