/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
}
static really_inline
-void init_state_for_block(const struct RoseEngine *t, u8 *state) {
+void init_state_for_block(const struct RoseEngine *t, char *state) {
assert(t);
assert(state);
static really_inline
void init_outfixes_for_block(const struct RoseEngine *t,
- struct hs_scratch *scratch, u8 *state,
+ struct hs_scratch *scratch, char *state,
char is_small_block) {
/* active leaf array has been cleared by the init scatter */
static really_inline
void init_for_block(const struct RoseEngine *t, struct hs_scratch *scratch,
RoseCallback callback, RoseCallbackSom som_callback,
- void *ctxt, u8 *state, char is_small_block) {
+ void *ctxt, char *state, char is_small_block) {
init_state_for_block(t, state);
struct RoseContext *tctxt = &scratch->tctxt;
- tctxt->t = t;
tctxt->groups = t->initialGroups;
tctxt->lit_offset_adjust = 1; // index after last byte
tctxt->delayLastEndOffset = 0;
tctxt->lastEndOffset = 0;
tctxt->filledDelayedSlots = 0;
- tctxt->state = state;
tctxt->cb = callback;
tctxt->cb_som = som_callback;
tctxt->userCtx = ctxt;
const char is_small_block =
(length < ROSE_SMALL_BLOCK_LEN && t->sbmatcherOffset);
- u8 *state = (u8 *)scratch->core_info.state;
+ char *state = scratch->core_info.state;
init_for_block(t, scratch, callback, som_callback, ctx, state,
is_small_block);
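/*
 * A minimal sketch of the pattern this patch applies throughout: the
 * engine and base state pointers now live solely in scratch->core_info,
 * so any code that can reach scratch no longer needs tctxt->t or
 * tctxt->state. The helper names rose_of()/state_of() are illustrative,
 * not part of the real API.
 */
static really_inline
const struct RoseEngine *rose_of(const struct hs_scratch *scratch) {
    return scratch->core_info.rose;
}

static really_inline
char *state_of(const struct hs_scratch *scratch) {
    return scratch->core_info.state;
}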
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
static really_inline
int handleReportInternally(struct hs_scratch *scratch, ReportID id,
u64a offset) {
- const struct internal_report *ri = getInternalReport(scratch->tctxt.t, id);
+ const struct RoseEngine *t = scratch->core_info.rose;
+ const struct internal_report *ri = getInternalReport(t, id);
if (ri->type == EXTERNAL_CALLBACK) {
return 0;
}
return 1;
}
if (ri->type == INTERNAL_ROSE_CHAIN) {
- roseHandleChainMatch(scratch->tctxt.t, id, offset, &scratch->tctxt, 0,
- 1);
+ roseHandleChainMatch(t, id, offset, &scratch->tctxt, 0, 1);
return 1;
}
static really_inline
int handleReportInternallyNoChain(struct hs_scratch *scratch, ReportID id,
u64a offset) {
- const struct internal_report *ri = getInternalReport(scratch->tctxt.t, id);
+ const struct RoseEngine *t = scratch->core_info.rose;
+ const struct internal_report *ri = getInternalReport(t, id);
if (ri->type == EXTERNAL_CALLBACK) {
return 0;
}
static really_inline
void deactivateQueue(u8 *aa, u32 qi, struct hs_scratch *scratch) {
- u32 aaCount = scratch->tctxt.t->activeArrayCount;
- u32 qCount = scratch->tctxt.t->queueCount;
+ const struct RoseEngine *t = scratch->core_info.rose;
+ u32 aaCount = t->activeArrayCount;
+ u32 qCount = t->queueCount;
/* this is sailing close to the wind with regards to invalidating an
* iteration. We are saved by the fact that unsetting does not clear the
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, 0,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
+ scratch->core_info.exhaustionVector);
}
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, 0,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
+ scratch->core_info.exhaustionVector);
}
}
}
}
-hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, u8 *state, s64a loc,
+hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
struct hs_scratch *scratch) {
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
int roseNfaBlastAdaptor(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
- const struct internal_report *ri = getInternalReport(scratch->tctxt.t, id);
+ const struct RoseEngine *t = scratch->core_info.rose;
+ const struct internal_report *ri = getInternalReport(t, id);
DEBUG_PRINTF("called\n");
if (ri->type != INTERNAL_ROSE_CHAIN) {
/* INTERNAL_ROSE_CHAIN are not visible externally */
- if (roseCatchUpMPV(tctxt->t, tctxt->state,
+ if (roseCatchUpMPV(t, scratch->core_info.state,
offset - scratch->core_info.buf_offset, scratch)
== HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, tctxt->curr_qi,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(t, tctxt->curr_qi,
+ scratch->core_info.exhaustionVector);
}
}
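/*
 * tctxtToScratch() is what lets these adaptors recover scratch (and via
 * it the engine) from a bare RoseContext. A sketch of the container-of
 * computation it performs, assuming RoseContext is embedded in
 * hs_scratch as the tctxt member (consistent with &scratch->tctxt
 * elsewhere in this patch); offsetof comes from <stddef.h>.
 */
static really_inline
struct hs_scratch *tctxt_to_scratch_sketch(struct RoseContext *tctxt) {
    return (struct hs_scratch *)
        ((char *)tctxt - offsetof(struct hs_scratch, tctxt));
}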
int roseNfaBlastAdaptorNoInternal(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
- if (roseCatchUpMPV(tctxt->t, tctxt->state,
- offset - scratch->core_info.buf_offset, scratch)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, scratch->core_info.state,
+ offset - scratch->core_info.buf_offset,
+ scratch) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_HALT_MATCHING;
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, tctxt->curr_qi,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(t, tctxt->curr_qi,
+ scratch->core_info.exhaustionVector);
}
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, tctxt->curr_qi,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
+ scratch->core_info.exhaustionVector);
}
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, tctxt->curr_qi,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
+ scratch->core_info.exhaustionVector);
}
}
void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
- if (roseCatchUpMPV(tctxt->t, tctxt->state,
- offset - scratch->core_info.buf_offset, scratch)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, scratch->core_info.state,
+ offset - scratch->core_info.buf_offset,
+ scratch) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return MO_HALT_MATCHING;
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(tctxt->t, tctxt->curr_qi,
- scratch->core_info.exhaustionVector);
+ return !roseSuffixIsExhausted(t, tctxt->curr_qi,
+ scratch->core_info.exhaustionVector);
}
}
return HWLM_CONTINUE_MATCHING;
}
-void streamInitSufPQ(const struct RoseEngine *t, u8 *state,
+void streamInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch) {
assert(scratch->catchup_pq.qm_size == 0);
assert(t->outfixBeginQueue != t->outfixEndQueue);
}
}
-void blockInitSufPQ(const struct RoseEngine *t, u8 *state,
+void blockInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, char is_small_block) {
DEBUG_PRINTF("initSufPQ: outfixes [%u,%u)\n", t->outfixBeginQueue,
t->outfixEndQueue);
* safe_loc is ???
*/
static rose_inline
-hwlmcb_rv_t buildSufPQ(const struct RoseEngine *t, u8 *state, s64a safe_loc,
+hwlmcb_rv_t buildSufPQ(const struct RoseEngine *t, char *state, s64a safe_loc,
s64a final_loc, struct hs_scratch *scratch) {
assert(scratch->catchup_pq.qm_size <= t->outfixEndQueue);
s64a report_ok_loc = tctxt->minNonMpvMatchOffset + 1 - scratch->core_info.buf_offset;
- hwlmcb_rv_t rv = roseCatchUpMPV(tctxt->t, state, report_ok_loc, scratch);
+ hwlmcb_rv_t rv = roseCatchUpMPV(t, state, report_ok_loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
}
static never_inline
-hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, u8 *state, s64a loc,
+hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
s64a final_loc, struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
assert(t->activeArrayCount);
}
static really_inline
-hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t, u8 *state,
+hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t, char *state,
s64a loc, s64a final_loc,
struct hs_scratch *scratch) {
hwlmcb_rv_t rv = roseCatchUpNfas(t, state, loc, final_loc, scratch);
static really_inline
hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
char do_full_mpv) {
- assert(scratch->tctxt.t->activeArrayCount); /* otherwise use
- * roseCatchUpAnchoredOnly */
+ const struct RoseEngine *t = scratch->core_info.rose;
+ assert(t->activeArrayCount); /* otherwise use roseCatchUpAnchoredOnly */
struct RoseContext *tctxt = &scratch->tctxt;
u64a current_offset = scratch->core_info.buf_offset + loc;
u64a anchored_end;
ReportID anchored_report;
- currentAnchoredMatch(tctxt->t, tctxt, &anchored_report, &anchored_end);
+ currentAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
DEBUG_PRINTF("am current_offset %llu\n", current_offset);
DEBUG_PRINTF("min match offset %llu\n", scratch->tctxt.minMatchOffset);
assert(current_offset > tctxt->minMatchOffset);
assert(anchored_end != ANCHORED_MATCH_SENTINEL);
- hwlmcb_rv_t rv = buildSufPQ(tctxt->t, tctxt->state,
+ hwlmcb_rv_t rv = buildSufPQ(t, scratch->core_info.state,
anchored_end - scratch->core_info.buf_offset,
loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
}
/* buildSufPQ may have caught only part of the pq up to anchored_end */
- rv = roseCatchUpNfas(tctxt->t, tctxt->state,
- anchored_end - scratch->core_info.buf_offset, loc,
- scratch);
+ rv = roseCatchUpNfas(t, scratch->core_info.state,
+ anchored_end - scratch->core_info.buf_offset, loc,
+ scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
while (anchored_report != MO_INVALID_IDX
&& anchored_end <= current_offset) {
if (anchored_end != tctxt->minMatchOffset) {
- rv = roseCatchUpNfasAndMpv(tctxt->t, tctxt->state,
+ rv = roseCatchUpNfasAndMpv(t, scratch->core_info.state,
anchored_end - scratch->core_info.buf_offset,
loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
next:
- nextAnchoredMatch(tctxt->t, tctxt, &anchored_report, &anchored_end);
+ nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}
if (current_offset == tctxt->minMatchOffset) {
DEBUG_PRINTF("caught up\n");
- assert(scratch->catchup_pq.qm_size <= tctxt->t->outfixEndQueue);
+ assert(scratch->catchup_pq.qm_size <= t->outfixEndQueue);
return HWLM_CONTINUE_MATCHING;
}
- rv = roseCatchUpNfas(tctxt->t, tctxt->state, loc, loc, scratch);
+ rv = roseCatchUpNfas(t, scratch->core_info.state, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- assert(scratch->catchup_pq.qm_size <= tctxt->t->outfixEndQueue
+ assert(scratch->catchup_pq.qm_size <= t->outfixEndQueue
|| rv == HWLM_TERMINATE_MATCHING);
if (do_full_mpv) {
/* finish off any outstanding chained matches */
- rv = roseCatchUpMPV(tctxt->t, tctxt->state, loc, scratch);
+ rv = roseCatchUpMPV(t, scratch->core_info.state, loc, scratch);
}
DEBUG_PRINTF("catchup all done %llu\n", current_offset);
assert(scratch->core_info.buf_offset + loc
> scratch->tctxt.minNonMpvMatchOffset);
- hwlmcb_rv_t rv = buildSufPQ(scratch->tctxt.t, scratch->tctxt.state, loc,
- loc, scratch);
+ const struct RoseEngine *t = scratch->core_info.rose;
+ char *state = scratch->core_info.state;
+
+ hwlmcb_rv_t rv = buildSufPQ(t, state, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- rv = roseCatchUpNfas(scratch->tctxt.t, scratch->tctxt.state, loc, loc,
- scratch);
-
+ rv = roseCatchUpNfas(t, state, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- rv = roseCatchUpMPV(scratch->tctxt.t, scratch->tctxt.state, loc, scratch);
-
+ rv = roseCatchUpMPV(t, state, loc, scratch);
assert(rv != HWLM_CONTINUE_MATCHING
- || scratch->catchup_pq.qm_size <= scratch->tctxt.t->outfixEndQueue);
-
+ || scratch->catchup_pq.qm_size <= t->outfixEndQueue);
return rv;
}
assert(scratch->core_info.buf_offset + loc
> scratch->tctxt.minNonMpvMatchOffset);
- hwlmcb_rv_t rv = buildSufPQ(scratch->tctxt.t, scratch->tctxt.state, loc,
- loc, scratch);
+ const struct RoseEngine *t = scratch->core_info.rose;
+ char *state = scratch->core_info.state;
+
+ hwlmcb_rv_t rv = buildSufPQ(t, state, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- rv = roseCatchUpNfas(scratch->tctxt.t, scratch->tctxt.state, loc, loc,
- scratch);
- assert(rv != HWLM_CONTINUE_MATCHING
- || scratch->catchup_pq.qm_size <= scratch->tctxt.t->outfixEndQueue);
+ rv = roseCatchUpNfas(t, state, loc, loc, scratch);
+ assert(rv != HWLM_CONTINUE_MATCHING ||
+ scratch->catchup_pq.qm_size <= t->outfixEndQueue);
return rv;
}
hwlmcb_rv_t roseCatchUpAnchoredOnly(s64a loc, struct hs_scratch *scratch) {
+ const struct RoseEngine *t = scratch->core_info.rose;
struct RoseContext *tctxt = &scratch->tctxt;
- assert(!tctxt->t->activeArrayCount); /* otherwise use roseCatchUpAll */
+ assert(!t->activeArrayCount); /* otherwise use roseCatchUpAll */
u64a current_offset = scratch->core_info.buf_offset + loc;
u64a anchored_end;
ReportID anchored_report;
- currentAnchoredMatch(tctxt->t, tctxt, &anchored_report, &anchored_end);
+ currentAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
DEBUG_PRINTF("am current_offset %llu\n", current_offset);
return HWLM_TERMINATE_MATCHING;
}
next:
- nextAnchoredMatch(tctxt->t, tctxt, &anchored_report, &anchored_end);
+ nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
hwlmcb_rv_t roseCatchUpAnchoredAndSuf(s64a loc, struct hs_scratch *scratch);
-hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, u8 *state, s64a loc,
+hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
struct hs_scratch *scratch);
-void blockInitSufPQ(const struct RoseEngine *t, u8 *state,
+void blockInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, char is_small_block);
-void streamInitSufPQ(const struct RoseEngine *t, u8 *state,
+void streamInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch);
static really_inline
-hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, u8 *state,
+hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, char *state,
s64a loc, struct hs_scratch *scratch) {
u64a cur_offset = loc + scratch->core_info.buf_offset;
assert(cur_offset >= scratch->tctxt.minMatchOffset);
/* catches up nfas, anchored matches and the mpv */
static rose_inline
-hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, u8 *state, u64a end,
+hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, char *state, u64a end,
struct hs_scratch *scratch, char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minMatchOffset) {
return roseCatchUpMPV(t, state, loc, scratch);
}
- assert(t == scratch->tctxt.t);
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
u64a curr_anchored_end = currentAnchoredEnd(t, &scratch->tctxt);
hwlmcb_rv_t rv;
* and suf/outfixes. The MPV will be run only to intersperse matches in
* the output match stream if external matches are raised. */
static rose_inline
-hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t, u8 *state,
+hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t, char *state,
u64a end, struct hs_scratch *scratch,
char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
s64a loc = end - scratch->core_info.buf_offset;
- assert(t == scratch->tctxt.t);
assert(t->activeArrayCount); /* mpv is in active array */
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
u64a curr_anchored_end = currentAnchoredEnd(t, &scratch->tctxt);
#include "util/fatbit.h"
static really_inline
-void initContext(const struct RoseEngine *t, u8 *state, u64a offset,
+void initContext(const struct RoseEngine *t, char *state, u64a offset,
struct hs_scratch *scratch, RoseCallback callback,
RoseCallbackSom som_callback, void *ctx) {
struct RoseContext *tctxt = &scratch->tctxt;
- tctxt->t = t;
tctxt->groups = loadGroups(t, state); /* TODO: diff groups for eod */
tctxt->lit_offset_adjust = scratch->core_info.buf_offset - scratch->core_info.hlen + 1; // index after last byte
tctxt->delayLastEndOffset = offset;
tctxt->lastEndOffset = offset;
tctxt->filledDelayedSlots = 0;
- tctxt->state = state;
tctxt->cb = callback;
tctxt->cb_som = som_callback;
tctxt->userCtx = ctx;
* or outfix) NFAs.
*/
static rose_inline
-void roseCheckNfaEod(const struct RoseEngine *t, u8 *state,
+void roseCheckNfaEod(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, u64a offset,
const char is_streaming) {
if (!t->eodNfaIterOffset) {
}
static rose_inline
-void cleanupAfterEodMatcher(const struct RoseEngine *t, u8 *state, u64a offset,
- struct hs_scratch *scratch) {
+void cleanupAfterEodMatcher(const struct RoseEngine *t, char *state,
+ u64a offset, struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
// Flush history to make sure it's consistent.
}
static rose_inline
-void roseCheckEodSuffixes(const struct RoseEngine *t, u8 *state, u64a offset,
+void roseCheckEodSuffixes(const struct RoseEngine *t, char *state, u64a offset,
struct hs_scratch *scratch) {
const u8 *aa = getActiveLeafArray(t, state);
const u32 aaCount = t->activeArrayCount;
}
static really_inline
-void roseEodExec_i(const struct RoseEngine *t, u8 *state, u64a offset,
+void roseEodExec_i(const struct RoseEngine *t, char *state, u64a offset,
struct hs_scratch *scratch, const char is_streaming) {
assert(t);
assert(scratch->core_info.buf || scratch->core_info.hbuf);
}
}
-void roseEodExec(const struct RoseEngine *t, u8 *state, u64a offset,
+void roseEodExec(const struct RoseEngine *t, u64a offset,
struct hs_scratch *scratch, RoseCallback callback,
RoseCallbackSom som_callback, void *context) {
- assert(state);
assert(scratch);
assert(callback);
assert(context);
return;
}
+ char *state = scratch->core_info.state;
+ assert(state);
+
initContext(t, state, offset, scratch, callback, som_callback, context);
roseEodExec_i(t, state, offset, scratch, 1);
}
static rose_inline
-void prepForEod(const struct RoseEngine *t, u8 *state, size_t length,
+void prepForEod(const struct RoseEngine *t, char *state, size_t length,
struct RoseContext *tctxt) {
roseFlushLastByteHistory(t, state, length, tctxt);
tctxt->lastEndOffset = length;
assert(!can_stop_matching(scratch));
- u8 *state = (u8 *)scratch->core_info.state;
+ char *state = scratch->core_info.state;
// Ensure that history is correct before we look for EOD matches
prepForEod(t, state, scratch->core_info.len, &scratch->tctxt);
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include <string.h>
static really_inline
-void init_rstate(const struct RoseEngine *t, u8 *state) {
+void init_rstate(const struct RoseEngine *t, char *state) {
// Set runtime state: we take our initial groups from the RoseEngine.
DEBUG_PRINTF("setting initial groups to 0x%016llx\n", t->initialGroups);
struct RoseRuntimeState *rstate = getRuntimeState(state);
}
static really_inline
-void init_outfixes(const struct RoseEngine *t, u8 *state) {
+void init_outfixes(const struct RoseEngine *t, char *state) {
/* The active leaf array has been init'ed by the scatter with outfix
* bits set on */
}
}
-void roseInitState(const struct RoseEngine *t, u8 *state) {
+void roseInitState(const struct RoseEngine *t, char *state) {
assert(t);
assert(state);
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*/
static really_inline
-void init_state(const struct RoseEngine *t, u8 *state) {
+void init_state(const struct RoseEngine *t, char *state) {
scatter(state, t, &t->state_init);
}
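/*
 * A sketch of what the init scatter amounts to, assuming a precomputed
 * plan of (offset, value) writes into the state block; the unit layout
 * below is illustrative, not the real util/scatter.h representation.
 */
struct scatter_unit_sketch {
    u32 offset; /* byte offset into state */
    u64a val;   /* value to write */
};

static really_inline
void scatter_sketch(char *state, const struct scatter_unit_sketch *units,
                    u32 count) {
    for (u32 i = 0; i < count; i++) {
        memcpy(state + units[i].offset, &units[i].val, sizeof(u64a));
    }
}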
void *ctx) {
struct hs_scratch *scratch = ctx;
struct RoseContext *tctx = &scratch->tctxt;
- const struct RoseEngine *t = tctx->t;
struct core_info *ci = &scratch->core_info;
+ const struct RoseEngine *t = ci->rose;
size_t rb_len = MIN(ci->hlen, t->delayRebuildLength);
u64a real_end = ci->buf_offset - rb_len + end + 1; // index after last byte
static rose_inline
void recordAnchoredMatch(struct RoseContext *tctxt, ReportID reportId,
u64a end) {
- const struct RoseEngine *t = tctxt->t;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
u8 **anchoredRows = getAnchoredLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", reportId, end);
void recordAnchoredLiteralMatch(struct RoseContext *tctxt, u32 literal_id,
u64a end) {
assert(end);
- const struct RoseEngine *t = tctxt->t;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
u8 **anchoredLiteralRows = getAnchoredLiteralLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", literal_id, end);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct core_info *ci = &scratch->core_info;
- u8 *aa = getActiveLeafArray(t, tctxt->state);
+ u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
u32 aaCount = t->activeArrayCount;
struct fatbit *activeQueues = scratch->aqa;
u32 qCount = t->queueCount;
/* handles catchup, som, cb, etc */
static really_inline
-hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, u8 *state,
- struct RoseContext *tctxt, ReportID id, u64a offset,
- char in_anchored) {
+hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, char *state,
+ struct RoseContext *tctxt, ReportID id,
+ u64a offset, char in_anchored) {
const struct internal_report *ri = getInternalReport(t, id);
if (ri) {
static really_inline
hwlmcb_rv_t roseHandleAnchoredDirectReport(const struct RoseEngine *t,
- u8 *state, struct RoseContext *tctxt,
+ char *state,
+ struct RoseContext *tctxt,
u64a real_end, ReportID report) {
DEBUG_PRINTF("direct report %u, real_end=%llu\n", report, real_end);
int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
struct RoseContext *tctxt = ctx;
- const struct RoseEngine *t = tctxt->t;
- u8 *state = tctxt->state;
struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
+ char *state = ci->state;
+ const struct RoseEngine *t = ci->rose;
u64a real_end = ci->buf_offset + end; // index after last byte
struct RoseContext *tctxt, char in_delay_play,
char in_anch_playback) {
/* assert(!tctxt->in_anchored); */
- u8 *state = tctxt->state;
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ char *state = scratch->core_info.state;
DEBUG_PRINTF("id=%u\n", id);
}
static rose_inline
-hwlmcb_rv_t playDelaySlot(struct RoseContext *tctxt, const u8 *delaySlotBase,
- size_t delaySlotSize, u32 vicIndex, u64a offset) {
+hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
+ const u8 *delaySlotBase, size_t delaySlotSize,
+ u32 vicIndex, u64a offset) {
/* assert(!tctxt->in_anchored); */
assert(vicIndex < DELAY_SLOT_COUNT);
const u8 *vicSlot = delaySlotBase + delaySlotSize * vicIndex;
- u32 delay_count = tctxt->t->delay_count;
+ u32 delay_count = t->delay_count;
- if (offset < tctxt->t->floatingMinLiteralMatchOffset) {
+ if (offset < t->floatingMinLiteralMatchOffset) {
DEBUG_PRINTF("too soon\n");
return HWLM_CONTINUE_MATCHING;
}
- roseFlushLastByteHistory(tctxt->t, tctxt->state, offset, tctxt);
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ roseFlushLastByteHistory(t, scratch->core_info.state, offset, tctxt);
tctxt->lastEndOffset = offset;
for (u32 it = mmbit_iterate(vicSlot, delay_count, MMB_INVALID);
it != MMB_INVALID; it = mmbit_iterate(vicSlot, delay_count, it)) {
- u32 literal_id = tctxt->t->delay_base_id + it;
+ u32 literal_id = t->delay_base_id + it;
UNUSED rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", literal_id, offset);
- hwlmcb_rv_t rv = roseProcessDelayedMatch(tctxt->t, offset, literal_id,
- tctxt);
+ hwlmcb_rv_t rv = roseProcessDelayedMatch(t, offset, literal_id, tctxt);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
/* delayed literals can't safely set groups.
}
static really_inline
-hwlmcb_rv_t flushAnchoredLiteralAtLoc(struct RoseContext *tctxt, u32 curr_loc) {
+hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
+ struct RoseContext *tctxt, u32 curr_loc) {
u8 *curr_row = getAnchoredLiteralLog(tctxtToScratch(tctxt))[curr_loc - 1];
- u32 region_width = tctxt->t->anchored_count;
+ u32 region_width = t->anchored_count;
DEBUG_PRINTF("report matches at curr loc\n");
for (u32 it = mmbit_iterate(curr_row, region_width, MMB_INVALID);
it != MMB_INVALID; it = mmbit_iterate(curr_row, region_width, it)) {
DEBUG_PRINTF("it = %u/%u\n", it, region_width);
- u32 literal_id = tctxt->t->anchored_base_id + it;
+ u32 literal_id = t->anchored_base_id + it;
rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("ANCH REPLAY MATCH id=%u offset=%u\n", literal_id,
curr_loc);
- hwlmcb_rv_t rv = roseProcessDelayedAnchoredMatch(tctxt->t, curr_loc,
+ hwlmcb_rv_t rv = roseProcessDelayedAnchoredMatch(t, curr_loc,
literal_id, tctxt);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
}
static really_inline
-hwlmcb_rv_t flushAnchoredLiterals(struct RoseContext *tctxt,
+hwlmcb_rv_t flushAnchoredLiterals(const struct RoseEngine *t,
+ struct RoseContext *tctxt,
u32 *anchored_it_param, u64a to_off) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ char *state = scratch->core_info.state;
u32 anchored_it = *anchored_it_param;
/* catch up any remaining anchored matches */
for (; anchored_it != MMB_INVALID && anchored_it < to_off;
assert(anchored_it < scratch->anchored_literal_region_len);
DEBUG_PRINTF("loc_it = %u\n", anchored_it);
u32 curr_off = anchored_it + 1;
- roseFlushLastByteHistory(tctxt->t, tctxt->state, curr_off, tctxt);
+ roseFlushLastByteHistory(t, state, curr_off, tctxt);
tctxt->lastEndOffset = curr_off;
- if (flushAnchoredLiteralAtLoc(tctxt, curr_off)
+ if (flushAnchoredLiteralAtLoc(t, tctxt, curr_off)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
static really_inline
-hwlmcb_rv_t playVictims(struct RoseContext *tctxt, u32 *anchored_it,
- u64a lastEnd, u64a victimDelaySlots, u8 *delaySlotBase,
- size_t delaySlotSize) {
+hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct RoseContext *tctxt,
+ u32 *anchored_it, u64a lastEnd, u64a victimDelaySlots,
+ u8 *delaySlotBase, size_t delaySlotSize) {
/* assert (!tctxt->in_anchored); */
while (victimDelaySlots) {
DEBUG_PRINTF("vic = %u\n", vic);
u64a vicOffset = vic + (lastEnd & ~(u64a)DELAY_MASK);
- if (flushAnchoredLiterals(tctxt, anchored_it, vicOffset)
+ if (flushAnchoredLiterals(t, tctxt, anchored_it, vicOffset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
- if (playDelaySlot(tctxt, delaySlotBase, delaySlotSize,
+ if (playDelaySlot(t, tctxt, delaySlotBase, delaySlotSize,
vic % DELAY_SLOT_COUNT, vicOffset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
/* call flushQueuedLiterals instead */
hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a currEnd) {
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
+
/* assert(!tctxt->in_anchored); */
u64a lastEnd = tctxt->delayLastEndOffset;
DEBUG_PRINTF("flushing backed up matches @%llu up from %llu\n", currEnd,
}
{
- u8 *delaySlotBase = getDelaySlots(tctxtToScratch(tctxt));
- size_t delaySlotSize = tctxt->t->delay_slot_size;
+ u8 *delaySlotBase = getDelaySlots(scratch);
+ size_t delaySlotSize = t->delay_slot_size;
u32 lastIndex = lastEnd & DELAY_MASK;
u32 currIndex = currEnd & DELAY_MASK;
second_half, victimDelaySlots, lastIndex);
}
- if (playVictims(tctxt, &anchored_it, lastEnd, victimDelaySlots,
+ if (playVictims(t, tctxt, &anchored_it, lastEnd, victimDelaySlots,
delaySlotBase, delaySlotSize)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
anchored_leftovers:;
- hwlmcb_rv_t rv = flushAnchoredLiterals(tctxt, &anchored_it, currEnd);
+ hwlmcb_rv_t rv = flushAnchoredLiterals(t, tctxt, &anchored_it, currEnd);
tctxt->delayLastEndOffset = currEnd;
return rv;
}
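/*
 * A worked sketch of the delay-slot ring used above, assuming (for
 * illustration only) DELAY_SLOT_COUNT == 32 and hence DELAY_MASK == 31:
 * a literal match at offset 200 queued with delay 5 matures at offset
 * 205 and occupies slot 205 & 31 == 13. playVictims() reverses this,
 * rebuilding the absolute offset from the slot index plus the high bits
 * of lastEnd, exactly as in the expression it passes to playDelaySlot().
 */
static really_inline
u64a victim_offset_sketch(u32 vic, u64a lastEnd) {
    return vic + (lastEnd & ~(u64a)DELAY_MASK);
}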
hwlmcb_rv_t roseCallback(size_t start, size_t end, u32 id, void *ctxt) {
struct RoseContext *tctx = ctxt;
+ struct hs_scratch *scratch = tctxtToScratch(tctx);
+ const struct RoseEngine *t = scratch->core_info.rose;
+
u64a real_end = end + tctx->lit_offset_adjust;
#if defined(DEBUG)
- struct core_info *ci = &tctxtToScratch(tctx)->core_info;
DEBUG_PRINTF("MATCH id=%u offsets=[%llu,%llu]: ", id,
start + tctx->lit_offset_adjust, real_end);
- printMatch(ci, start + tctx->lit_offset_adjust, real_end);
+ printMatch(&scratch->core_info, start + tctx->lit_offset_adjust, real_end);
printf("\n");
#endif
DEBUG_PRINTF("last end %llu\n", tctx->lastEndOffset);
DEBUG_PRINTF("STATE groups=0x%016llx\n", tctx->groups);
- if (can_stop_matching(tctxtToScratch(tctx))) {
+ if (can_stop_matching(scratch)) {
DEBUG_PRINTF("received a match when we're already dead!\n");
return HWLM_TERMINATE_MATCHING;
}
hwlmcb_rv_t rv = flushQueuedLiterals(tctx, real_end);
/* flushDelayed may have advanced tctx->lastEndOffset */
- if (real_end >= tctx->t->floatingMinLiteralMatchOffset) {
- roseFlushLastByteHistory(tctx->t, tctx->state, real_end, tctx);
+ if (real_end >= t->floatingMinLiteralMatchOffset) {
+ roseFlushLastByteHistory(t, scratch->core_info.state, real_end, tctx);
tctx->lastEndOffset = real_end;
}
}
size_t match_len = end - start + 1;
- rv = roseProcessMainMatch(tctx->t, real_end, match_len, id, tctx);
+ rv = roseProcessMainMatch(t, real_end, match_len, id, tctx);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctx->groups);
return tctx->groups;
}
- assert(can_stop_matching(tctxtToScratch(tctx)));
+ assert(can_stop_matching(scratch));
DEBUG_PRINTF("user requested halt\n");
return HWLM_TERMINATE_MATCHING;
}
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
q->end = 0;
q->cur = 0;
q->state = scratch->fullState + info->fullStateOffset;
- q->streamState = (char *)tctxt->state + info->stateOffset;
+ q->streamState = scratch->core_info.state + info->stateOffset;
q->offset = scratch->core_info.buf_offset;
q->buffer = scratch->core_info.buf;
q->length = scratch->core_info.len;
if (left->transient) {
q->streamState = (char *)scratch->tstate + info->stateOffset;
} else {
- q->streamState = (char *)tctxt->state + info->stateOffset;
+ q->streamState = scratch->core_info.state + info->stateOffset;
}
q->offset = scratch->core_info.buf_offset;
}
static really_inline
-void storeRoseDelay(const struct RoseEngine *t, u8 *state,
+void storeRoseDelay(const struct RoseEngine *t, char *state,
const struct LeftNfaInfo *left, u32 loc) {
u32 di = left->lagIndex;
if (di == ROSE_OFFSET_INVALID) {
}
static really_inline
-void setAsZombie(const struct RoseEngine *t, u8 *state,
+void setAsZombie(const struct RoseEngine *t, char *state,
const struct LeftNfaInfo *left) {
u32 di = left->lagIndex;
assert(di != ROSE_OFFSET_INVALID);
/* loadRoseDelay MUST NOT be called on the first stream write as it is only
* initialized for running nfas on stream boundaries */
static really_inline
-u32 loadRoseDelay(const struct RoseEngine *t, const u8 *state,
+u32 loadRoseDelay(const struct RoseEngine *t, const char *state,
const struct LeftNfaInfo *left) {
u32 di = left->lagIndex;
if (di == ROSE_OFFSET_INVALID) {
}
static really_inline
-char isZombie(const struct RoseEngine *t, const u8 *state,
+char isZombie(const struct RoseEngine *t, const char *state,
const struct LeftNfaInfo *left) {
u32 di = left->lagIndex;
assert(di != ROSE_OFFSET_INVALID);
}
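/*
 * storeRoseDelay()/loadRoseDelay()/setAsZombie()/isZombie() all address
 * one byte per left NFA in the leftfix lag table, at index
 * left->lagIndex. A sketch of the shared access, using the
 * getLeftfixLagTableConst() accessor from the runtime state helpers
 * later in this patch; the delay/zombie byte encoding itself is not
 * assumed here.
 */
static really_inline
u8 load_lag_byte_sketch(const struct RoseEngine *t, const char *state,
                        const struct LeftNfaInfo *left) {
    assert(left->lagIndex != ROSE_OFFSET_INVALID);
    return getLeftfixLagTableConst(t, state)[left->lagIndex];
}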
static rose_inline
-void roseFlushLastByteHistory(const struct RoseEngine *t, u8 *state,
+void roseFlushLastByteHistory(const struct RoseEngine *t, char *state,
u64a currEnd, struct RoseContext *tctxt) {
if (!t->lastByteHistoryIterOffset) {
return;
}
static rose_inline
-int roseHasInFlightMatches(const struct RoseEngine *t, u8 *state,
+int roseHasInFlightMatches(const struct RoseEngine *t, char *state,
const struct hs_scratch *scratch) {
if (scratch->al_log_sum) {
DEBUG_PRINTF("anchored literals in log\n");
#include "util/multibit.h"
static rose_inline
-int roseCheckBenefits(struct RoseContext *tctxt, u64a end, u32 mask_rewind,
+int roseCheckBenefits(const struct core_info *ci, u64a end, u32 mask_rewind,
const u8 *and_mask, const u8 *exp_mask) {
- DEBUG_PRINTF("am offset = %zu, em offset = %zu\n",
- and_mask - (const u8 *)tctxt->t,
- exp_mask - (const u8 *)tctxt->t);
const u8 *data;
// If the check works over part of the history and part of the buffer, we
// create a temporary copy of the data in here so it's contiguous.
u8 temp[MAX_MASK2_WIDTH];
- struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
s64a buffer_offset = (s64a)end - ci->buf_offset;
DEBUG_PRINTF("rel offset %lld\n", buffer_offset);
if (buffer_offset >= mask_rewind) {
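/*
 * A minimal sketch of the benefits check applied to the (possibly
 * reassembled) contiguous data, assuming the usual masked-compare form:
 * every byte in the rewound region must satisfy
 * (data[i] & and_mask[i]) == exp_mask[i].
 */
static really_inline
int mask_check_sketch(const u8 *data, const u8 *and_mask,
                      const u8 *exp_mask, u32 len) {
    for (u32 i = 0; i < len; i++) {
        if ((data[i] & and_mask[i]) != exp_mask[i]) {
            return 0; /* failed mask check */
        }
    }
    return 1;
}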
char is_mpv, char in_anchored,
char in_catchup) {
struct RoseContext *tctxt = &scratch->tctxt;
- u8 *aa = getActiveLeafArray(t, tctxt->state);
+ u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
struct fatbit *activeQueues = scratch->aqa;
u32 aaCount = t->activeArrayCount;
u32 qCount = t->queueCount;
if (loc + scratch->core_info.buf_offset
<= tctxt->minNonMpvMatchOffset) {
DEBUG_PRINTF("flushing chained\n");
- if (roseCatchUpMPV(t, tctxt->state, loc, scratch)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, scratch->core_info.state, loc,
+ scratch) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
goto done_queue_empty;
}
}
- if (roseCatchUpTo(t, tctxt->state, loc + scratch->core_info.buf_offset,
- scratch, in_anchored)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpTo(t, scratch->core_info.state,
+ loc + scratch->core_info.buf_offset, scratch,
+ in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
} else {
assert(is_mpv);
DEBUG_PRINTF("flushing chained\n");
tctxt->next_mpv_offset = 0; /* force us to catch the mpv */
- if (roseCatchUpMPV(t, tctxt->state, loc, scratch)
+ if (roseCatchUpMPV(t, scratch->core_info.state, loc, scratch)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
char in_anchored) {
DEBUG_PRINTF("suffix qi=%u, top event=%u\n", qi, top);
- u8 *aa = getActiveLeafArray(t, tctxt->state);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
const u32 aaCount = t->activeArrayCount;
const u32 qCount = t->queueCount;
struct mq *q = &scratch->queues[qi];
assert(leftfixLag <= left->maxLag);
struct mq *q = scratch->queues + qi;
+ char *state = scratch->core_info.state;
+ u8 *activeLeftArray = getActiveLeftArray(t, state);
u32 qCount = t->queueCount;
u32 arCount = t->activeLeftCount;
- if (!mmbit_isset(getActiveLeftArray(t, tctxt->state), arCount, ri)) {
+ if (!mmbit_isset(activeLeftArray, arCount, ri)) {
DEBUG_PRINTF("engine is dead nothing to see here\n");
return 0;
}
if (nfaSupportsZombie(getNfaByQueue(t, qi)) && ci->buf_offset
&& !fatbit_isset(scratch->aqa, qCount, qi)
- && isZombie(t, tctxt->state, left)) {
+ && isZombie(t, state, left)) {
DEBUG_PRINTF("zombie\n");
return 1;
}
if (left->transient) {
sp = -(s32)ci->hlen;
} else {
- sp = -(s32)loadRoseDelay(t, tctxt->state, left);
+ sp = -(s32)loadRoseDelay(t, state, left);
}
/* transient nfas are always started fresh -> state not maintained
if (infixTooOld(q, loc)) {
DEBUG_PRINTF("infix %u died of old age\n", ri);
scratch->tctxt.groups &= left->squash_mask;
- mmbit_unset(getActiveLeftArray(t, tctxt->state), arCount, ri);
+ mmbit_unset(activeLeftArray, arCount, ri);
return 0;
}
if (!rosePrefixCheckMiracles(t, left, ci, q, end)) {
DEBUG_PRINTF("leftfix %u died due to miracle\n", ri);
scratch->tctxt.groups &= left->squash_mask;
- mmbit_unset(getActiveLeftArray(t, tctxt->state), arCount, ri);
+ mmbit_unset(activeLeftArray, arCount, ri);
return 0;
}
char rv = nfaQueueExecRose(q->nfa, q, leftfixReport);
if (!rv) { /* nfa is dead */
DEBUG_PRINTF("leftfix %u died while trying to catch up\n", ri);
- mmbit_unset(getActiveLeftArray(t, tctxt->state), arCount, ri);
- assert(!mmbit_isset(getActiveLeftArray(t, tctxt->state), arCount,
- ri));
+ mmbit_unset(activeLeftArray, arCount, ri);
+ assert(!mmbit_isset(activeLeftArray, arCount, ri));
tctxt->groups &= left->squash_mask;
return 0;
}
struct mq *q = tctxtToScratch(tctxt)->queues + qi;
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
- u8 *activeLeftArray = getActiveLeftArray(t, tctxt->state);
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ char *state = scratch->core_info.state;
+ u8 *activeLeftArray = getActiveLeftArray(t, state);
const u32 arCount = t->activeLeftCount;
char alive = mmbit_set(activeLeftArray, arCount, ri);
return;
}
- struct fatbit *aqa = tctxtToScratch(tctxt)->aqa;
+ struct fatbit *aqa = scratch->aqa;
const u32 qCount = t->queueCount;
if (alive && nfaSupportsZombie(getNfaByInfo(t, info)) && ci->buf_offset &&
- !fatbit_isset(aqa, qCount, qi) && isZombie(t, tctxt->state, left)) {
+ !fatbit_isset(aqa, qCount, qi) && isZombie(t, state, left)) {
DEBUG_PRINTF("yawn - zombie\n");
return;
}
DEBUG_PRINTF("initing %u\n", qi);
initRoseQueue(t, qi, left, tctxt);
if (alive) {
- s32 sp = -(s32)loadRoseDelay(t, tctxt->state, left);
+ s32 sp = -(s32)loadRoseDelay(t, state, left);
pushQueueAt(q, 0, MQE_START, sp);
loadStreamState(q->nfa, q, sp);
} else {
/* handles the firing of external matches */
static rose_inline
-hwlmcb_rv_t roseHandleMatch(const struct RoseEngine *t, u8 *state, ReportID id,
- u64a end, struct RoseContext *tctxt,
+hwlmcb_rv_t roseHandleMatch(const struct RoseEngine *t, char *state,
+ ReportID id, u64a end, struct RoseContext *tctxt,
char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
* up */
static rose_inline
hwlmcb_rv_t roseCatchUpAndHandleChainMatch(const struct RoseEngine *t,
- u8 *state, ReportID r, u64a end,
+ char *state, ReportID r, u64a end,
struct RoseContext *tctxt,
char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
}
static rose_inline
-hwlmcb_rv_t roseSomCatchup(const struct RoseEngine *t, u8 *state, u64a end,
+hwlmcb_rv_t roseSomCatchup(const struct RoseEngine *t, char *state, u64a end,
struct RoseContext *tctxt, char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
}
static really_inline
-hwlmcb_rv_t roseHandleSom(const struct RoseEngine *t, u8 *state, ReportID id,
+hwlmcb_rv_t roseHandleSom(const struct RoseEngine *t, char *state, ReportID id,
u64a end, struct RoseContext *tctxt,
char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
}
static rose_inline
-hwlmcb_rv_t roseHandleSomMatch(const struct RoseEngine *t, u8 *state,
+hwlmcb_rv_t roseHandleSomMatch(const struct RoseEngine *t, char *state,
ReportID id, u64a start, u64a end,
struct RoseContext *tctxt, char in_anchored) {
if (roseCatchUpTo(t, state, end, tctxtToScratch(tctxt), in_anchored)
}
static rose_inline
-hwlmcb_rv_t roseHandleSomSom(const struct RoseEngine *t, u8 *state, ReportID id,
- u64a start, u64a end, struct RoseContext *tctxt,
- char in_anchored) {
+hwlmcb_rv_t roseHandleSomSom(const struct RoseEngine *t, char *state,
+ ReportID id, u64a start, u64a end,
+ struct RoseContext *tctxt, char in_anchored) {
DEBUG_PRINTF("id=%u, start=%llu, end=%llu, minMatchOffset=%llu\n",
id, start, end, tctxt->minMatchOffset);
// allow the program to squash groups).
int work_done = 0;
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+
assert(*(const u8 *)pc != ROSE_INSTR_END);
for (;;) {
PROGRAM_CASE(CHECK_LIT_MASK) {
assert(match_len);
- if (!roseCheckBenefits(tctxt, end, match_len, ri->and_mask.a8,
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckBenefits(ci, end, match_len, ri->and_mask.a8,
ri->cmp_mask.a8)) {
DEBUG_PRINTF("halt: failed mask check\n");
return HWLM_CONTINUE_MATCHING;
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_ONLY_EOD) {
- struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
+ struct core_info *ci = &scratch->core_info;
if (end != ci->buf_offset + ci->len) {
DEBUG_PRINTF("should only match at end of data\n");
assert(ri->fail_jump); // must progress
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_NOT_HANDLED) {
- struct fatbit *handled = tctxtToScratch(tctxt)->handled_roles;
+ struct fatbit *handled = scratch->handled_roles;
if (fatbit_set(handled, t->handledKeyCount, ri->key)) {
DEBUG_PRINTF("key %u already set\n", ri->key);
assert(ri->fail_jump); // must progress
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT) {
- if (roseHandleMatch(t, tctxt->state, ri->report, end, tctxt,
+ if (roseHandleMatch(t, scratch->core_info.state,
+ ri->report, end, tctxt,
in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_CHAIN) {
- if (roseCatchUpAndHandleChainMatch(t, tctxt->state, ri->report,
- end, tctxt, in_anchored) ==
- HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpAndHandleChainMatch(
+ t, scratch->core_info.state, ri->report, end,
+ tctxt, in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
work_done = 1;
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_SOM_INT) {
- if (roseHandleSom(t, tctxt->state, ri->report, end, tctxt,
+ if (roseHandleSom(t, scratch->core_info.state, ri->report,
+ end, tctxt,
in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_SOM) {
- if (roseHandleSomSom(t, tctxt->state, ri->report, som, end,
- tctxt,
+ if (roseHandleSomSom(t, scratch->core_info.state,
+ ri->report, som, end, tctxt,
in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_SOM_KNOWN) {
- if (roseHandleSomMatch(t, tctxt->state, ri->report, som, end,
- tctxt, in_anchored) ==
+ if (roseHandleSomMatch(t, scratch->core_info.state, ri->report,
+ som, end, tctxt, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
PROGRAM_CASE(SET_STATE) {
DEBUG_PRINTF("set state index %u\n", ri->index);
- mmbit_set(getRoleState(tctxt->state), t->rolesWithStateCount,
- ri->index);
+ mmbit_set(getRoleState(scratch->core_info.state),
+ t->rolesWithStateCount, ri->index);
work_done = 1;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_STATE) {
DEBUG_PRINTF("check state %u\n", ri->index);
- if (!mmbit_isset(getRoleState(tctxt->state),
- t->rolesWithStateCount, ri->index)) {
+ const u8 *roles = getRoleState(scratch->core_info.state);
+ if (!mmbit_isset(roles, t->rolesWithStateCount, ri->index)) {
DEBUG_PRINTF("state not on\n");
assert(ri->fail_jump); // must progress
pc += ri->fail_jump;
getByOffset(t, ri->iter_offset);
assert(ISALIGNED(it));
+ const u8 *roles = getRoleState(scratch->core_info.state);
+
u32 idx = 0;
- u32 i = mmbit_sparse_iter_begin(getRoleState(tctxt->state),
- t->rolesWithStateCount, &idx,
- it, si_state);
+ u32 i = mmbit_sparse_iter_begin(roles, t->rolesWithStateCount,
+ &idx, it, si_state);
if (i == MMB_INVALID) {
DEBUG_PRINTF("no states in sparse iter are on\n");
assert(ri->fail_jump); // must progress
continue;
}
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
fatbit_clear(scratch->handled_roles);
const u32 *jumps = getByOffset(t, ri->jump_table);
getByOffset(t, ri->iter_offset);
assert(ISALIGNED(it));
+ const u8 *roles = getRoleState(scratch->core_info.state);
+
u32 idx = 0;
- u32 i = mmbit_sparse_iter_next(getRoleState(tctxt->state),
- t->rolesWithStateCount,
+ u32 i = mmbit_sparse_iter_next(roles, t->rolesWithStateCount,
ri->state, &idx, it, si_state);
if (i == MMB_INVALID) {
DEBUG_PRINTF("no more states in sparse iter are on\n");
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
#include "util/multibit.h"
// Initialise state space for engine use.
-void roseInitState(const struct RoseEngine *t, u8 *state);
+void roseInitState(const struct RoseEngine *t, char *state);
void roseBlockEodExec(const struct RoseEngine *t, u64a offset,
struct hs_scratch *scratch);
}
/* assumes core_info in scratch has been init to point to data */
-void roseStreamExec(const struct RoseEngine *t, u8 *state,
- struct hs_scratch *scratch, RoseCallback callback,
- RoseCallbackSom som_callback, void *context);
+void roseStreamExec(const struct RoseEngine *t, struct hs_scratch *scratch,
+ RoseCallback callback, RoseCallbackSom som_callback,
+ void *context);
-void roseEodExec(const struct RoseEngine *t, u8 *state, u64a offset,
+void roseEodExec(const struct RoseEngine *t, u64a offset,
struct hs_scratch *scratch, RoseCallback callback,
RoseCallbackSom som_callback, void *context);
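/*
 * Call-site sketch for the new prototypes (it mirrors the runtime
 * changes later in this patch): callers now pass only the engine and
 * scratch, and the state pointer is fetched from scratch->core_info
 * inside the callee.
 *
 *     roseStreamExec(rose, scratch, selectAdaptor(rose),
 *                    selectSomAdaptor(rose), scratch);
 */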
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/** \brief Fetch runtime state ptr. */
static really_inline
-struct RoseRuntimeState *getRuntimeState(u8 *state) {
+struct RoseRuntimeState *getRuntimeState(char *state) {
struct RoseRuntimeState *rs = (struct RoseRuntimeState *)(state);
assert(ISALIGNED_N(rs, 8));
return rs;
}
static really_inline
-void *getRoleState(u8 *state) {
+void *getRoleState(char *state) {
return state + sizeof(struct RoseRuntimeState);
}
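/*
 * State-block map implied by the accessors in this header; only the
 * named t->stateOffsets fields are assumed, and no ordering between
 * them beyond what the code shows:
 *
 *   state -> struct RoseRuntimeState            (header)
 *            role-state multibit                (directly after header)
 *            + stateOffsets.activeLeafArray     (suffix NFA multibit)
 *            + stateOffsets.activeLeftArray     (rose NFA multibit)
 *            + stateOffsets.groups              (partial u64a)
 *            + stateOffsets.leftfixLagTable     (one byte per left NFA)
 *            + stateOffsets.anchorState, somValid, somWritable, ...
 */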
/** \brief Fetch the active array for suffix nfas. */
static really_inline
-u8 *getActiveLeafArray(const struct RoseEngine *t, u8 *state) {
- return state + t->stateOffsets.activeLeafArray;
+u8 *getActiveLeafArray(const struct RoseEngine *t, char *state) {
+ return (u8 *)(state + t->stateOffsets.activeLeafArray);
}
/** \brief Fetch the active array for rose nfas. */
static really_inline
-u8 *getActiveLeftArray(const struct RoseEngine *t, u8 *state) {
- return state + t->stateOffsets.activeLeftArray;
+u8 *getActiveLeftArray(const struct RoseEngine *t, char *state) {
+ return (u8 *)(state + t->stateOffsets.activeLeftArray);
}
static really_inline
}
static really_inline
-rose_group loadGroups(const struct RoseEngine *t, const u8 *state) {
+rose_group loadGroups(const struct RoseEngine *t, const char *state) {
return partial_load_u64a(state + t->stateOffsets.groups,
t->stateOffsets.groups_size);
}
static really_inline
-void storeGroups(const struct RoseEngine *t, u8 *state, rose_group groups) {
+void storeGroups(const struct RoseEngine *t, char *state, rose_group groups) {
partial_store_u64a(state + t->stateOffsets.groups, groups,
t->stateOffsets.groups_size);
}
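/*
 * A sketch of what the partial group load amounts to, assuming (as an
 * illustration, not the real util/partial_store.h implementation)
 * little-endian byte order: only groups_size low-order bytes of the
 * u64a are kept in stream state.
 */
static really_inline
u64a partial_load_sketch(const void *ptr, u32 bytes) {
    const u8 *in = (const u8 *)ptr;
    u64a v = 0;
    for (u32 i = 0; i < bytes; i++) {
        v |= (u64a)in[i] << (8 * i); /* accumulate low-order bytes */
    }
    return v;
}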
static really_inline
-u8 * getFloatingMatcherState(const struct RoseEngine *t, u8 *state) {
- return state + t->stateOffsets.floatingMatcherState;
+u8 *getFloatingMatcherState(const struct RoseEngine *t, char *state) {
+ return (u8 *)(state + t->stateOffsets.floatingMatcherState);
}
static really_inline
-u8 *getLeftfixLagTable(const struct RoseEngine *t, u8 *state) {
- return state + t->stateOffsets.leftfixLagTable;
+u8 *getLeftfixLagTable(const struct RoseEngine *t, char *state) {
+ return (u8 *)(state + t->stateOffsets.leftfixLagTable);
}
static really_inline
-const u8 *getLeftfixLagTableConst(const struct RoseEngine *t, const u8 *state) {
- return state + t->stateOffsets.leftfixLagTable;
+const u8 *getLeftfixLagTableConst(const struct RoseEngine *t,
+ const char *state) {
+ return (const u8 *)(state + t->stateOffsets.leftfixLagTable);
}
static rose_inline
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
void runAnchoredTableStream(const struct RoseEngine *t, const void *atable,
size_t alen, u64a offset,
struct hs_scratch *scratch) {
- char *state_base
- = (char *)scratch->tctxt.state + t->stateOffsets.anchorState;
-
+ char *state_base = scratch->core_info.state + t->stateOffsets.anchorState;
const struct anchored_matcher_info *curr = atable;
do {
};
static really_inline
-enum MiracleAction roseScanForMiracles(const struct RoseEngine *t, u8 *state,
+enum MiracleAction roseScanForMiracles(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, u32 qi,
const struct LeftNfaInfo *left,
const struct NFA *nfa) {
nfaQueueInitState(q->nfa, q);
} else {
if (miracle_loc > end_loc - t->historyRequired) {
- u8 *streamState = state + getNfaInfoByQueue(t, qi)->stateOffset;
+ char *streamState = state + getNfaInfoByQueue(t, qi)->stateOffset;
u64a offset = ci->buf_offset + miracle_loc;
u8 key = offset ? getByteBefore(ci, miracle_loc) : 0;
DEBUG_PRINTF("init state, key=0x%02x, offset=%llu\n", key, offset);
static really_inline
-char roseCatchUpLeftfix(const struct RoseEngine *t, u8 *state,
+char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, u32 qi,
const struct LeftNfaInfo *left) {
assert(!left->transient); // active roses only
}
static rose_inline
-void roseCatchUpLeftfixes(const struct RoseEngine *t, u8 *state,
+void roseCatchUpLeftfixes(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch) {
if (!t->activeLeftIterOffset) {
// No sparse iter, no non-transient roses.
// Saves out stream state for all our active suffix NFAs.
static rose_inline
-void roseSaveNfaStreamState(const struct RoseEngine *t, u8 *state,
+void roseSaveNfaStreamState(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch) {
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
}
static rose_inline
-void ensureStreamNeatAndTidy(const struct RoseEngine *t, u8 *state,
+void ensureStreamNeatAndTidy(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, size_t length,
u64a offset, u8 delay_rb_status) {
struct RoseContext *tctxt = &scratch->tctxt;
assert(!can_stop_matching(scratch));
}
-void roseStreamExec(const struct RoseEngine *t, u8 *state,
- struct hs_scratch *scratch, RoseCallback callback,
- RoseCallbackSom som_callback, void *ctx) {
+void roseStreamExec(const struct RoseEngine *t, struct hs_scratch *scratch,
+ RoseCallback callback, RoseCallbackSom som_callback,
+ void *ctx) {
DEBUG_PRINTF("OH HAI\n");
assert(t);
- assert(state);
assert(scratch->core_info.hbuf);
assert(scratch->core_info.buf);
return;
}
+ char *state = scratch->core_info.state;
struct RoseRuntimeState *rstate = getRuntimeState(state);
struct RoseContext *tctxt = &scratch->tctxt;
- tctxt->t = t;
tctxt->mpv_inactive = 0;
tctxt->groups = loadGroups(t, state);
tctxt->lit_offset_adjust = offset + 1; // index after last byte
tctxt->delayLastEndOffset = offset;
tctxt->lastEndOffset = offset;
tctxt->filledDelayedSlots = 0;
- tctxt->state = state;
tctxt->cb = callback;
tctxt->cb_som = som_callback;
tctxt->userCtx = ctx;
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/** \brief Initialise SOM state. Used in both block and streaming mode. */
static really_inline
-void initSomState(const struct RoseEngine *rose, u8 *state) {
+void initSomState(const struct RoseEngine *rose, char *state) {
assert(rose && state);
const u32 somCount = rose->somLocationCount;
- mmbit_clear(state + rose->stateOffsets.somValid, somCount);
- mmbit_clear(state + rose->stateOffsets.somWritable, somCount);
+ mmbit_clear((u8 *)state + rose->stateOffsets.somValid, somCount);
+ mmbit_clear((u8 *)state + rose->stateOffsets.somWritable, somCount);
}
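/*
 * With the state base now a char *, u8-oriented helpers such as the
 * multibit routines take an explicit cast, as above. A hypothetical
 * wrapper showing the pattern this patch uses at those call sites:
 */
static really_inline
u8 *state_bytes_sketch(char *state, u32 offset) {
    return (u8 *)state + offset;
}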
static really_inline
assert(rose);
assert(scratch);
- initSomState(rose, (u8 *)scratch->core_info.state);
+ initSomState(rose, scratch->core_info.state);
DEBUG_PRINTF("blockmode scan len=%zu\n", scratch->core_info.len);
assert(scratch);
const struct HWLM *ftable = getFLiteralMatcher(rose);
- initSomState(rose, (u8 *)scratch->core_info.state);
+ initSomState(rose, scratch->core_info.state);
const u8 *buffer = scratch->core_info.buf;
size_t length = scratch->core_info.len;
DEBUG_PRINTF("rose engine %d\n", rose->runtimeImpl);
assert(t);
assert(scratch);
- initSomState(t, (u8 *)scratch->core_info.state);
+ initSomState(t, scratch->core_info.state);
assert(t->outfixEndQueue == 1);
assert(!t->amatcherOffset);
assert(!t->ematcherOffset);
s->rose = rose;
s->offset = 0;
- u8 *state = (u8 *)getMultiState(s);
+ char *state = getMultiState(s);
roseInitState(rose, state);
return;
}
- roseEodExec(rose, (u8 *)state, id->offset, scratch, selectAdaptor(rose),
+ roseEodExec(rose, id->offset, scratch, selectAdaptor(rose),
selectSomAdaptor(rose), scratch);
}
const struct RoseEngine *rose = stream_state->rose;
assert(rose);
- u8 *rose_state = (u8 *)state;
- roseStreamExec(rose, rose_state, scratch, selectAdaptor(rose),
- selectSomAdaptor(rose), scratch);
+ roseStreamExec(rose, scratch, selectAdaptor(rose), selectSomAdaptor(rose),
+ scratch);
if (!told_to_stop_matching(scratch) &&
isAllExhausted(rose, scratch->core_info.exhaustionVector)) {
u8 *hwlm_stream_state;
if (rose->floatingStreamState) {
- hwlm_stream_state = getFloatingMatcherState(rose, (u8 *)state);
+ hwlm_stream_state = getFloatingMatcherState(rose, state);
} else {
hwlm_stream_state = NULL;
}
/*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
/** \brief Rose state information. */
struct RoseContext {
- const struct RoseEngine *t;
- u8 *state; /**< base pointer to the full state */
u8 mpv_inactive;
u64a groups;
u64a lit_offset_adjust; /**< offset to add to matches coming from hwlm */