size_t len = nfaRevAccelCheck(nfa, scratch->core_info.buf,
scratch->core_info.len);
if (len) {
- struct RoseContext *tctxt = &scratch->tctxt;
u8 *activeArray = getActiveLeafArray(t, state);
const u32 activeArraySize = t->activeArrayCount;
const u32 qCount = t->queueCount;
fatbit_set(scratch->aqa, qCount, 0);
struct mq *q = scratch->queues;
- initQueue(q, 0, t, tctxt);
+ initQueue(q, 0, t, scratch);
q->length = len; /* adjust for rev_accel */
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, 0);
}
exit:;
- if (cleanUpDelayed(length, 0, scratch) == HWLM_TERMINATE_MATCHING) {
+ if (cleanUpDelayed(t, scratch, length, 0) == HWLM_TERMINATE_MATCHING) {
return;
}
assert(!can_stop_matching(scratch));
- roseCatchUpTo(t, state, length, scratch, 0);
+ roseCatchUpTo(t, scratch, length, 0);
}
#include "util/pqueue.h"
static really_inline
-int handleReportInternally(struct hs_scratch *scratch, ReportID id,
+int handleReportInternally(const struct RoseEngine *t,
+ struct hs_scratch *scratch, ReportID id,
u64a offset) {
- const struct RoseEngine *t = scratch->core_info.rose;
const struct internal_report *ri = getInternalReport(t, id);
    if (ri->type == EXTERNAL_CALLBACK) {
        return 0;
    }
    if (isInternalSomReport(ri)) {
        handleSomInternal(scratch, ri, offset);
        return 1;
    }
if (ri->type == INTERNAL_ROSE_CHAIN) {
- roseHandleChainMatch(t, id, offset, &scratch->tctxt, 0, 1);
+ roseHandleChainMatch(t, scratch, id, offset, 0, 1);
return 1;
}
    return 0;
}
static really_inline
-int handleReportInternallyNoChain(struct hs_scratch *scratch, ReportID id,
+int handleReportInternallyNoChain(const struct RoseEngine *t,
+ struct hs_scratch *scratch, ReportID id,
u64a offset) {
- const struct RoseEngine *t = scratch->core_info.rose;
const struct internal_report *ri = getInternalReport(t, id);
if (ri->type == EXTERNAL_CALLBACK) {
return 0;
}
static rose_inline
-void nextAnchoredMatch(const struct RoseEngine *t, struct RoseContext *tctxt,
+void nextAnchoredMatch(const struct RoseEngine *t, struct hs_scratch *scratch,
ReportID *reportId, u64a *end) {
+ struct RoseContext *tctxt = &scratch->tctxt;
assert(tctxt->curr_anchored_loc != MMB_INVALID);
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct fatbit **anchoredRows = getAnchoredLog(scratch);
u32 region_width = t->anchoredMatches;
}
static really_inline
-void deactivateQueue(u8 *aa, u32 qi, struct hs_scratch *scratch) {
- const struct RoseEngine *t = scratch->core_info.rose;
+void deactivateQueue(const struct RoseEngine *t, u8 *aa, u32 qi,
+ struct hs_scratch *scratch) {
u32 aaCount = t->activeArrayCount;
u32 qCount = t->queueCount;
struct mq *q, struct hs_scratch *scratch) {
if (!fatbit_set(scratch->aqa, qCount, qi)) {
DEBUG_PRINTF("initing %u\n", qi);
- initQueue(q, qi, t, &scratch->tctxt);
+ initQueue(q, qi, t, scratch);
loadStreamState(q->nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
}
/* requires that we are the top item on the pq */
static really_inline
-hwlmcb_rv_t runExistingNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
+hwlmcb_rv_t runExistingNfaToNextMatch(const struct RoseEngine *t, u32 qi,
+ struct mq *q, s64a loc,
struct hs_scratch *scratch, u8 *aa,
char report_curr) {
assert(pq_top(scratch->catchup_pq.qm)->queue == qi);
return HWLM_TERMINATE_MATCHING;
}
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
q->cur = q->end = 0;
}
static really_inline
-hwlmcb_rv_t runNewNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
+hwlmcb_rv_t runNewNfaToNextMatch(const struct RoseEngine *t, u32 qi,
+ struct mq *q, s64a loc,
struct hs_scratch *scratch, u8 *aa,
s64a report_ok_loc) {
assert(!q->report_current);
return HWLM_TERMINATE_MATCHING;
}
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
q->cur = q->end = 0;
int roseNfaFinalBlastAdaptor(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
offset, id);
updateLastMatchOffset(tctxt, offset);
- if (handleReportInternallyNoChain(scratch, id, offset)) {
+ if (handleReportInternallyNoChain(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
+ return !roseSuffixIsExhausted(t, 0,
scratch->core_info.exhaustionVector);
}
}
void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
/* chained nfas are run under the control of the anchored catchup */
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
+ return !roseSuffixIsExhausted(t, 0,
scratch->core_info.exhaustionVector);
}
}
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
return HWLM_CONTINUE_MATCHING;
}
ensureEnd(q, qi, loc);
- return runNewNfaToNextMatch(qi, q, loc, scratch, aa, report_ok_loc);
+ return runNewNfaToNextMatch(t, qi, q, loc, scratch, aa, report_ok_loc);
}
static really_inline
}
}
-hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
+hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch) {
+ char *state = scratch->core_info.state;
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
UNUSED u32 aaCount = t->activeArrayCount;
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
goto done;
}
if (!next_pos_match_loc) { /* 0 means dead */
DEBUG_PRINTF("mpv is pining for the fjords\n");
if (can_stop_matching(scratch)) {
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
return HWLM_TERMINATE_MATCHING;
}
DEBUG_PRINTF("called\n");
if (ri->type != INTERNAL_ROSE_CHAIN) {
/* INTERNAL_ROSE_CHAIN are not visible externally */
- if (roseCatchUpMPV(t, scratch->core_info.state,
- offset - scratch->core_info.buf_offset, scratch)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset,
+ scratch) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_HALT_MATCHING;
}
DEBUG_PRINTF("masky got himself a blasted match @%llu id %u !woot!\n",
offset, id);
- if (handleReportInternally(scratch, id, offset)) {
+ if (handleReportInternally(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
- if (roseCatchUpMPV(t, scratch->core_info.state,
- offset - scratch->core_info.buf_offset,
- scratch) == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset, scratch) ==
+ HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_HALT_MATCHING;
}
int roseNfaBlastAdaptorNoChain(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("masky got himself a blasted match @%llu id %u !woot!\n",
offset, id);
updateLastMatchOffset(tctxt, offset);
- if (handleReportInternallyNoChain(scratch, id, offset)) {
+ if (handleReportInternallyNoChain(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
+ return !roseSuffixIsExhausted(t, tctxt->curr_qi,
scratch->core_info.exhaustionVector);
}
}
void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ const struct RoseEngine *t = scratch->core_info.rose;
/* chained nfas are run under the control of the anchored catchup */
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
- return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
+ return !roseSuffixIsExhausted(t, tctxt->curr_qi,
scratch->core_info.exhaustionVector);
}
}
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
- if (roseCatchUpMPV(t, scratch->core_info.state,
- offset - scratch->core_info.buf_offset,
- scratch) == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset, scratch) ==
+ HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return MO_HALT_MATCHING;
}
updateLastMatchOffset(tctxt, offset);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
- if (handleReportInternally(scratch, id, offset)) {
+ const struct RoseEngine *t = scratch->core_info.rose;
+ if (handleReportInternally(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
- int cb_rv = tctxt->cb(offset, id, scratch);
- return cb_rv;
+ return tctxt->cb(offset, id, scratch);
}
int roseNfaAdaptorNoInternal(u64a offset, ReportID id, void *context) {
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
- deactivateQueue(aa, a_qi, scratch);
+ deactivateQueue(t, aa, a_qi, scratch);
return HWLM_CONTINUE_MATCHING;
}
return HWLM_TERMINATE_MATCHING;
}
- deactivateQueue(aa, a_qi, scratch);
+ deactivateQueue(t, aa, a_qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives [%lld]\n", a_qi, final_loc);
assert(second_place_loc < final_loc);
assert(q_cur_loc(q) >= second_place_loc);
- if (runNewNfaToNextMatch(a_qi, q, final_loc, scratch, aa, report_ok_loc)
- == HWLM_TERMINATE_MATCHING) {
+ if (runNewNfaToNextMatch(t, a_qi, q, final_loc, scratch, aa,
+ report_ok_loc) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return HWLM_TERMINATE_MATCHING;
}
pq_insert_with(&scratch->catchup_pq, scratch, qi, qcl);
} else if (!alive) {
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
} else {
assert(q->cur == q->end);
/* TODO: can this be simplified? the nfa will never produce any
mmbit_set(aa, aaCount, qi);
fatbit_set(aqa, qCount, qi);
struct mq *q = queues + qi;
- initQueue(q, qi, t, &scratch->tctxt);
+ initQueue(q, qi, t, scratch);
q->length = len; /* adjust for rev_accel */
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, 0);
pq_insert_with(&scratch->catchup_pq, scratch, qi, qcl);
} else if (!alive) {
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
} else {
assert(q->cur == q->end);
/* TODO: can this be simplified? the nfa will never produce any
s64a report_ok_loc = tctxt->minNonMpvMatchOffset + 1
- scratch->core_info.buf_offset;
- hwlmcb_rv_t rv = roseCatchUpMPV(t, state, report_ok_loc, scratch);
+ hwlmcb_rv_t rv = roseCatchUpMPV(t, report_ok_loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
}
static never_inline
-hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
+hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, s64a loc,
s64a final_loc, struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
assert(t->activeArrayCount);
DEBUG_PRINTF("min non mpv match offset %llu\n",
scratch->tctxt.minNonMpvMatchOffset);
+ char *state = scratch->core_info.state;
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
}
/* catch up char matches to this point */
- if (roseCatchUpMPV(t, state, match_loc, scratch)
+ if (roseCatchUpMPV(t, match_loc, scratch)
== HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return HWLM_TERMINATE_MATCHING;
DEBUG_PRINTF("second place %lld loc %lld\n", second_place_loc, loc);
if (second_place_loc == q_cur_loc(q)) {
- if (runExistingNfaToNextMatch(qi, q, q_final_loc, scratch, aa, 1)
+ if (runExistingNfaToNextMatch(t, qi, q, q_final_loc, scratch, aa, 1)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
return HWLM_TERMINATE_MATCHING;
}
- deactivateQueue(aa, qi, scratch);
+ deactivateQueue(t, aa, qi, scratch);
pq_pop_nice(&scratch->catchup_pq);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives [%lld]\n", qi, loc);
} else {
DEBUG_PRINTF("queue %u not finished, %u/%u [%lld/%lld]\n",
qi, q->cur, q->end, q->items[q->cur].location, loc);
- runExistingNfaToNextMatch(qi, q, q_final_loc, scratch, aa, 0);
+ runExistingNfaToNextMatch(t, qi, q, q_final_loc, scratch, aa, 0);
}
}
exit:;
}
static really_inline
-hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t, char *state,
+hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t,
s64a loc, s64a final_loc,
struct hs_scratch *scratch) {
- hwlmcb_rv_t rv = roseCatchUpNfas(t, state, loc, final_loc, scratch);
+ hwlmcb_rv_t rv = roseCatchUpNfas(t, loc, final_loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- return roseCatchUpMPV(t, state, loc, scratch);
+ return roseCatchUpMPV(t, loc, scratch);
}
}
/* buildSufPQ may have caught only part of the pq upto anchored_end */
- rv = roseCatchUpNfas(t, scratch->core_info.state,
+ rv = roseCatchUpNfas(t,
anchored_end - scratch->core_info.buf_offset, loc,
scratch);
while (anchored_report != MO_INVALID_IDX
&& anchored_end <= current_offset) {
if (anchored_end != tctxt->minMatchOffset) {
- rv = roseCatchUpNfasAndMpv(t, scratch->core_info.state,
+ rv = roseCatchUpNfasAndMpv(t,
anchored_end - scratch->core_info.buf_offset,
loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
assert(anchored_end == tctxt->minMatchOffset);
updateLastMatchOffset(tctxt, anchored_end);
- if (handleReportInternally(scratch, anchored_report, anchored_end)) {
+ if (handleReportInternally(t, scratch, anchored_report, anchored_end)) {
goto next;
}
return HWLM_TERMINATE_MATCHING;
}
next:
- nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
+ nextAnchoredMatch(t, scratch, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}
return HWLM_CONTINUE_MATCHING;
}
- rv = roseCatchUpNfas(t, scratch->core_info.state, loc, loc, scratch);
+ rv = roseCatchUpNfas(t, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
if (do_full_mpv) {
/* finish off any outstanding chained matches */
- rv = roseCatchUpMPV(t, scratch->core_info.state, loc, scratch);
+ rv = roseCatchUpMPV(t, loc, scratch);
}
DEBUG_PRINTF("catchup all done %llu\n", current_offset);
return rv;
}
- rv = roseCatchUpNfas(t, state, loc, loc, scratch);
+ rv = roseCatchUpNfas(t, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
- rv = roseCatchUpMPV(t, state, loc, scratch);
+ rv = roseCatchUpMPV(t, loc, scratch);
assert(rv != HWLM_CONTINUE_MATCHING
|| scratch->catchup_pq.qm_size <= t->outfixEndQueue);
return rv;
return rv;
}
- rv = roseCatchUpNfas(t, state, loc, loc, scratch);
+ rv = roseCatchUpNfas(t, loc, loc, scratch);
assert(rv != HWLM_CONTINUE_MATCHING ||
scratch->catchup_pq.qm_size <= t->outfixEndQueue);
updateLastMatchOffset(tctxt, anchored_end);
/* as we require that there are no leaf nfas - there must be no nfa */
- if (handleReportInternallyNoChain(scratch, anchored_report,
+ if (handleReportInternallyNoChain(t, scratch, anchored_report,
anchored_end)) {
goto next;
}
return HWLM_TERMINATE_MATCHING;
}
next:
- nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
+ nextAnchoredMatch(t, scratch, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}
/* will only catch mpv upto last reported external match */
hwlmcb_rv_t roseCatchUpAnchoredAndSuf(s64a loc, struct hs_scratch *scratch);
-
-hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
+hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch);
void blockInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch);
static really_inline
-hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, char *state,
- s64a loc, struct hs_scratch *scratch) {
+hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, s64a loc,
+ struct hs_scratch *scratch) {
u64a cur_offset = loc + scratch->core_info.buf_offset;
assert(cur_offset >= scratch->tctxt.minMatchOffset);
assert(t->outfixBeginQueue == 1); /* if it exists mpv is queue 0 */
- u8 *aa = getActiveLeafArray(t, state);
+ u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
u32 aaCount = t->activeArrayCount;
if (!mmbit_isset(aa, aaCount, 0)){
* they may have events pushed on during this process which may be before
* the catch up point */
- return roseCatchUpMPV_i(t, state, loc, scratch);
+ return roseCatchUpMPV_i(t, loc, scratch);
}
static really_inline
/* catches up nfas, anchored matches and the mpv */
static rose_inline
-hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, char *state, u64a end,
- struct hs_scratch *scratch, char in_anchored) {
+hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
+ char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minMatchOffset) {
/* we must already be up to date */
return HWLM_CONTINUE_MATCHING;
}
+ char *state = scratch->core_info.state;
s64a loc = end - scratch->core_info.buf_offset;
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
/* only need to catch up the mpv */
- return roseCatchUpMPV(t, state, loc, scratch);
+ return roseCatchUpMPV(t, loc, scratch);
}
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
* and suf/outfixes. The MPV will be run only to intersperse matches in
* the output match stream if external matches are raised. */
static rose_inline
-hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t, char *state,
- u64a end, struct hs_scratch *scratch,
+hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
/* sadly, this branch rarely gets taken as the mpv itself is usually
* alive. */
+ char *state = scratch->core_info.state;
if (!mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
scratch->tctxt.minNonMpvMatchOffset = end;
return HWLM_CONTINUE_MATCHING;
hwlmExec(etable, eod_data, eod_len, adj, roseCallback, tctxt, tctxt->groups);
// We may need to fire delayed matches
- return cleanUpDelayed(0, offset, scratch);
+ return cleanUpDelayed(t, scratch, 0, offset);
}
static rose_inline
DEBUG_PRINTF("running eod program at offset %u\n", t->eodIterProgramOffset);
const size_t match_len = 0;
- if (roseRunProgram(t, t->eodIterProgramOffset, offset, match_len,
- &(scratch->tctxt), 0) == HWLM_TERMINATE_MATCHING) {
+ if (roseRunProgram(t, scratch, t->eodIterProgramOffset, offset, match_len,
+ 0) == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
}
static rose_inline
-void cleanupAfterEodMatcher(const struct RoseEngine *t, char *state,
- u64a offset, struct hs_scratch *scratch) {
- struct RoseContext *tctxt = &scratch->tctxt;
-
+void cleanupAfterEodMatcher(const struct RoseEngine *t, u64a offset,
+ struct hs_scratch *scratch) {
// Flush history to make sure it's consistent.
- roseFlushLastByteHistory(t, state, offset, tctxt);
+ roseFlushLastByteHistory(t, scratch, offset);
}
static rose_inline
assert(!scratch->tctxt.filledDelayedSlots);
const size_t match_len = 0;
- if (roseRunProgram(t, t->eodProgramOffset, offset, match_len,
- &scratch->tctxt, 0) == HWLM_TERMINATE_MATCHING) {
+ if (roseRunProgram(t, scratch, t->eodProgramOffset, offset, match_len, 0) ==
+ HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
return;
}
- cleanupAfterEodMatcher(t, state, offset, scratch);
+ cleanupAfterEodMatcher(t, offset, scratch);
// Fire any new EOD reports.
if (roseEodRunIterator(t, offset, scratch) == MO_HALT_MATCHING) {
}
static rose_inline
-void prepForEod(const struct RoseEngine *t, char *state, size_t length,
- struct RoseContext *tctxt) {
- roseFlushLastByteHistory(t, state, length, tctxt);
- tctxt->lastEndOffset = length;
+void prepForEod(const struct RoseEngine *t, struct hs_scratch *scratch,
+ size_t length) {
+ roseFlushLastByteHistory(t, scratch, length);
+ scratch->tctxt.lastEndOffset = length;
}
void roseBlockEodExec(const struct RoseEngine *t, u64a offset,
char *state = scratch->core_info.state;
// Ensure that history is correct before we look for EOD matches
- prepForEod(t, state, scratch->core_info.len, &scratch->tctxt);
+ prepForEod(t, scratch, scratch->core_info.len);
roseEodExec_i(t, state, offset, scratch, 0);
}
if (programOffset) {
const size_t match_len = end - start + 1;
UNUSED hwlmcb_rv_t rv =
- roseRunProgram(t, programOffset, real_end, match_len, tctx, 0);
+ roseRunProgram(t, scratch, programOffset, real_end, match_len, 0);
assert(rv != HWLM_TERMINATE_MATCHING);
}
}
static rose_inline
-void recordAnchoredMatch(struct RoseContext *tctxt, ReportID reportId,
- u64a end) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- const struct RoseEngine *t = scratch->core_info.rose;
+void recordAnchoredMatch(const struct RoseEngine *t, struct hs_scratch *scratch,
+ ReportID reportId, u64a end) {
struct fatbit **anchoredRows = getAnchoredLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", reportId, end);
}
static rose_inline
-void recordAnchoredLiteralMatch(struct RoseContext *tctxt, u32 literal_id,
+void recordAnchoredLiteralMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 literal_id,
u64a end) {
assert(end);
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- const struct RoseEngine *t = scratch->core_info.rose;
struct fatbit **anchoredLiteralRows = getAnchoredLiteralLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", literal_id, end);
fatbit_set(anchoredLiteralRows[end - 1], t->anchored_count, rel_idx);
}
-hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
- u64a end, struct RoseContext *tctxt,
- char in_anchored, char in_catchup) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, ReportID r,
+ u64a end, char in_anchored, char in_catchup) {
struct core_info *ci = &scratch->core_info;
u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
if (!mmbit_set(aa, aaCount, qi)) {
- initQueue(q, qi, t, tctxt);
+ initQueue(q, qi, t, scratch);
nfaQueueInitState(q->nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(activeQueues, qCount, qi);
/* nfa only needs one top; we can go home now */
return HWLM_CONTINUE_MATCHING;
} else if (!fatbit_set(activeQueues, qCount, qi)) {
- initQueue(q, qi, t, tctxt);
+ initQueue(q, qi, t, scratch);
loadStreamState(q->nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
} else if (isQueueFull(q)) {
pushQueueNoMerge(q, MQE_END, loc);
char alive = nfaQueueExec(q->nfa, q, loc);
if (alive) {
- tctxt->mpv_inactive = 0;
+ scratch->tctxt.mpv_inactive = 0;
q->cur = q->end = 0;
pushQueueAt(q, 0, MQE_START, loc);
} else {
}
DEBUG_PRINTF("added mpv event at %lld\n", loc);
- tctxt->next_mpv_offset = 0; /* the top event may result in matches earlier
- * than expected */
+ scratch->tctxt.next_mpv_offset = 0; /* the top event may result in matches
+ * earlier than expected */
return HWLM_CONTINUE_MATCHING;
}
/* handles catchup, som, cb, etc */
static really_inline
-hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, char *state,
- struct RoseContext *tctxt, ReportID id,
+hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t,
+ struct hs_scratch *scratch, ReportID id,
u64a offset, char in_anchored) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
-
- if (roseCatchUpTo(t, state, offset, scratch, in_anchored) ==
+ if (roseCatchUpTo(t, scratch, offset, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
roseHandleSom(t, scratch, id, offset);
return HWLM_CONTINUE_MATCHING;
} else if (ri->type == INTERNAL_ROSE_CHAIN) {
- return roseCatchUpAndHandleChainMatch(t, state, id, offset, tctxt,
+ return roseCatchUpAndHandleChainMatch(t, scratch, id, offset,
in_anchored);
}
}
static really_inline
hwlmcb_rv_t roseHandleAnchoredDirectReport(const struct RoseEngine *t,
- char *state,
- struct RoseContext *tctxt,
+ struct hs_scratch *scratch,
u64a real_end, ReportID report) {
DEBUG_PRINTF("direct report %u, real_end=%llu\n", report, real_end);
if (real_end > t->maxSafeAnchoredDROffset) {
DEBUG_PRINTF("match in overlapped anchored region --> stash\n");
- recordAnchoredMatch(tctxt, report, real_end);
+ recordAnchoredMatch(t, scratch, report, real_end);
return HWLM_CONTINUE_MATCHING;
}
- return roseHandleReport(t, state, tctxt, report, real_end,
- 1 /* in anchored */);
+ return roseHandleReport(t, scratch, report, real_end, 1 /* in anchored */);
}
int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
struct RoseContext *tctxt = ctx;
- struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
- char *state = ci->state;
+ struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct core_info *ci = &scratch->core_info;
const struct RoseEngine *t = ci->rose;
u64a real_end = ci->buf_offset + end; // index after last byte
DEBUG_PRINTF("MATCH id=%u offsets=[???,%llu]\n", id, real_end);
DEBUG_PRINTF("STATE groups=0x%016llx\n", tctxt->groups);
- if (can_stop_matching(tctxtToScratch(tctxt))) {
+ if (can_stop_matching(scratch)) {
DEBUG_PRINTF("received a match when we're already dead!\n");
return MO_HALT_MATCHING;
}
(const ReportID *)((const char *)t + t->multidirectOffset) +
mdr_offset;
for (; *report != MO_INVALID_IDX; report++) {
- rv = roseHandleAnchoredDirectReport(t, state, tctxt, real_end,
- *report);
+ rv = roseHandleAnchoredDirectReport(t, scratch, real_end, *report);
if (rv == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
} else if (isLiteralDR(id)) {
// Single direct report.
ReportID report = literalToReport(id);
- rv = roseHandleAnchoredDirectReport(t, state, tctxt, real_end, report);
+ rv = roseHandleAnchoredDirectReport(t, scratch, real_end, report);
if (rv == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
DEBUG_PRINTF("literal id=%u\n", id);
if (real_end <= t->floatingMinLiteralMatchOffset) {
- roseFlushLastByteHistory(t, state, real_end, tctxt);
+ roseFlushLastByteHistory(t, scratch, real_end);
tctxt->lastEndOffset = real_end;
}
const size_t match_len = 0;
- if (roseRunProgram(t, programOffset, real_end, match_len, tctxt, 1) ==
+ if (roseRunProgram(t, scratch, programOffset, real_end, match_len, 1) ==
HWLM_TERMINATE_MATCHING) {
- assert(can_stop_matching(tctxtToScratch(tctxt)));
+ assert(can_stop_matching(scratch));
DEBUG_PRINTF("caller requested termination\n");
return MO_HALT_MATCHING;
}
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
if (real_end > t->floatingMinLiteralMatchOffset) {
- recordAnchoredLiteralMatch(tctxt, id, real_end);
+ recordAnchoredLiteralMatch(t, scratch, id, real_end);
}
return MO_CONTINUE_MATCHING;
// Rose match-processing workhorse
/* assumes not in_anchored */
static really_inline
-hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t, u64a end,
- size_t match_len, u32 id,
- struct RoseContext *tctxt, char in_delay_play,
+hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
+ size_t match_len, u32 id, char in_delay_play,
char in_anch_playback) {
- /* assert(!tctxt->in_anchored); */
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- char *state = scratch->core_info.state;
-
DEBUG_PRINTF("id=%u\n", id);
if (!in_anch_playback && !in_delay_play) {
mdr_offset;
for (; *report != MO_INVALID_IDX; report++) {
DEBUG_PRINTF("handle multi-direct report %u\n", *report);
- hwlmcb_rv_t rv = roseHandleReport(t, state, tctxt, *report, end,
+ hwlmcb_rv_t rv = roseHandleReport(t, scratch, *report, end,
0 /* in anchored */);
if (rv == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
// Single direct report.
ReportID report = literalToReport(id);
DEBUG_PRINTF("handle direct report %u\n", report);
- return roseHandleReport(t, state, tctxt, report, end,
+ return roseHandleReport(t, scratch, report, end,
0 /* in anchored */);
}
}
assert(id < t->literalCount);
const u32 *programs = getByOffset(t, t->litProgramOffset);
- return roseRunProgram(t, programs[id], end, match_len, tctxt, 0);
+ return roseRunProgram(t, scratch, programs[id], end, match_len, 0);
}
static never_inline
-hwlmcb_rv_t roseProcessDelayedMatch(const struct RoseEngine *t, u64a end,
- u32 id, struct RoseContext *tctxt) {
+hwlmcb_rv_t roseProcessDelayedMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
+ u32 id) {
size_t match_len = 0;
- return roseProcessMatch_i(t, end, match_len, id, tctxt, 1, 0);
+ return roseProcessMatch_i(t, scratch, end, match_len, id, 1, 0);
}
static never_inline
hwlmcb_rv_t roseProcessDelayedAnchoredMatch(const struct RoseEngine *t,
- u64a end, u32 id,
- struct RoseContext *tctxt) {
+ struct hs_scratch *scratch,
+ u64a end, u32 id) {
size_t match_len = 0;
- return roseProcessMatch_i(t, end, match_len, id, tctxt, 0, 1);
+ return roseProcessMatch_i(t, scratch, end, match_len, id, 0, 1);
}
static really_inline
-hwlmcb_rv_t roseProcessMainMatch(const struct RoseEngine *t, u64a end,
- size_t match_len, u32 id,
- struct RoseContext *tctxt) {
- return roseProcessMatch_i(t, end, match_len, id, tctxt, 0, 0);
+hwlmcb_rv_t roseProcessMainMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
+ size_t match_len, u32 id) {
+ return roseProcessMatch_i(t, scratch, end, match_len, id, 0, 0);
}
static rose_inline
-hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
+hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t,
+ struct hs_scratch *scratch,
struct fatbit **delaySlots, u32 vicIndex,
u64a offset) {
/* assert(!tctxt->in_anchored); */
return HWLM_CONTINUE_MATCHING;
}
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- roseFlushLastByteHistory(t, scratch->core_info.state, offset, tctxt);
+ struct RoseContext *tctxt = &scratch->tctxt;
+ roseFlushLastByteHistory(t, scratch, offset);
tctxt->lastEndOffset = offset;
for (u32 it = fatbit_iterate(vicSlot, delay_count, MMB_INVALID);
UNUSED rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", literal_id, offset);
- hwlmcb_rv_t rv = roseProcessDelayedMatch(t, offset, literal_id, tctxt);
+ hwlmcb_rv_t rv =
+ roseProcessDelayedMatch(t, scratch, offset, literal_id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
/* delayed literals can't safely set groups.
static really_inline
hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
- struct RoseContext *tctxt, u32 curr_loc) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct hs_scratch *scratch,
+ u32 curr_loc) {
+ struct RoseContext *tctxt = &scratch->tctxt;
struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
u32 region_width = t->anchored_count;
rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("ANCH REPLAY MATCH id=%u offset=%u\n", literal_id,
curr_loc);
- hwlmcb_rv_t rv = roseProcessDelayedAnchoredMatch(t, curr_loc,
- literal_id, tctxt);
+ hwlmcb_rv_t rv =
+ roseProcessDelayedAnchoredMatch(t, scratch, curr_loc, literal_id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
/* anchored literals can't safely set groups.
}
static really_inline
-u32 anchored_it_begin(struct RoseContext *tctxt) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+u32 anchored_it_begin(struct hs_scratch *scratch) {
+ struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->lastEndOffset >= scratch->anchored_literal_region_len) {
return MMB_INVALID;
}
u32 begin = tctxt->lastEndOffset;
begin--;
- return bf64_iterate(tctxtToScratch(tctxt)->al_log_sum, begin);
+ return bf64_iterate(scratch->al_log_sum, begin);
}
static really_inline
hwlmcb_rv_t flushAnchoredLiterals(const struct RoseEngine *t,
- struct RoseContext *tctxt,
+ struct hs_scratch *scratch,
u32 *anchored_it_param, u64a to_off) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- char *state = scratch->core_info.state;
+ struct RoseContext *tctxt = &scratch->tctxt;
u32 anchored_it = *anchored_it_param;
/* catch up any remaining anchored matches */
for (; anchored_it != MMB_INVALID && anchored_it < to_off;
assert(anchored_it < scratch->anchored_literal_region_len);
DEBUG_PRINTF("loc_it = %u\n", anchored_it);
u32 curr_off = anchored_it + 1;
- roseFlushLastByteHistory(t, state, curr_off, tctxt);
+ roseFlushLastByteHistory(t, scratch, curr_off);
tctxt->lastEndOffset = curr_off;
- if (flushAnchoredLiteralAtLoc(t, tctxt, curr_off)
+ if (flushAnchoredLiteralAtLoc(t, scratch, curr_off)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
static really_inline
-hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct RoseContext *tctxt,
+hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct hs_scratch *scratch,
u32 *anchored_it, u64a lastEnd, u64a victimDelaySlots,
struct fatbit **delaySlots) {
- /* assert (!tctxt->in_anchored); */
-
while (victimDelaySlots) {
u32 vic = findAndClearLSB_64(&victimDelaySlots);
DEBUG_PRINTF("vic = %u\n", vic);
u64a vicOffset = vic + (lastEnd & ~(u64a)DELAY_MASK);
- if (flushAnchoredLiterals(t, tctxt, anchored_it, vicOffset)
+ if (flushAnchoredLiterals(t, scratch, anchored_it, vicOffset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
- if (playDelaySlot(t, tctxt, delaySlots, vic % DELAY_SLOT_COUNT,
+ if (playDelaySlot(t, scratch, delaySlots, vic % DELAY_SLOT_COUNT,
vicOffset) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
/* call flushQueuedLiterals instead */
-hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a currEnd) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- const struct RoseEngine *t = scratch->core_info.rose;
-
- /* assert(!tctxt->in_anchored); */
+hwlmcb_rv_t flushQueuedLiterals_i(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a currEnd) {
+ struct RoseContext *tctxt = &scratch->tctxt;
u64a lastEnd = tctxt->delayLastEndOffset;
DEBUG_PRINTF("flushing backed up matches @%llu up from %llu\n", currEnd,
lastEnd);
assert(currEnd != lastEnd); /* checked in main entry point */
- u32 anchored_it = anchored_it_begin(tctxt);
+ u32 anchored_it = anchored_it_begin(scratch);
if (!tctxt->filledDelayedSlots) {
DEBUG_PRINTF("no delayed, no flush\n");
}
{
- struct fatbit **delaySlots = getDelaySlots(tctxtToScratch(tctxt));
+ struct fatbit **delaySlots = getDelaySlots(scratch);
u32 lastIndex = lastEnd & DELAY_MASK;
u32 currIndex = currEnd & DELAY_MASK;
second_half, victimDelaySlots, lastIndex);
}
- if (playVictims(t, tctxt, &anchored_it, lastEnd, victimDelaySlots,
+ if (playVictims(t, scratch, &anchored_it, lastEnd, victimDelaySlots,
delaySlots) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
anchored_leftovers:;
- hwlmcb_rv_t rv = flushAnchoredLiterals(t, tctxt, &anchored_it, currEnd);
+ hwlmcb_rv_t rv = flushAnchoredLiterals(t, scratch, &anchored_it, currEnd);
tctxt->delayLastEndOffset = currEnd;
return rv;
}
return HWLM_TERMINATE_MATCHING;
}
- hwlmcb_rv_t rv = flushQueuedLiterals(tctx, real_end);
+ hwlmcb_rv_t rv = flushQueuedLiterals(t, scratch, real_end);
/* flushDelayed may have advanced tctx->lastEndOffset */
if (real_end >= t->floatingMinLiteralMatchOffset) {
- roseFlushLastByteHistory(t, scratch->core_info.state, real_end, tctx);
+ roseFlushLastByteHistory(t, scratch, real_end);
tctx->lastEndOffset = real_end;
}
}
size_t match_len = end - start + 1;
- rv = roseProcessMainMatch(t, real_end, match_len, id, tctx);
+ rv = roseProcessMainMatch(t, scratch, real_end, match_len, id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctx->groups);
tctxt->curr_row_offset);
}
-hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
- u64a end, struct RoseContext *tctxt,
- char in_anchored, char in_catchup);
+hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, ReportID r,
+ u64a end, char in_anchored, char in_catchup);
static really_inline
void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
- struct RoseContext *tctxt) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct hs_scratch *scratch) {
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
assert(scratch->fullState);
q->nfa = getNfaByInfo(t, info);
q->cb = roseNfaAdaptor;
}
q->som_cb = roseNfaSomAdaptor;
- q->context = tctxt;
+ q->context = &scratch->tctxt;
q->report_current = 0;
DEBUG_PRINTF("qi=%u, offset=%llu, fullState=%u, streamState=%u, "
static really_inline
void initRoseQueue(const struct RoseEngine *t, u32 qi,
const struct LeftNfaInfo *left,
- struct RoseContext *tctxt) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct hs_scratch *scratch) {
struct mq *q = scratch->queues + qi;
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
q->nfa = getNfaByInfo(t, info);
return leftfixDelay[di] == OWB_ZOMBIE_ALWAYS_YES;
}
-hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a end);
+hwlmcb_rv_t flushQueuedLiterals_i(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end);
static really_inline
-hwlmcb_rv_t flushQueuedLiterals(struct RoseContext *tctxt, u64a end) {
+hwlmcb_rv_t flushQueuedLiterals(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end) {
+ struct RoseContext *tctxt = &scratch->tctxt;
+
if (tctxt->delayLastEndOffset == end) {
DEBUG_PRINTF("no progress, no flush\n");
return HWLM_CONTINUE_MATCHING;
}
- if (!tctxt->filledDelayedSlots && !tctxtToScratch(tctxt)->al_log_sum) {
+ if (!tctxt->filledDelayedSlots && !scratch->al_log_sum) {
tctxt->delayLastEndOffset = end;
return HWLM_CONTINUE_MATCHING;
}
- return flushQueuedLiterals_i(tctxt, end);
+ return flushQueuedLiterals_i(t, scratch, end);
}
static really_inline
-hwlmcb_rv_t cleanUpDelayed(size_t length, u64a offset,
- struct hs_scratch *scratch) {
+hwlmcb_rv_t cleanUpDelayed(const struct RoseEngine *t,
+ struct hs_scratch *scratch, size_t length,
+ u64a offset) {
if (can_stop_matching(scratch)) {
return HWLM_TERMINATE_MATCHING;
}
- struct RoseContext *tctxt = &scratch->tctxt;
- if (flushQueuedLiterals(tctxt, length + offset)
+ if (flushQueuedLiterals(t, scratch, length + offset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
+ struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->filledDelayedSlots) {
DEBUG_PRINTF("dirty\n");
scratch->core_info.status |= STATUS_DELAY_DIRTY;
}
static rose_inline
-void roseFlushLastByteHistory(const struct RoseEngine *t, char *state,
- u64a currEnd, struct RoseContext *tctxt) {
+void roseFlushLastByteHistory(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a currEnd) {
if (!t->lastByteHistoryIterOffset) {
return;
}
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct RoseContext *tctxt = &scratch->tctxt;
struct core_info *ci = &scratch->core_info;
/* currEnd is last byte of string + 1 */
assert(ISALIGNED(it));
const u32 numStates = t->rolesWithStateCount;
- void *role_state = getRoleState(state);
+ void *role_state = getRoleState(scratch->core_info.state);
struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];
}
static rose_inline
-void rosePushDelayedMatch(const struct RoseEngine *t, u32 delay,
- u32 delay_index, u64a offset,
- struct RoseContext *tctxt) {
+void rosePushDelayedMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 delay,
+ u32 delay_index, u64a offset) {
assert(delay);
const u32 src_slot_index = delay;
u32 slot_index = (src_slot_index + offset) & DELAY_MASK;
+ struct RoseContext *tctxt = &scratch->tctxt;
if (offset + src_slot_index <= tctxt->delayLastEndOffset) {
DEBUG_PRINTF("skip too late\n");
return;
}
const u32 delay_count = t->delay_count;
- struct fatbit **delaySlots = getDelaySlots(tctxtToScratch(tctxt));
+ struct fatbit **delaySlots = getDelaySlots(scratch);
struct fatbit *slot = delaySlots[slot_index];
DEBUG_PRINTF("pushing tab %u into slot %u\n", delay_index, slot_index);
if (loc + scratch->core_info.buf_offset
<= tctxt->minNonMpvMatchOffset) {
DEBUG_PRINTF("flushing chained\n");
- if (roseCatchUpMPV(t, scratch->core_info.state, loc,
- scratch) == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, loc, scratch) ==
+ HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
goto done_queue_empty;
}
}
- if (roseCatchUpTo(t, scratch->core_info.state,
- loc + scratch->core_info.buf_offset, scratch,
+ if (roseCatchUpTo(t, scratch, loc + scratch->core_info.buf_offset,
in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
assert(is_mpv);
DEBUG_PRINTF("flushing chained\n");
tctxt->next_mpv_offset = 0; /* force us to catch the mpv */
- if (roseCatchUpMPV(t, scratch->core_info.state, loc, scratch)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMPV(t, loc, scratch) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
done_queue_empty:
if (!mmbit_set(aa, aaCount, qi)) {
- initQueue(q, qi, t, tctxt);
+ initQueue(q, qi, t, scratch);
nfaQueueInitState(q->nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(activeQueues, qCount, qi);
}
static rose_inline
-hwlmcb_rv_t roseHandleSuffixTrigger(const struct RoseEngine *t,
- u32 qi, u32 top, u64a som,
- u64a end, struct RoseContext *tctxt,
- char in_anchored) {
+hwlmcb_rv_t roseTriggerSuffix(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 qi, u32 top,
+ u64a som, u64a end, char in_anchored) {
DEBUG_PRINTF("suffix qi=%u, top event=%u\n", qi, top);
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
+ struct core_info *ci = &scratch->core_info;
+ u8 *aa = getActiveLeafArray(t, ci->state);
const u32 aaCount = t->activeArrayCount;
const u32 qCount = t->queueCount;
struct mq *q = &scratch->queues[qi];
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
const struct NFA *nfa = getNfaByInfo(t, info);
- struct core_info *ci = &scratch->core_info;
s64a loc = (s64a)end - ci->buf_offset;
assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
if (!mmbit_set(aa, aaCount, qi)) {
- initQueue(q, qi, t, tctxt);
+ initQueue(q, qi, t, scratch);
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(scratch->aqa, qCount, qi);
/* nfa only needs one top; we can go home now */
return HWLM_CONTINUE_MATCHING;
} else if (!fatbit_set(scratch->aqa, qCount, qi)) {
- initQueue(q, qi, t, tctxt);
+ initQueue(q, qi, t, scratch);
loadStreamState(nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
} else if (isQueueFull(q)) {
}
static really_inline
-char roseTestLeftfix(const struct RoseEngine *t, u32 qi, u32 leftfixLag,
- ReportID leftfixReport, u64a end,
- struct RoseContext *tctxt) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+char roseTestLeftfix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
struct core_info *ci = &scratch->core_info;
u32 ri = queueToLeftIndex(t, qi);
if (!fatbit_set(scratch->aqa, qCount, qi)) {
DEBUG_PRINTF("initing q %u\n", qi);
- initRoseQueue(t, qi, left, tctxt);
+ initRoseQueue(t, qi, left, scratch);
if (ci->buf_offset) { // there have been writes before us!
s32 sp;
if (left->transient) {
DEBUG_PRINTF("leftfix %u died while trying to catch up\n", ri);
mmbit_unset(activeLeftArray, arCount, ri);
assert(!mmbit_isset(activeLeftArray, arCount, ri));
- tctxt->groups &= left->squash_mask;
+ scratch->tctxt.groups &= left->squash_mask;
return 0;
}
}
static rose_inline
-void roseTriggerInfix(const struct RoseEngine *t, u64a start, u64a end, u32 qi,
- u32 topEvent, u8 cancel, struct RoseContext *tctxt) {
- struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
+void roseTriggerInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u64a start, u64a end, u32 qi, u32 topEvent, u8 cancel) {
+ struct core_info *ci = &scratch->core_info;
s64a loc = (s64a)end - ci->buf_offset;
u32 ri = queueToLeftIndex(t, qi);
DEBUG_PRINTF("rose %u (qi=%u) event %u\n", ri, qi, topEvent);
- struct mq *q = tctxtToScratch(tctxt)->queues + qi;
+ struct mq *q = scratch->queues + qi;
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
- char *state = scratch->core_info.state;
+ char *state = ci->state;
u8 *activeLeftArray = getActiveLeftArray(t, state);
const u32 arCount = t->activeLeftCount;
char alive = mmbit_set(activeLeftArray, arCount, ri);
if (cancel) {
DEBUG_PRINTF("dominating top: (re)init\n");
fatbit_set(aqa, qCount, qi);
- initRoseQueue(t, qi, left, tctxt);
+ initRoseQueue(t, qi, left, scratch);
pushQueueAt(q, 0, MQE_START, loc);
nfaQueueInitState(q->nfa, q);
} else if (!fatbit_set(aqa, qCount, qi)) {
DEBUG_PRINTF("initing %u\n", qi);
- initRoseQueue(t, qi, left, tctxt);
+ initRoseQueue(t, qi, left, scratch);
if (alive) {
s32 sp = -(s32)loadRoseDelay(t, state, left);
pushQueueAt(q, 0, MQE_START, sp);
* up */
static rose_inline
hwlmcb_rv_t roseCatchUpAndHandleChainMatch(const struct RoseEngine *t,
- char *state, ReportID r, u64a end,
- struct RoseContext *tctxt,
+ struct hs_scratch *scratch,
+ ReportID r, u64a end,
char in_anchored) {
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
-
- if (roseCatchUpMpvFeeders(t, state, end, scratch, in_anchored)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpMpvFeeders(t, scratch, end, in_anchored) ==
+ HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
- return roseHandleChainMatch(t, r, end, tctxt, in_anchored, 0);
+ return roseHandleChainMatch(t, scratch, r, end, in_anchored, 0);
}
static really_inline
* are satisfied.
*/
static rose_inline
-int roseCheckLookaround(const struct RoseEngine *t, u32 lookaroundIndex,
- u32 lookaroundCount, u64a end,
- struct RoseContext *tctxt) {
+int roseCheckLookaround(const struct RoseEngine *t,
+ const struct hs_scratch *scratch, u32 lookaroundIndex,
+ u32 lookaroundCount, u64a end) {
assert(lookaroundIndex != MO_INVALID_IDX);
assert(lookaroundCount > 0);
- const struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
+ const struct core_info *ci = &scratch->core_info;
DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
ci->buf_offset, ci->buf_offset + ci->len);
}
static rose_inline
-u64a roseGetHaigSom(const struct RoseEngine *t, const u32 qi,
- UNUSED const u32 leftfixLag,
- struct RoseContext *tctxt) {
+u64a roseGetHaigSom(const struct RoseEngine *t, struct hs_scratch *scratch,
+ const u32 qi, UNUSED const u32 leftfixLag) {
u32 ri = queueToLeftIndex(t, qi);
UNUSED const struct LeftNfaInfo *left = getLeftTable(t) + ri;
assert(leftfixLag <= left->maxLag);
- struct mq *q = tctxtToScratch(tctxt)->queues + qi;
+ struct mq *q = scratch->queues + qi;
u64a start = ~0ULL;
}
static rose_inline
-hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
- u64a end, size_t match_len,
- struct RoseContext *tctxt, char in_anchored) {
+hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 programOffset,
+ u64a end, size_t match_len, char in_anchored) {
DEBUG_PRINTF("program begins at offset %u\n", programOffset);
assert(programOffset);
// allow the program to squash groups).
int work_done = 0;
- struct hs_scratch *scratch = tctxtToScratch(tctxt);
+ struct RoseContext *tctxt = &scratch->tctxt;
assert(*(const u8 *)pc != ROSE_INSTR_END);
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_LOOKAROUND) {
- if (!roseCheckLookaround(t, ri->index, ri->count, end, tctxt)) {
+ if (!roseCheckLookaround(t, scratch, ri->index, ri->count,
+ end)) {
DEBUG_PRINTF("failed lookaround check\n");
assert(ri->fail_jump); // must progress
pc += ri->fail_jump;
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_LEFTFIX) {
- if (!roseTestLeftfix(t, ri->queue, ri->lag, ri->report, end,
- tctxt)) {
- DEBUG_PRINTF("failed lookaround check\n");
+ if (!roseTestLeftfix(t, scratch, ri->queue, ri->lag, ri->report,
+ end)) {
+ DEBUG_PRINTF("failed leftfix check\n");
assert(ri->fail_jump); // must progress
pc += ri->fail_jump;
continue;
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(PUSH_DELAYED) {
- rosePushDelayedMatch(t, ri->delay, ri->index, end, tctxt);
+ rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CATCH_UP) {
- if (roseCatchUpTo(t, scratch->core_info.state, end, scratch,
- in_anchored) == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpTo(t, scratch, end, in_anchored) ==
+ HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(SOM_LEFTFIX) {
- som = roseGetHaigSom(t, ri->queue, ri->lag, tctxt);
+ som = roseGetHaigSom(t, scratch, ri->queue, ri->lag);
DEBUG_PRINTF("som from leftfix is %llu\n", som);
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(TRIGGER_INFIX) {
- roseTriggerInfix(t, som, end, ri->queue, ri->event, ri->cancel,
- tctxt);
+ roseTriggerInfix(t, scratch, som, end, ri->queue, ri->event,
+ ri->cancel);
work_done = 1;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(TRIGGER_SUFFIX) {
- if (roseHandleSuffixTrigger(t, ri->queue, ri->event, som, end,
- tctxt, in_anchored) ==
- HWLM_TERMINATE_MATCHING) {
+ if (roseTriggerSuffix(t, scratch, ri->queue, ri->event, som,
+ end, in_anchored)
+ == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
work_done = 1;
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_CHAIN) {
- if (roseCatchUpAndHandleChainMatch(
- t, scratch->core_info.state, ri->report, end,
- tctxt, in_anchored) == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpAndHandleChainMatch(t, scratch, ri->report, end,
+ in_anchored) ==
+ HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
work_done = 1;
miracle_loc);
if (!q_active) {
fatbit_set(scratch->aqa, qCount, qi);
- initRoseQueue(t, qi, left, &scratch->tctxt);
+ initRoseQueue(t, qi, left, scratch);
}
q->cur = q->end = 0;
pushQueueAt(q, 0, MQE_START, miracle_loc);
}
if (!fatbit_set(scratch->aqa, qCount, qi)) {
- initRoseQueue(t, qi, left, &scratch->tctxt);
+ initRoseQueue(t, qi, left, scratch);
s32 sp;
if (ci->buf_offset) {
u64a offset) {
struct RoseContext *tctxt = &scratch->tctxt;
- if (roseCatchUpTo(t, state, length + scratch->core_info.buf_offset, scratch,
- 0)
- == HWLM_TERMINATE_MATCHING) {
+ if (roseCatchUpTo(t, scratch, length + scratch->core_info.buf_offset, 0) ==
+ HWLM_TERMINATE_MATCHING) {
return; /* dead; no need to clean up state. */
}
roseSaveNfaStreamState(t, state, scratch);
roseCatchUpLeftfixes(t, state, scratch);
- roseFlushLastByteHistory(t, state, offset + length, tctxt);
+ roseFlushLastByteHistory(t, scratch, offset + length);
tctxt->lastEndOffset = offset + length;
storeGroups(t, state, tctxt->groups);
}
flush_delay_and_exit:
DEBUG_PRINTF("flushing floating\n");
- if (cleanUpDelayed(length, offset, scratch) == HWLM_TERMINATE_MATCHING) {
+ if (cleanUpDelayed(t, scratch, length, offset) == HWLM_TERMINATE_MATCHING) {
return;
}