}
char *pattern = argv[1];
- char *inputFN = argv[2];
+ const char *inputFN = argv[2];
/* First, we attempt to compile the pattern provided on the command line.
* We assume 'DOTALL' semantics, meaning that the '.' meta-character will
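
The call this comment introduces is cut off at the hunk boundary; for context, the compile step it describes is the public hs_compile API used roughly as follows (a minimal sketch, not the full example program):

    #include <stdio.h>
    #include <hs.h>

    /* Compile one pattern with DOTALL semantics; returns NULL on failure. */
    static hs_database_t *compile_pattern(const char *pattern) {
        hs_database_t *db = NULL;
        hs_compile_error_t *compile_err = NULL;
        if (hs_compile(pattern, HS_FLAG_DOTALL, HS_MODE_BLOCK, NULL, &db,
                       &compile_err) != HS_SUCCESS) {
            fprintf(stderr, "ERROR: Unable to compile pattern \"%s\": %s\n",
                    pattern, compile_err->message);
            hs_free_compile_error(compile_err);
            return NULL;
        }
        return db;
    }
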
const struct SubCastle *sub = getSubCastle(c, subIdx);
const struct RepeatInfo *info = getRepeatInfo(sub);
- union RepeatControl *rctrl = getControl(q->state, sub);
- char *rstate = (char *)q->streamState + sub->streamStateOffset +
+ const union RepeatControl *rctrl = getControl(q->state, sub);
+ const char *rstate = (const char *)q->streamState + sub->streamStateOffset +
info->packedCtrlSize;
enum RepeatMatch match =
repeatHasMatch(info, rctrl, rstate, offset);
if (c->exclusive) {
u8 *active = (u8 *)q->streamState;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("subcastle %u\n", activeIdx);
if (subCastleReportCurrent(c, q,
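
This first castle hunk establishes the pattern that repeats through the rest of the diff: a pointer used only for reading gains a const qualifier, and the cast that produces it carries the qualifier too, so constness is never silently dropped and re-added. A reduced, self-contained sketch of the idiom; the struct and helper names here are illustrative, not Hyperscan's:

    #include <stdint.h>

    struct packed_state {
        char *stream_state;   /* mutable backing store */
        uint32_t ctrl_offset; /* start of the read-only control region */
    };

    /* Read-only view into packed state: the local pointer is const and the
     * cast matches, so the qualifier is never silently dropped. */
    static char peek_ctrl_byte(const struct packed_state *s) {
        const char *rstate = (const char *)s->stream_state + s->ctrl_offset;
        return rstate[0];
    }
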
}
const struct RepeatInfo *info = getRepeatInfo(sub);
- union RepeatControl *rctrl = getControl(q->state, sub);
- char *rstate = (char *)q->streamState + sub->streamStateOffset +
+ const union RepeatControl *rctrl = getControl(q->state, sub);
+ const char *rstate = (const char *)q->streamState + sub->streamStateOffset +
info->packedCtrlSize;
enum RepeatMatch match =
repeatHasMatch(info, rctrl, rstate, offset);
if (c->exclusive) {
u8 *active = (u8 *)q->streamState;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("subcastle %u\n", activeIdx);
if (subCastleInAccept(c, q, report, offset, activeIdx)) {
const struct SubCastle *sub = getSubCastle(c, subIdx);
const struct RepeatInfo *info = getRepeatInfo(sub);
- union RepeatControl *rctrl = getControl(full_state, sub);
- char *rstate = (char *)stream_state + sub->streamStateOffset +
+ const union RepeatControl *rctrl = getControl(full_state, sub);
+ const char *rstate = (const char *)stream_state + sub->streamStateOffset +
info->packedCtrlSize;
if (repeatHasMatch(info, rctrl, rstate, offset) == REPEAT_STALE) {
if (c->exclusive) {
u8 *active = (u8 *)stream_state;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("subcastle %u\n", activeIdx);
subCastleDeactivateStaleSubs(c, offset, full_state,
size_t *mloc, char *found, const u32 subIdx) {
const struct SubCastle *sub = getSubCastle(c, subIdx);
const struct RepeatInfo *info = getRepeatInfo(sub);
- union RepeatControl *rctrl = getControl(full_state, sub);
- char *rstate = (char *)stream_state + sub->streamStateOffset +
+ const union RepeatControl *rctrl = getControl(full_state, sub);
+ const char *rstate = (const char *)stream_state + sub->streamStateOffset +
info->packedCtrlSize;
u64a match = repeatNextMatch(info, rctrl, rstate, begin);
if (c->exclusive) {
u8 *active = (u8 *)stream_state;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("subcastle %u\n", activeIdx);
subCastleFindMatch(c, begin, end, full_state, stream_state, mloc,
}
if (c->exclusive != PURE_EXCLUSIVE) {
- u8 *active = (u8 *)stream_state + c->activeOffset;
+ const u8 *active = (u8 *)stream_state + c->activeOffset;
for (u32 i = mmbit_iterate(active, c->numRepeats, MMB_INVALID);
i != MMB_INVALID;
i = mmbit_iterate(active, c->numRepeats, i)) {
u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
u64a match = subCastleNextMatch(c, full_state, stream_state,
loc, activeIdx);
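
partial_load_u32 (util/partial_store.h) reads an index that occupies fewer than four bytes of packed stream state, which is why a const view of cur suffices in the hunks above. A reduced stand-in, assuming a little-endian host for brevity:

    #include <stdint.h>
    #include <string.h>

    /* Load the low `bytes` bytes of a little-endian u32 from packed state. */
    static uint32_t load_packed_u32(const uint8_t *cur, uint32_t bytes) {
        uint32_t v = 0;
        memcpy(&v, cur, bytes); /* bytes <= sizeof(v); little-endian only */
        return v;
    }
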
char found = 0;
if (c->exclusive) {
- u8 *groups = (u8 *)q->streamState + c->groupIterOffset;
+ const u8 *groups = (const u8 *)q->streamState + c->groupIterOffset;
found = mmbit_any(groups, c->numGroups);
}
}
if (c->exclusive) {
- u8 *groups = (u8 *)q->streamState + c->groupIterOffset;
+ const u8 *groups = (const u8 *)q->streamState + c->groupIterOffset;
if (mmbit_any_precise(groups, c->numGroups)) {
return 1;
}
char found = 0;
if (c->exclusive) {
- u8 *groups = (u8 *)q->streamState + c->groupIterOffset;
+ const u8 *groups = (const u8 *)q->streamState + c->groupIterOffset;
found = mmbit_any_precise(groups, c->numGroups);
}
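
These queries compile only because the multibit accessors (mmbit_any, mmbit_any_precise, mmbit_iterate) already take their bit vector as const u8 *, which is what the const-qualified groups pointer relies on. The shape of such a read-only scan, reduced to a plain byte-array bitset:

    #include <stdint.h>

    /* Return 1 if any bit is set in an n-byte bitset; purely a read. */
    static int bitset_any(const uint8_t *bits, uint32_t n_bytes) {
        for (uint32_t i = 0; i < n_bytes; i++) {
            if (bits[i]) {
                return 1;
            }
        }
        return 0;
    }
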
if (c->exclusive) {
u8 *active = (u8 *)q->streamState;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("subcastle %u\n", activeIdx);
const struct SubCastle *sub = getSubCastle(c, activeIdx);
const struct mq *q, const u64a offset) {
const struct SubCastle *sub = getSubCastle(c, subIdx);
const struct RepeatInfo *info = getRepeatInfo(sub);
- union RepeatControl *rctrl = getControl(q->state, sub);
+ const union RepeatControl *rctrl = getControl(q->state, sub);
char *packed = (char *)q->streamState + sub->streamStateOffset;
DEBUG_PRINTF("sub %u next match %llu\n", subIdx,
repeatNextMatch(info, rctrl,
DEBUG_PRINTF("offset=%llu\n", offset);
if (c->exclusive) {
u8 *active = (u8 *)q->streamState;
- u8 *groups = active + c->groupIterOffset;
+ const u8 *groups = active + c->groupIterOffset;
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
- u8 *cur = active + i * c->activeIdxSize;
+ const u8 *cur = active + i * c->activeIdxSize;
const u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
DEBUG_PRINTF("packing state for sub %u\n", activeIdx);
subCastleQueueCompressState(c, activeIdx, q, offset);
void *ctxt = q->context;
u8 s = *(u8 *)q->state;
u64a offset = q_cur_offset(q);
- struct gough_som_info *som = getSomInfo(q->state);
+ const struct gough_som_info *som = getSomInfo(q->state);
assert(q_cur_type(q) == MQE_START);
assert(s);
u16 s = *(u16 *)q->state;
const struct mstate_aux *aux = get_aux(m, s);
u64a offset = q_cur_offset(q);
- struct gough_som_info *som = getSomInfo(q->state);
+ const struct gough_som_info *som = getSomInfo(q->state);
assert(q_cur_type(q) == MQE_START);
DEBUG_PRINTF("state %hu\n", s);
assert(s);
void copy_propagate_report_set(vector<pair<ReportID, GoughSSAVar *> > &rep) {
vector<pair<ReportID, GoughSSAVar *> >::iterator it = rep.begin();
while (it != rep.end()) {
- GoughSSAVar *var = it->second;
+ const GoughSSAVar *var = it->second;
if (!var) {
++it;
continue;
}
while (!queue.empty()) {
- GoughSSAVar *v = queue.back();
+ const GoughSSAVar *v = queue.back();
queue.pop_back();
for (GoughSSAVar *var : v->get_inputs()) {
if (var->seen) {
if (contains(aux.containing_v, var)) {
/* def is used by join vertex, value only needs to be live on some
* incoming edges */
- GoughSSAVarJoin *vj = (GoughSSAVarJoin *)var;
+ const GoughSSAVarJoin *vj = (const GoughSSAVarJoin *)var;
const flat_set<GoughEdge> &live_edges
= vj->get_edges_for_input(def);
for (const auto &e : live_edges) {
template<class Mask>
bool isMaskZero(Mask &m) {
- u8 *m8 = (u8 *)&m;
+ const u8 *m8 = (const u8 *)&m;
for (u32 i = 0; i < sizeof(m); i++) {
if (m8[i]) {
return false;
static
void allocState(NFA *nfa, u32 repeatscratchStateSize,
u32 repeatStreamState) {
- implNFA_t *limex = (implNFA_t *)getMutableImplNfa(nfa);
+ const implNFA_t *limex = (const implNFA_t *)getMutableImplNfa(nfa);
// LimEx NFAs now store the following in state:
// 1. state bitvector (always present)
assert(q->state && q->streamState);
const IMPL_NFA_T *limex = getImplNfa(nfa);
- union RepeatControl *repeat_ctrl =
+ const union RepeatControl *repeat_ctrl =
getRepeatControlBase(q->state, sizeof(STATE_T));
- char *repeat_state = q->streamState + limex->stateSize;
+ const char *repeat_state = q->streamState + limex->stateSize;
STATE_T state = *(STATE_T *)q->state;
u64a offset = q->offset + q_last_loc(q) + 1;
assert(q->state && q->streamState);
const IMPL_NFA_T *limex = getImplNfa(nfa);
- union RepeatControl *repeat_ctrl =
+ const union RepeatControl *repeat_ctrl =
getRepeatControlBase(q->state, sizeof(STATE_T));
- char *repeat_state = q->streamState + limex->stateSize;
+ const char *repeat_state = q->streamState + limex->stateSize;
STATE_T state = *(STATE_T *)q->state;
u64a offset = q->offset + q_last_loc(q) + 1;
if (limex->repeatCount) {
u64a offset = q->offset + loc + 1;
- union RepeatControl *repeat_ctrl =
+ const union RepeatControl *repeat_ctrl =
getRepeatControlBase(q->state, sizeof(STATE_T));
- char *repeat_state = q->streamState + limex->stateSize;
+ const char *repeat_state = q->streamState + limex->stateSize;
SQUASH_UNTUG_BR_FN(limex, repeat_ctrl, repeat_state, offset, &state);
}
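
The LimEx hunks likewise depend on the bounded-repeat query path accepting const control and state pointers; only the update path needs mutable ones. A sketch of that calling convention with hypothetical types:

    #include <stdint.h>

    union repeat_ctrl { uint64_t last_top; };

    /* A query takes const views of the control block and packed state;
     * an update function would be the only caller needing mutable ones. */
    static int repeat_has_match(const union repeat_ctrl *ctrl,
                                const char *packed_state, uint64_t offset) {
        (void)packed_state; /* unused in this reduced sketch */
        return ctrl->last_top <= offset;
    }
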
mstate_aux *getAux(NFA *n, dstate_id_t i) {
assert(isMcClellanType(n->type));
- mcclellan *m = (mcclellan *)getMutableImplNfa(n);
+ const mcclellan *m = (const mcclellan *)getMutableImplNfa(n);
mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
mstate_aux *aux = aux_base + i;
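
getAux computes an interior pointer from the NFA base plus an offset stored in the header. Only the header read needs a const view; the aux table itself stays writable because these build-time callers patch accept flags into it. The shape of the computation, with illustrative names:

    #include <stdint.h>

    struct engine_hdr { uint32_t aux_offset; };
    struct aux_entry { uint8_t accept; };

    /* Header is read through a const view; the aux table remains mutable
     * because callers modify it while building the engine. */
    static struct aux_entry *get_aux(void *base, uint32_t idx) {
        const struct engine_hdr *h = (const struct engine_hdr *)base;
        return (struct aux_entry *)((char *)base + h->aux_offset) + idx;
    }
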
continue;
}
- mstate_aux *aux = getAux(n, succ_table[c_prime]);
+ const mstate_aux *aux = getAux(n, succ_table[c_prime]);
if (aux->accept) {
succ_table[c_prime] |= ACCEPT_FLAG;
continue;
}
- mstate_aux *aux = getAux(n, succ_i);
+ const mstate_aux *aux = getAux(n, succ_i);
if (aux->accept) {
succ_i |= ACCEPT_FLAG;
// check successful transition
u16 next = unaligned_load_u16((u8 *)trans);
if (next < wide_limit) {
- mstate_aux *aux = getAux(n, next);
+ const mstate_aux *aux = getAux(n, next);
if (aux->accept) {
next |= ACCEPT_FLAG;
}
if (next_k >= wide_limit) {
continue;
}
- mstate_aux *aux_k = getAux(n, next_k);
+ const mstate_aux *aux_k = getAux(n, next_k);
if (aux_k->accept) {
next_k |= ACCEPT_FLAG;
}
static
mstate_aux *getAux(NFA *n, dstate_id_t i) {
- mcsheng *m = (mcsheng *)getMutableImplNfa(n);
+ const mcsheng *m = (const mcsheng *)getMutableImplNfa(n);
mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
mstate_aux *aux = aux_base + i;
static
mstate_aux *getAux64(NFA *n, dstate_id_t i) {
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(n);
+ const mcsheng64 *m = (const mcsheng64 *)getMutableImplNfa(n);
mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
mstate_aux *aux = aux_base + i;
static
u16 get_edge_flags(NFA *nfa, dstate_id_t target_impl_id) {
- mstate_aux *aux = getAux(nfa, target_impl_id);
+ const mstate_aux *aux = getAux(nfa, target_impl_id);
u16 flags = 0;
if (aux->accept) {
static
u16 get_edge_flags64(NFA *nfa, dstate_id_t target_impl_id) {
- mstate_aux *aux = getAux64(nfa, target_impl_id);
+ const mstate_aux *aux = getAux64(nfa, target_impl_id);
u16 flags = 0;
if (aux->accept) {
return 0;
} else {
const struct mpv *m = getImplNfa(nfa);
- u8 *reporters = (u8 *)q->state + m->reporter_offset;
+ const u8 *reporters = (const u8 *)q->state + m->reporter_offset;
if (mmbit_any_precise(reporters, m->kilo_count)) {
DEBUG_PRINTF("next byte\n");
next_event = q->items[q->cur].location;
}
- struct mpv_decomp_state *s = (struct mpv_decomp_state *)q->state;
+ const struct mpv_decomp_state *s = (const struct mpv_decomp_state *)q->state;
struct mpv_pq_item *pq
= (struct mpv_pq_item *)(q->state + m->pq_offset);
if (s->pq_size) {
// We assert that the event is different from its predecessor. If it's a
// dupe, you should have used the ordinary pushQueue call.
if (q->end) {
- UNUSED struct mq_item *prev = &q->items[q->end - 1];
+ UNUSED const struct mq_item *prev = &q->items[q->end - 1];
assert(prev->type != e || prev->location != loc);
}
#endif
// We assert that the event is different from its predecessor. If it's a
// dupe, you should have used the ordinary pushQueue call.
if (q->end) {
- UNUSED struct mq_item *prev = &q->items[q->end - 1];
+ UNUSED const struct mq_item *prev = &q->items[q->end - 1];
assert(prev->type != e || prev->location != loc);
}
#endif
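
prev exists only to feed the assert, so it can be const; the UNUSED annotation keeps release builds, where assert compiles away, free of unused-variable warnings. The pattern in isolation, with a local stand-in for Hyperscan's UNUSED macro:

    #include <assert.h>

    #if defined(__GNUC__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    struct item { int type; long loc; };

    static void push_checked(const struct item *items, unsigned end,
                             int type, long loc) {
        if (end) {
            UNUSED const struct item *prev = &items[end - 1];
            assert(prev->type != type || prev->loc != loc);
        }
        /* ... actual push elided ... */
    }
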
/* Need to verify how far the lock covers */
u32 bad_region;
- NGHolder *ap_pref = plan.back().prefix.get();
+ const NGHolder *ap_pref = plan.back().prefix.get();
NGHolder ap_temp;
if (hasBigCycles(*ap_pref)) {
fillRoughMidfix(&ap_temp, g, regions, info, picked);
}
for (const auto &m : infixes) {
- NGHolder *h = m.first;
+ const NGHolder *h = m.first;
const auto &edges = m.second;
removeRedundantLiteralsFromInfix(*h, g, edges, cc);
}
}
for (auto i = children.begin(), e = children.end(); i != e; ++i) {
- Component *child = i->get();
+ const Component *child = i->get();
c = (*i)->accept(v);
if (c != child) {
// Child has been replaced (new Component pointer) or we've been
}
for (auto i = children.begin(), e = children.end(); i != e; ++i) {
- Component *child = i->get();
+ const Component *child = i->get();
c = (*i)->accept(v);
if (c != child) {
// Child has been replaced (new Component pointer) or we've been
}
for (auto i = children.begin(), e = children.end(); i != e; ++i) {
- Component *child = i->get();
+ const Component *child = i->get();
c = (*i)->accept(v);
if (c != child) {
// Child has been replaced (new Component pointer) or we've been
}
if (kind == CONDITION_ASSERTION) {
- Component *a = assertion.get();
+ const Component *a = assertion.get();
c = assertion->accept(v);
if (c != a) {
assertion.reset(c);
}
for (auto i = children.begin(), e = children.end(); i != e; ++i) {
- Component *child = i->get();
+ const Component *child = i->get();
c = (*i)->accept(v);
if (c != child) {
// Child has been replaced (new Component pointer) or we've been
}
for (auto i = children.begin(), e = children.end(); i != e; ++i) {
- Component *child = i->get();
+ const Component *child = i->get();
c = (*i)->accept(v);
if (c != child) {
// Child has been replaced (new Component pointer) or we've been
const size_t length = scratch->core_info.len;
char *state = scratch->core_info.state;
- struct RoseContext *tctxt = &scratch->tctxt;
+ const struct RoseContext *tctxt = &scratch->tctxt;
DEBUG_PRINTF("ftable fd=%u fmd %u\n", t->floatingDistance,
t->floatingMinDistance);
init_for_block(t, scratch, state, is_small_block);
- struct RoseContext *tctxt = &scratch->tctxt;
+ const struct RoseContext *tctxt = &scratch->tctxt;
if (is_small_block) {
const void *sbtable = getSBLiteralMatcher(t);
s64a final_loc, struct hs_scratch *scratch) {
assert(scratch->catchup_pq.qm_size <= t->outfixEndQueue);
- struct RoseContext *tctxt = &scratch->tctxt;
+ const struct RoseContext *tctxt = &scratch->tctxt;
assert(t->activeArrayCount);
assert(scratch->core_info.buf_offset + final_loc
hwlmcb_rv_t roseDelayRebuildCallback(size_t end, u32 id,
struct hs_scratch *scratch) {
- struct RoseContext *tctx = &scratch->tctxt;
+ const struct RoseContext *tctx = &scratch->tctxt;
struct core_info *ci = &scratch->core_info;
const struct RoseEngine *t = ci->rose;
size_t rb_len = MIN(ci->hlen, t->delayRebuildLength);
struct hs_scratch *scratch,
u32 curr_loc) {
struct RoseContext *tctxt = &scratch->tctxt;
- struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
+ const struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
u32 region_width = t->anchored_count;
const u32 *programs = getByOffset(t, t->anchoredProgramOffset);
static really_inline
u32 anchored_it_begin(struct hs_scratch *scratch) {
- struct RoseContext *tctxt = &scratch->tctxt;
+ const struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->lastEndOffset >= scratch->anchored_literal_region_len) {
return MMB_INVALID;
}
return;
}
- struct RoseContext *tctxt = &scratch->tctxt;
- struct core_info *ci = &scratch->core_info;
+ const struct RoseContext *tctxt = &scratch->tctxt;
+ const struct core_info *ci = &scratch->core_info;
/* currEnd is last byte of string + 1 */
if (tctxt->lastEndOffset == ci->buf_offset + ci->len
for (size_t i = 0; i < nfas.size(); i++) {
const NFA *nfa = nfas[i].get();
anchored_matcher_info *ami = (anchored_matcher_info *)curr;
- char *prev_curr = curr;
+ const char *prev_curr = curr;
curr += sizeof(anchored_matcher_info);
assert(!r1.graph() == !r2.graph());
if (r1.graph()) {
- NGHolder *h1 = r1.graph();
- NGHolder *h2 = r2.graph();
+ const NGHolder *h1 = r1.graph();
+ const NGHolder *h2 = r2.graph();
CharReach stop1 = findStopAlphabet(*h1, SOM_NONE);
CharReach stop2 = findStopAlphabet(*h2, SOM_NONE);
CharReach stopboth = stop1 & stop2;
qi = mmbit_iterate(aa, aaCount, qi)) {
DEBUG_PRINTF("saving stream state for qi=%u\n", qi);
- struct mq *q = queues + qi;
+ const struct mq *q = queues + qi;
// If it's active, it should have an active queue (as we should have
// done some work!)
static really_inline
int can_never_match(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, size_t length, u64a offset) {
- struct RoseContext *tctxt = &scratch->tctxt;
+ const struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->groups) {
DEBUG_PRINTF("still has active groups\n");
return;
}
- struct core_info *ci = &scratch->core_info;
+ const struct core_info *ci = &scratch->core_info;
const struct RoseLongLitTable *ll_table =
getByOffset(t, t->longLitTableOffset);
assert(ll_table->maxLen);
/* fire any reports from the logs and clear them */
if (offset == scratch->deduper.current_report_offset + 1) {
struct fatbit *done_log = scratch->deduper.som_log[offset % 2];
- u64a *done_starts = scratch->deduper.som_start_log[offset % 2];
+ const u64a *done_starts = scratch->deduper.som_start_log[offset % 2];
halt = clearSomLog(scratch, scratch->deduper.current_report_offset - 1,
done_log, done_starts);
u64a f_offset = scratch->deduper.current_report_offset - 1;
u64a s_offset = scratch->deduper.current_report_offset;
struct fatbit *first_log = scratch->deduper.som_log[f_offset % 2];
- u64a *first_starts = scratch->deduper.som_start_log[f_offset % 2];
+ const u64a *first_starts = scratch->deduper.som_start_log[f_offset % 2];
struct fatbit *second_log = scratch->deduper.som_log[s_offset % 2];
- u64a *second_starts = scratch->deduper.som_start_log[s_offset % 2];
+ const u64a *second_starts = scratch->deduper.som_start_log[s_offset % 2];
halt = clearSomLog(scratch, f_offset, first_log, first_starts) ||
clearSomLog(scratch, s_offset, second_log, second_starts);
const u32 som_store_count = rose->somLocationCount;
assert(som_store_count); // Caller should ensure that we have work to do.
- u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
+ const u8 *som_store_valid = (const u8 *)ci->state + rose->stateOffsets.somValid;
char *stream_som_store = ci->state + rose->stateOffsets.somLocation;
const u64a *som_store = scratch->som_store;
const u8 som_size = rose->somHorizon;
if (level == 0) {
return; // we are done
}
- u8 *block_ptr =
+ const u8 *block_ptr =
mmbit_get_level_root(bits, level) + key * sizeof(MMB_TYPE);
MMB_TYPE real_block = mmb_load(block_ptr);
key >>= MMB_KEY_SHIFT;
comp += sizeof(MMB_TYPE);
while (1) {
if (key_rem < MMB_KEY_BITS) {
- u8 *block_ptr = mmbit_get_level_root(bits, level) +
+ const u8 *block_ptr = mmbit_get_level_root(bits, level) +
key * sizeof(MMB_TYPE);
MMB_TYPE block = mmb_load(block_ptr);
MMB_TYPE block_1 = block & ~mmb_mask_zero_to_nocheck(key_rem);
int ret = shuftiBuildMasks(chars, (u8 *)&lomask, (u8 *)&himask);
ASSERT_NE(-1, ret);
- u8 *lo = (u8 *)&lomask;
- u8 *hi = (u8 *)&himask;
+ const u8 *lo = (const u8 *)&lomask;
+ const u8 *hi = (const u8 *)&himask;
ASSERT_TRUE(lo['a' % 16] & hi['a' >> 4]);
ASSERT_TRUE(lo['B' % 16] & hi['B' >> 4]);
ASSERT_FALSE(lo['a' % 16] & hi['B' >> 4]);
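
The assertions encode the shufti membership rule: byte c can match only when its low-nibble table entry and high-nibble table entry share a set bit (note 'a' % 16 is 'a' & 0xf). In scalar form:

    #include <stdint.h>

    /* Shufti-style class test: nonzero when lo[c & 0xf] and hi[c >> 4]
     * share a bit. The tables are only read, hence the const views. */
    static int shufti_hit(const uint8_t lo[16], const uint8_t hi[16],
                          uint8_t c) {
        return (lo[c & 0xf] & hi[c >> 4]) != 0;
    }
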
int ret = shuftiBuildMasks(chars, (u8 *)&lomask, (u8 *)&himask);
ASSERT_NE(-1, ret);
- u8 *lo = (u8 *)&lomask;
- u8 *hi = (u8 *)&himask;
+ const u8 *lo = (const u8 *)&lomask;
+ const u8 *hi = (const u8 *)&himask;
ASSERT_TRUE(lo['a' % 16] & hi['a' >> 4]);
ASSERT_TRUE(lo['A' % 16] & hi['A' >> 4]);
ASSERT_TRUE(lo['b' % 16] & hi['b' >> 4]);
(u8 *)&lo2m, (u8 *)&hi2m);
ASSERT_TRUE(ret);
- u8 *lo1 = (u8 *)&lo1m;
- u8 *lo2 = (u8 *)&lo2m;
- u8 *hi1 = (u8 *)&hi1m;
- u8 *hi2 = (u8 *)&hi2m;
+ const u8 *lo1 = (const u8 *)&lo1m;
+ const u8 *lo2 = (const u8 *)&lo2m;
+ const u8 *hi1 = (const u8 *)&hi1m;
+ const u8 *hi2 = (const u8 *)&hi2m;
ASSERT_NE(0xff,
lo1['a' % 16] | hi1['a' >> 4] | lo2['z' % 16] | hi2['z' >> 4]);
ASSERT_NE(0xff,
(u8 *)&lo2m, (u8 *)&hi2m);
ASSERT_TRUE(ret);
- u8 *lo1 = (u8 *)&lo1m;
- u8 *lo2 = (u8 *)&lo2m;
- u8 *hi1 = (u8 *)&hi1m;
- u8 *hi2 = (u8 *)&hi2m;
+ const u8 *lo1 = (const u8 *)&lo1m;
+ const u8 *lo2 = (const u8 *)&lo2m;
+ const u8 *hi1 = (const u8 *)&hi1m;
+ const u8 *hi2 = (const u8 *)&hi2m;
ASSERT_NE(0xff,
lo1['a' % 16] | hi1['a' >> 4] | lo2['z' % 16] | hi2['z' >> 4]);
ASSERT_NE(0xff,
(u8 *)&lo2m, (u8 *)&hi2m);
ASSERT_TRUE(ret);
- u8 *lo1 = (u8 *)&lo1m;
- u8 *lo2 = (u8 *)&lo2m;
- u8 *hi1 = (u8 *)&hi1m;
- u8 *hi2 = (u8 *)&hi2m;
+ const u8 *lo1 = (const u8 *)&lo1m;
+ const u8 *lo2 = (const u8 *)&lo2m;
+ const u8 *hi1 = (const u8 *)&hi1m;
+ const u8 *hi2 = (const u8 *)&hi2m;
ASSERT_NE(0xff,
lo1['a' % 16] | hi1['a' >> 4] | lo2['z' % 16] | hi2['z' >> 4]);
ASSERT_EQ(0xff,
(u8 *)&lo2m, (u8 *)&hi2m);
ASSERT_TRUE(ret);
- u8 *lo1 = (u8 *)&lo1m;
- u8 *lo2 = (u8 *)&lo2m;
- u8 *hi1 = (u8 *)&hi1m;
- u8 *hi2 = (u8 *)&hi2m;
+ const u8 *lo1 = (const u8 *)&lo1m;
+ const u8 *lo2 = (const u8 *)&lo2m;
+ const u8 *hi1 = (const u8 *)&hi1m;
+ const u8 *hi2 = (const u8 *)&hi2m;
ASSERT_NE(0xff,
lo1['a' % 16] | hi1['a' >> 4] | lo2['z' % 16] | hi2['z' >> 4]);
ASSERT_NE(0xff,