template<class Mask>
u8 *maskGetByte(Mask &m, u32 bit) {
assert(bit < sizeof(m)*8);
- u8 *m8 = (u8 *)&m;
+ u8 *m8 = reinterpret_cast<u8 *>(&m);
return m8 + bit/8;
}
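// isMaskZero: any nonzero byte means at least one state bit is set.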
template<class Mask>
bool isMaskZero(Mask &m) {
- const u8 *m8 = (u8 *)&m;
+ const u8 *m8 = reinterpret_cast<const u8 *>(&m);
for (u32 i = 0; i < sizeof(m); i++) {
if (m8[i]) {
return false;
}
}
return true;
}
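// maskSetByte writes a whole byte; idx is a byte index, unlike the bit index taken by maskGetByte.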
template<class Mask>
void maskSetByte(Mask &m, const unsigned int idx, const char val) {
assert(idx < sizeof(m));
- char *m8 = (char *)&m;
+ char *m8 = reinterpret_cast<char *>(&m);
char &byte = m8[idx];
byte = val;
}
static
void allocState(NFA *nfa, u32 repeatscratchStateSize,
u32 repeatStreamState) {
- const implNFA_t *limex = (implNFA_t *)getMutableImplNfa(nfa);
+ const implNFA_t *limex = reinterpret_cast<const implNFA_t *>(getMutableImplNfa(nfa));
// LimEx NFAs now store the following in state:
// 1. state bitvector (always present)
u32 tableOffset, tugMaskOffset;
size_t len = repeatAllocSize(br, &tableOffset, &tugMaskOffset);
auto info = make_zeroed_bytecode_ptr<NFARepeatInfo>(len);
- char *info_ptr = (char *)info.get();
+ char *info_ptr = reinterpret_cast<char *>(info.get());
// Collect state space info.
RepeatStateInfo rsi(br.type, br.repeatMin, br.repeatMax, br.minPeriod);
info->tugMaskOffset = tugMaskOffset;
// Fill the RepeatInfo structure.
- RepeatInfo *repeat =
- (RepeatInfo *)(info_ptr + sizeof(NFARepeatInfo));
+ RepeatInfo *repeat = reinterpret_cast<RepeatInfo *>(info_ptr + sizeof(NFARepeatInfo));
repeat->type = br.type;
repeat->repeatMin = depth_to_u32(br.repeatMin);
repeat->repeatMax = depth_to_u32(br.repeatMax);
}
// Fill the tug mask.
- tableRow_t *tugMask = (tableRow_t *)(info_ptr + tugMaskOffset);
+ tableRow_t *tugMask = reinterpret_cast<tableRow_t *>(info_ptr + tugMaskOffset);
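// Set one bit per tug trigger, using the trigger vertex's assigned state id.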
for (auto v : br.tug_triggers) {
u32 state_id = args.state_ids.at(v);
assert(state_id != NO_STATE);
const u32 reportListOffset) {
DEBUG_PRINTF("exceptionsOffset=%u\n", exceptionsOffset);
- exception_t *etable = (exception_t *)((char *)limex + exceptionsOffset);
+ exception_t *etable = reinterpret_cast<exception_t *>(reinterpret_cast<char *>(limex) + exceptionsOffset);
assert(ISALIGNED(etable));
map<u32, ExceptionProto> exception_by_state;
limex->exceptionCount = ecount;
if (args.num_states > 64 && args.cc.target_info.has_avx512vbmi()) {
- const u8 *exceptionMask = (const u8 *)(&limex->exceptionMask);
- u8 *shufMask = (u8 *)&limex->exceptionShufMask;
- u8 *bitMask = (u8 *)&limex->exceptionBitMask;
- u8 *andMask = (u8 *)&limex->exceptionAndMask;
+ const u8 *exceptionMask = reinterpret_cast<const u8 *>(&limex->exceptionMask);
+ u8 *shufMask = reinterpret_cast<u8 *>(&limex->exceptionShufMask);
+ u8 *bitMask = reinterpret_cast<u8 *>(&limex->exceptionBitMask);
+ u8 *andMask = reinterpret_cast<u8 *>(&limex->exceptionAndMask);
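// Bytewise views of the exception masks; presumably consumed by the AVX512VBMI exception path guarded above.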
u32 tot_cnt = 0;
u32 pos = 0;
copy(reachMap.begin(), reachMap.end(), &limex->reachMap[0]);
// Reach table is right after the LimEx structure.
- tableRow_t *reachMask = (tableRow_t *)((char *)limex + reachOffset);
+ tableRow_t *reachMask = reinterpret_cast<tableRow_t *>(reinterpret_cast<char *>(limex) + reachOffset);
assert(ISALIGNED(reachMask));
for (size_t i = 0, end = reach.size(); i < end; i++) {
maskSetBits(reachMask[i], reach[i]);
DEBUG_PRINTF("topsOffset=%u\n", topsOffset);
limex->topOffset = topsOffset;
- tableRow_t *topMasks = (tableRow_t *)((char *)limex + topsOffset);
+ tableRow_t *topMasks = reinterpret_cast<tableRow_t *>(reinterpret_cast<char *>(limex) + topsOffset);
assert(ISALIGNED(topMasks));
for (size_t i = 0, end = tops.size(); i < end; i++) {
static
void writeAccelSsse3Masks(const NFAStateSet &accelMask, implNFA_t *limex) {
- char *perm_base = (char *)&limex->accelPermute;
- char *comp_base = (char *)&limex->accelCompare;
+ char *perm_base = reinterpret_cast<char *>(&limex->accelPermute);
+ char *comp_base = reinterpret_cast<char *>(&limex->accelCompare);
u32 num = 0; // index in accel table.
for (size_t i = accelMask.find_first(); i != accelMask.npos;
// PSHUFB permute and compare masks
size_t mask_idx = sizeof(u_128) * (state_id / 128U);
DEBUG_PRINTF("mask_idx=%zu\n", mask_idx);
- u_128 *perm = (u_128 *)(perm_base + mask_idx);
- u_128 *comp = (u_128 *)(comp_base + mask_idx);
+ u_128 *perm = reinterpret_cast<u_128 *>(perm_base + mask_idx);
+ u_128 *comp = reinterpret_cast<u_128 *>(comp_base + mask_idx);
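// perm byte: index of the byte within the 128-bit lane holding this state's bit;
// comp byte: complement of that bit, used by the PSHUFB-based compare to test it.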
maskSetByte(*perm, num, ((state_id % 128U) / 8U));
maskSetByte(*comp, num, ~(1U << (state_id % 8U)));
}
// Write accel lookup table.
limex->accelTableOffset = accelTableOffset;
copy(accelTable.begin(), accelTable.end(),
- (u8 *)((char *)limex + accelTableOffset));
+ reinterpret_cast<u8 *>(reinterpret_cast<char *>(limex) + accelTableOffset));
// Write accel aux structures.
limex->accelAuxOffset = accelAuxOffset;
- AccelAux *auxTable = (AccelAux *)((char *)limex + accelAuxOffset);
+ AccelAux *auxTable = reinterpret_cast<AccelAux *>(reinterpret_cast<char *>(limex) + accelAuxOffset);
assert(ISALIGNED(auxTable));
copy(accelAux.begin(), accelAux.end(), auxTable);
mstate_aux *getAux(NFA *n, dstate_id_t i) {
assert(isMcClellanType(n->type));
- const mcclellan *m = (mcclellan *)getMutableImplNfa(n);
- mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
+ const mcclellan *m = reinterpret_cast<const mcclellan *>(getImplNfa(n));
+ mstate_aux *aux_base = reinterpret_cast<mstate_aux *>(reinterpret_cast<char *>(n) + m->aux_offset);
mstate_aux *aux = aux_base + i;
- assert((const char *)aux < (const char *)n + m->length);
+ assert(reinterpret_cast<const char *>(aux) < reinterpret_cast<const char *>(n) + m->length);
return aux;
}
assert(n->type == MCCLELLAN_NFA_16);
u8 alphaShift = info.getAlphaShift();
u16 alphaSize = info.impl_alpha_size;
- mcclellan *m = (mcclellan *)getMutableImplNfa(n);
+ mcclellan *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(n));
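// Below, successor ids get ACCEL_FLAG OR'd in when the target state is accelerable.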
/* handle the normal states */
for (u32 i = 0; i < m->sherman_limit; i++) {
}
/* handle the sherman states */
- char *sherman_base_offset = (char *)n + m->sherman_offset;
+ char *sherman_base_offset = reinterpret_cast<char *>(n) + m->sherman_offset;
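// Sherman states store only the successors that differ from their daddy state.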
u16 sherman_ceil = m->has_wide == 1 ? m->wide_limit : m->state_count;
for (u16 j = m->sherman_limit; j < sherman_ceil; j++) {
char *sherman_cur
= findMutableShermanState(sherman_base_offset, m->sherman_limit, j);
assert(*(sherman_cur + SHERMAN_TYPE_OFFSET) == SHERMAN_STATE);
- u8 len = *(u8 *)(sherman_cur + SHERMAN_LEN_OFFSET);
- u16 *succs = (u16 *)(sherman_cur + SHERMAN_STATES_OFFSET(len));
+ u8 len = *(reinterpret_cast<u8 *>(sherman_cur + SHERMAN_LEN_OFFSET));
+ u16 *succs = reinterpret_cast<u16 *>(sherman_cur + SHERMAN_STATES_OFFSET(len));
for (u8 i = 0; i < len; i++) {
- u16 succ_i = unaligned_load_u16((u8 *)&succs[i]);
+ u16 succ_i = unaligned_load_u16(reinterpret_cast<u8 *>(&succs[i]));
// wide state has no aux structure.
if (m->has_wide && succ_i >= m->wide_limit) {
continue;
succ_i |= ACCEL_FLAG;
}
- unaligned_store_u16((u8 *)&succs[i], succ_i);
+ unaligned_store_u16(reinterpret_cast<u8 *>(&succs[i]), succ_i);
}
}
/* handle the wide states */
if (m->has_wide) {
u32 wide_limit = m->wide_limit;
- char *wide_base = (char *)n + m->wide_offset;
+ char *wide_base = reinterpret_cast<char *>(n) + m->wide_offset;
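// Wide states compress long linear symbol chains; the region is tagged with a leading WIDE_STATE byte.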
assert(*wide_base == WIDE_STATE);
u16 wide_number = verify_u16(info.wide_symbol_chain.size());
// traverse over wide head states.
for (u16 j = wide_limit; j < wide_limit + wide_number; j++) {
char *wide_cur
= findMutableWideEntry16(wide_base, wide_limit, j);
- u16 width = *(const u16 *)(wide_cur + WIDE_WIDTH_OFFSET);
- u16 *trans = (u16 *)(wide_cur + WIDE_TRANSITION_OFFSET16(width));
+ u16 width = *(reinterpret_cast<const u16 *>(wide_cur + WIDE_WIDTH_OFFSET));
+ u16 *trans = reinterpret_cast<u16 *>(wide_cur + WIDE_TRANSITION_OFFSET16(width));
// check successful transition
- u16 next = unaligned_load_u16((u8 *)trans);
+ u16 next = unaligned_load_u16(reinterpret_cast<u8 *>(trans));
if (next < wide_limit) {
const mstate_aux *aux = getAux(n, next);
if (aux->accept) {
if (aux->accel_offset) {
next |= ACCEL_FLAG;
}
- unaligned_store_u16((u8 *)trans, next);
+ unaligned_store_u16(reinterpret_cast<u8 *>(trans), next);
}
trans++;
// check failure transition
for (symbol_t k = 0; k < alphaSize; k++) {
- u16 next_k = unaligned_load_u16((u8 *)&trans[k]);
+ u16 next_k = unaligned_load_u16(reinterpret_cast<u8 *>(&trans[k]));
if (next_k >= wide_limit) {
continue;
}
if (aux_k->accel_offset) {
next_k |= ACCEL_FLAG;
}
- unaligned_store_u16((u8 *)&trans[k], next_k);
+ unaligned_store_u16(reinterpret_cast<u8 *>(&trans[k]), next_k);
}
}
}
nfa->type = MCCLELLAN_NFA_16;
}
- mcclellan *m = (mcclellan *)getMutableImplNfa(nfa);
+ mcclellan *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(nfa));
for (u32 i = 0; i < 256; i++) {
m->remap[i] = verify_u8(info.alpha_remap[i]);
}
for (const auto &reps : rl) {
ro.emplace_back(base_offset);
- report_list *p = (report_list *)((char *)n + base_offset);
+ report_list *p = reinterpret_cast<report_list *>(reinterpret_cast<char *>(n) + base_offset);
u32 i = 0;
for (const ReportID report : reps.reports) {
DEBUG_PRINTF("total_size %zu\n", total_size);
auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
- char *nfa_base = (char *)nfa.get();
+ char *nfa_base = reinterpret_cast<char *>(nfa.get());
populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
accel_escape_info.size(), arb, single, nfa.get());
ri->fillReportLists(nfa.get(), aux_offset + aux_size, reportOffsets);
- u16 *succ_table = (u16 *)(nfa_base + sizeof(NFA) + sizeof(mcclellan));
- mstate_aux *aux = (mstate_aux *)(nfa_base + aux_offset);
- mcclellan *m = (mcclellan *)getMutableImplNfa(nfa.get());
+ u16 *succ_table = reinterpret_cast<u16 *>(nfa_base + sizeof(NFA) + sizeof(mcclellan));
+ mstate_aux *aux = reinterpret_cast<mstate_aux *>(nfa_base + aux_offset);
+ mcclellan *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(nfa.get()));
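// Layout here: mcclellan header, then the 16-bit successor table, with aux structures at aux_offset.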
m->wide_limit = wide_limit;
m->wide_offset = wide_offset;
assert(accel_offset + sizeof(NFA) <= sherman_offset);
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
info.strat.buildAccel(i, accel_escape_info.at(i),
- (void *)((char *)m + this_aux->accel_offset));
+ reinterpret_cast<void *>(reinterpret_cast<char *>(m) + this_aux->accel_offset));
}
}
assert(accel_offset + sizeof(NFA) <= sherman_offset);
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
info.strat.buildAccel(i, accel_escape_info.at(i),
- (void *)((char *)m + this_aux->accel_offset));
+ reinterpret_cast<void *>(reinterpret_cast<char *>(m) + this_aux->accel_offset));
}
u8 len = verify_u8(info.impl_alpha_size - info.extra[i].daddytaken);
assert(len <= 9);
dstate_id_t d = info.states[i].daddy;
- *(u8 *)(curr_sherman_entry + SHERMAN_TYPE_OFFSET) = SHERMAN_STATE;
- *(u8 *)(curr_sherman_entry + SHERMAN_LEN_OFFSET) = len;
- *(u16 *)(curr_sherman_entry + SHERMAN_DADDY_OFFSET) = info.implId(d);
- u8 *chars = (u8 *)(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
+ *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_TYPE_OFFSET)) = SHERMAN_STATE;
+ *(reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_LEN_OFFSET)) = len;
+ *(reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_DADDY_OFFSET)) = info.implId(d);
+ u8 *chars = reinterpret_cast<u8 *>(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
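// chars records each symbol whose transition differs from the daddy's; matching successors go into the states array below.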
for (u16 s = 0; s < info.impl_alpha_size; s++) {
if (info.states[i].next[s] != info.states[d].next[s]) {
}
}
- u16 *states = (u16 *)(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
+ u16 *states = reinterpret_cast<u16 *>(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
for (u16 s = 0; s < info.impl_alpha_size; s++) {
if (info.states[i].next[s] != info.states[d].next[s]) {
DEBUG_PRINTF("s overrider %hu dad %hu char next %hu\n",
fs, info.implId(d),
info.implId(info.states[i].next[s]));
- unaligned_store_u16((u8 *)states++,
+ unaligned_store_u16(reinterpret_cast<u8 *>(states++),
info.implId(info.states[i].next[s]));
}
}
assert(ISALIGNED_16(wide_base));
char *wide_top = wide_base;
- *(u8 *)(wide_top++) = WIDE_STATE;
+ *(reinterpret_cast<u8 *>(wide_top++)) = WIDE_STATE;
wide_top = ROUNDUP_PTR(wide_top, 2);
- *(u16 *)(wide_top) = wide_number;
+ *(reinterpret_cast<u16 *>(wide_top)) = wide_number;
wide_top += 2;
char *curr_wide_entry = wide_top + wide_number * sizeof(u32);
- u32 *wide_offset_list = (u32 *)wide_top;
+ u32 *wide_offset_list = reinterpret_cast<u32 *>(wide_top);
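// Header so far: WIDE_STATE tag byte, 2-byte aligned entry count, then one u32 offset per wide entry.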
/* get the order of writing wide states */
vector<size_t> order(wide_number);
const vector<symbol_t> &symbol_chain = info.wide_symbol_chain[i];
u16 width = verify_u16(symbol_chain.size());
- *(u16 *)(curr_wide_entry + WIDE_WIDTH_OFFSET) = width;
- u8 *chars = (u8 *)(curr_wide_entry + WIDE_SYMBOL_OFFSET16);
+ *(reinterpret_cast<u16 *>(curr_wide_entry + WIDE_WIDTH_OFFSET)) = width;
+ u8 *chars = reinterpret_cast<u8 *>(curr_wide_entry + WIDE_SYMBOL_OFFSET16);
// store wide state symbol chain
for (size_t j = 0; j < width; j++) {
}
// store wide state transition table
- u16 *trans = (u16 *)(curr_wide_entry
+ u16 *trans = reinterpret_cast<u16 *>(curr_wide_entry
+ WIDE_TRANSITION_OFFSET16(width));
dstate_id_t tail = state_chain[width - 1];
symbol_t last = symbol_chain[width - 1];
*wide_offset_list++ = verify_u32(curr_wide_entry - wide_base);
- curr_wide_entry = (char *)trans;
+ curr_wide_entry = reinterpret_cast<char *>(trans);
}
}
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
- char *nfa_base = (char *)nfa.get();
+ char *nfa_base = reinterpret_cast<char *>(nfa.get());
- mcclellan *m = (mcclellan *)getMutableImplNfa(nfa.get());
+ mcclellan *m = reinterpret_cast<mcclellan *>(getMutableImplNfa(nfa.get()));
allocateFSN8(info, accel_escape_info, &m->accel_limit_8,
&m->accept_limit_8);
ri->fillReportLists(nfa.get(), aux_offset + aux_size, reportOffsets);
/* copy in the state information */
- u8 *succ_table = (u8 *)(nfa_base + sizeof(NFA) + sizeof(mcclellan));
- mstate_aux *aux = (mstate_aux *)(nfa_base + aux_offset);
+ u8 *succ_table = reinterpret_cast<u8 *>(nfa_base + sizeof(NFA) + sizeof(mcclellan));
+ mstate_aux *aux = reinterpret_cast<mstate_aux *>(nfa_base + aux_offset);
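// 8-bit variant: single-byte successor table directly after the mcclellan header.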
for (size_t i = 0; i < info.size(); i++) {
if (contains(accel_escape_info, i)) {
accel_offset += info.strat.accelSize();
info.strat.buildAccel(i, accel_escape_info.at(i),
- (void *)((char *)m + aux[j].accel_offset));
+ reinterpret_cast<void *>(reinterpret_cast<char *>(m) + aux[j].accel_offset));
}
fillInBasicState8(info, aux, succ_table, reportOffsets, reports,
}
bool has_accel_mcclellan(const NFA *nfa) {
- const mcclellan *m = (const mcclellan *)getImplNfa(nfa);
+ const mcclellan *m = reinterpret_cast<const mcclellan *>(getImplNfa(nfa));
return m->has_accel;
}
#ifdef HAVE_SVE2
} else if (reach.count() >= 240) {
kp->type = MPV_VERM16;
- vermicelli16Build(~reach, (u8 *)&kp->u.verm16.mask);
+ vermicelli16Build(~reach, reinterpret_cast<u8 *>(&kp->u.verm16.mask));
} else if (reach.count() <= 16) {
kp->type = MPV_NVERM16;
- vermicelli16Build(reach, (u8 *)&kp->u.verm16.mask);
+ vermicelli16Build(reach, reinterpret_cast<u8 *>(&kp->u.verm16.mask));
#endif // HAVE_SVE2
- } else if (shuftiBuildMasks(~reach, (u8 *)&kp->u.shuf.mask_lo,
- (u8 *)&kp->u.shuf.mask_hi) != -1) {
+ } else if (shuftiBuildMasks(~reach,
+ reinterpret_cast<u8 *>(&kp->u.shuf.mask_lo),
+ reinterpret_cast<u8 *>(&kp->u.shuf.mask_hi)) != -1) {
kp->type = MPV_SHUFTI;
} else {
kp->type = MPV_TRUFFLE;
- truffleBuildMasks(~reach, (u8 *)&kp->u.truffle.mask1,
- (u8 *)&kp->u.truffle.mask2);
+ truffleBuildMasks(~reach,
+ reinterpret_cast<u8 *>(&kp->u.truffle.mask1),
+ reinterpret_cast<u8 *>(&kp->u.truffle.mask2));
}
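// The chain above picks the cheapest acceleration for the kilopuff's reach: vermicelli16 (SVE2) for very dense or very sparse reach, shufti when masks can be built, truffle otherwise.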
kp->count = verify_u32(puffs.size());
kp->counter_offset = counter_offset;
/* start of real puffette array */
- kp->puffette_offset = verify_u32((char *)*pa - (char *)m);
+ kp->puffette_offset = verify_u32(reinterpret_cast<char *>(*pa) - reinterpret_cast<char *>(m));
for (size_t i = 0; i < puffs.size(); i++) {
assert(!it->first.auto_restart || puffs[i].unbounded);
writePuffette(*pa + i, puffs[i], rm);
auto nfa = make_zeroed_bytecode_ptr<NFA>(len);
- mpv_puffette *pa_base = (mpv_puffette *)
- ((char *)nfa.get() + sizeof(NFA) + sizeof(mpv)
+ char *nfa_base = reinterpret_cast<char *>(nfa.get());
+ mpv_puffette *pa_base = reinterpret_cast<mpv_puffette *>(nfa_base + sizeof(NFA) + sizeof(mpv)
+ sizeof(mpv_kilopuff) * puff_clusters.size()
+ sizeof(mpv_counter_info) * counters.size());
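// Layout of the MPV allocation: NFA header, mpv struct, kilopuffs, counter infos, then puffettes.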
mpv_puffette *pa = pa_base;
min_repeat = min(min_repeat, puffs.front().repeats);
}
- mpv *m = (mpv *)getMutableImplNfa(nfa.get());
+ mpv *m = reinterpret_cast<mpv *>(getMutableImplNfa(nfa.get()));
m->kilo_count = verify_u32(puff_clusters.size());
m->counter_count = verify_u32(counters.size());
m->puffette_count = puffette_count;
m->top_kilo_begin = verify_u32(triggered_puffs.size());
m->top_kilo_end = verify_u32(puff_clusters.size());
- mpv_kilopuff *kp_begin = (mpv_kilopuff *)(m + 1);
+ mpv_kilopuff *kp_begin = reinterpret_cast<mpv_kilopuff *>(m + 1);
mpv_kilopuff *kp = kp_begin;
for (auto it = puff_clusters.begin(); it != puff_clusters.end(); ++it) {
writeKiloPuff(it, rm,
kp, &pa);
++kp;
}
- assert((char *)pa == (char *)nfa.get() + len);
+ assert(reinterpret_cast<char *>(pa) == nfa_base + len);
- mpv_counter_info *out_ci = (mpv_counter_info *)kp;
+ mpv_counter_info *out_ci = reinterpret_cast<mpv_counter_info *>(kp);
for (const auto &counter : counters) {
*out_ci = counter;
++out_ci;
}
- assert((char *)out_ci == (char *)pa_base);
+ assert(reinterpret_cast<char *>(out_ci) == reinterpret_cast<char *>(pa_base));
writeCoreNfa(nfa.get(), len, min_repeat, max_counter, curr_comp_offset,
curr_decomp_offset);