const vector<NFAStateSet> &squash, implNFA_t *limex,
const u32 acceptsOffset, const u32 acceptsEodOffset,
const u32 squashOffset, const u32 reportListOffset) {
- char *limex_base = (char *)limex;
+ char *limex_base = reinterpret_cast<char *>(limex);
DEBUG_PRINTF("acceptsOffset=%u, acceptsEodOffset=%u, squashOffset=%u\n",
acceptsOffset, acceptsEodOffset, squashOffset);
limex->acceptOffset = acceptsOffset;
limex->acceptCount = verify_u32(accepts.size());
DEBUG_PRINTF("NFA has %zu accepts\n", accepts.size());
- NFAAccept *acceptsTable = (NFAAccept *)(limex_base + acceptsOffset);
+ NFAAccept *acceptsTable = reinterpret_cast<NFAAccept *>(limex_base + acceptsOffset);
assert(ISALIGNED(acceptsTable));
transform(accepts.begin(), accepts.end(), acceptsTable,
transform_offset_fn);
limex->acceptEodOffset = acceptsEodOffset;
limex->acceptEodCount = verify_u32(acceptsEod.size());
DEBUG_PRINTF("NFA has %zu EOD accepts\n", acceptsEod.size());
- NFAAccept *acceptsEodTable = (NFAAccept *)(limex_base + acceptsEodOffset);
+ NFAAccept *acceptsEodTable = reinterpret_cast<NFAAccept *>(limex_base + acceptsEodOffset);
assert(ISALIGNED(acceptsEodTable));
transform(acceptsEod.begin(), acceptsEod.end(), acceptsEodTable,
transform_offset_fn);
limex->squashCount = verify_u32(squash.size());
limex->squashOffset = squashOffset;
DEBUG_PRINTF("NFA has %zu report squash masks\n", squash.size());
- tableRow_t *mask = (tableRow_t *)(limex_base + squashOffset);
+ tableRow_t *mask = reinterpret_cast<tableRow_t *>(limex_base + squashOffset);
assert(ISALIGNED(mask));
for (size_t i = 0, end = squash.size(); i < end; i++) {
maskSetBits(mask[i], squash[i]);
for (u32 i = 0; i < num_repeats; i++) {
repeatOffsets[i] = offset;
assert(repeats[i]);
- memcpy((char *)limex + offset, repeats[i].get(), repeats[i].size());
+ memcpy(reinterpret_cast<char *>(limex) + offset, repeats[i].get(), repeats[i].size());
offset += repeats[i].size();
}
// Write repeat offset lookup table.
- assert(ISALIGNED_N((char *)limex + repeatOffsetsOffset, alignof(u32)));
- copy_bytes((char *)limex + repeatOffsetsOffset, repeatOffsets);
+ assert(ISALIGNED_N(reinterpret_cast<char *>(limex) + repeatOffsetsOffset, alignof(u32)));
+ copy_bytes(reinterpret_cast<char *>(limex) + repeatOffsetsOffset, repeatOffsets);
limex->repeatOffset = repeatOffsetsOffset;
limex->repeatCount = num_repeats;
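The hunk above keeps the existing layout intact: variable-length repeat control blocks are written back to back, then a u32 table of their start offsets is appended so the runtime can locate block i with a single index. A stripped-down sketch of that layout technique, using hypothetical names rather than the LimEx structures:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Write variable-sized blobs back to back starting at 'offset', recording
    // where each begins; the recorded offsets are later emitted as a u32
    // lookup table so blob i can be found without walking its predecessors.
    std::size_t writeBlobs(char *base, std::size_t offset,
                           const std::vector<std::vector<char>> &blobs,
                           std::vector<std::uint32_t> &offsets) {
        for (const auto &blob : blobs) {
            offsets.push_back(static_cast<std::uint32_t>(offset));
            std::memcpy(base + offset, blob.data(), blob.size());
            offset += blob.size();
        }
        return offset; // first free byte past the blobs
    }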
void writeReportList(const vector<ReportID> &reports, implNFA_t *limex,
const u32 reportListOffset) {
DEBUG_PRINTF("reportListOffset=%u\n", reportListOffset);
- assert(ISALIGNED_N((char *)limex + reportListOffset,
+ assert(ISALIGNED_N(reinterpret_cast<char *>(limex) + reportListOffset,
alignof(ReportID)));
- copy_bytes((char *)limex + reportListOffset, reports);
+ copy_bytes(reinterpret_cast<char *>(limex) + reportListOffset, reports);
}
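writeReportList leans on copy_bytes to blit the vector's contents to a precomputed offset inside the bytecode. Paraphrased from memory rather than quoted, ue2's helpers in util/container.h boil down to a sized memcpy:

    #include <cstddef>
    #include <cstring>

    // Approximation of ue2's byte_length()/copy_bytes() (util/container.h).
    template <typename C>
    std::size_t byte_length(const C &c) {
        return c.size() * sizeof(typename C::value_type);
    }

    template <typename C>
    void copy_bytes(void *dest, const C &c) {
        if (!c.empty()) {
            std::memcpy(dest, c.data(), byte_length(c));
        }
    }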
static
auto nfa = make_zeroed_bytecode_ptr<NFA>(nfaSize);
assert(nfa); // otherwise we would have thrown std::bad_alloc
- implNFA_t *limex = (implNFA_t *)getMutableImplNfa(nfa.get());
+ implNFA_t *limex = reinterpret_cast<implNFA_t *>(getMutableImplNfa(nfa.get()));
assert(ISALIGNED(limex));
writeReachMapping(reach, reachMap, limex, reachOffset);
const char *findShermanState(UNUSED const struct mcclellan *m,
const char *sherman_base_offset, u32 sherman_base,
u32 s) {
- const char *rv
- = sherman_base_offset + SHERMAN_FIXED_SIZE * (s - sherman_base);
+ const char *rv = sherman_base_offset + SHERMAN_FIXED_SIZE * (s - sherman_base);
+ // cppcheck-suppress cstyleCast
assert(rv < (const char *)m + m->length - sizeof(struct NFA));
+ // cppcheck-suppress cstyleCast
UNUSED u8 type = *(const u8 *)(rv + SHERMAN_TYPE_OFFSET);
assert(type == SHERMAN_STATE);
return rv;
static really_inline
const char *findWideEntry8(UNUSED const struct mcclellan *m,
const char *wide_base, u32 wide_limit, u32 s) {
+ // cppcheck-suppress cstyleCast
UNUSED u8 type = *(const u8 *)wide_base;
assert(type == WIDE_STATE);
- const u32 entry_offset
- = *(const u32 *)(wide_base
+ // cppcheck-suppress cstyleCast
+ const u32 entry_offset = *(const u32 *)(wide_base
+ WIDE_ENTRY_OFFSET8((s - wide_limit) * sizeof(u32)));
const char *rv = wide_base + entry_offset;
+ // cppcheck-suppress cstyleCast
assert(rv < (const char *)m + m->length - sizeof(struct NFA));
return rv;
}
static really_inline
const char *findWideEntry16(UNUSED const struct mcclellan *m,
const char *wide_base, u32 wide_limit, u32 s) {
+ // cppcheck-suppress cstyleCast
UNUSED u8 type = *(const u8 *)wide_base;
assert(type == WIDE_STATE);
- const u32 entry_offset
- = *(const u32 *)(wide_base
+ // cppcheck-suppress cstyleCast
+ const u32 entry_offset = *(const u32 *)(wide_base
+ WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
const char *rv = wide_base + entry_offset;
+ // cppcheck-suppress cstyleCast
assert(rv < (const char *)m + m->length - sizeof(struct NFA));
return rv;
}
static really_inline
char *findMutableWideEntry16(char *wide_base, u32 wide_limit, u32 s) {
- u32 entry_offset
- = *(const u32 *)(wide_base
+ // cppcheck-suppress cstyleCast
+ u32 entry_offset = *(const u32 *)(wide_base
+ WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
return wide_base + entry_offset;
for (const auto &reps : rl) {
ro.emplace_back(base_offset);
- report_list *p = (report_list *)((char *)n + base_offset);
+ report_list *p =
+     reinterpret_cast<report_list *>(reinterpret_cast<char *>(n) + base_offset);
u32 i = 0;
for (const ReportID report : reps.reports) {
void fillAccelAux(struct NFA *n, dfa_info &info,
map<dstate_id_t, AccelScheme> &accelInfo) {
DEBUG_PRINTF("Filling accel aux structures\n");
- T *s = (T *)getMutableImplNfa(n);
+ T *s = reinterpret_cast<T *>(getMutableImplNfa(n));
u32 offset = s->accel_offset;
for (dstate_id_t i = 0; i < info.size(); i++) {
dstate_id_t state_id = info.raw_id(i);
if (accelInfo.find(state_id) != accelInfo.end()) {
s->flags |= SHENG_FLAG_HAS_ACCEL;
- AccelAux *aux = (AccelAux *)((char *)n + offset);
+ AccelAux *aux = reinterpret_cast<AccelAux *>(reinterpret_cast<char *>(n) + offset);
info.strat.buildAccel(state_id, accelInfo[state_id], aux);
sstate_aux *saux =
- (sstate_aux *)((char *)n + s->aux_offset) + state_id;
+ reinterpret_cast<sstate_aux *>(
+     reinterpret_cast<char *>(n) + s->aux_offset) + state_id;
saux->accel = offset;
DEBUG_PRINTF("Accel offset: %u\n", offset);
offset += ROUNDUP_N(sizeof(AccelAux), alignof(AccelAux));
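ROUNDUP_N keeps each AccelAux slot aligned by rounding the size up to the next multiple of the alignment. It is the standard power-of-two rounding trick; paraphrased here for reference (see util/bitutils.h for the real definition):

    /* Round a up to the next multiple of n, where n is a power of two:
     * ROUNDUP_N(13, 8) == 16, ROUNDUP_N(16, 8) == 16. */
    #define ROUNDUP_N(a, n) (((a) + ((n) - 1)) & ~((n) - 1))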
n->type = SHENG_NFA;
n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
- sheng *s = (sheng *)getMutableImplNfa(n);
+ sheng *s = reinterpret_cast<sheng *>(getMutableImplNfa(n));
s->aux_offset = aux_offset;
s->report_offset = report_offset;
s->accel_offset = accel_offset;
n->type = SHENG_NFA_32;
n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
- sheng32 *s = (sheng32 *)getMutableImplNfa(n);
+ sheng32 *s = reinterpret_cast<sheng32 *>(getMutableImplNfa(n));
s->aux_offset = aux_offset;
s->report_offset = report_offset;
s->accel_offset = accel_offset;
n->type = SHENG_NFA_64;
n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
- sheng64 *s = (sheng64 *)getMutableImplNfa(n);
+ sheng64 *s = reinterpret_cast<sheng64 *>(getMutableImplNfa(n));
s->aux_offset = aux_offset;
s->report_offset = report_offset;
s->accel_offset = accel_offset;
static
void fillTops(NFA *n, dfa_info &info, dstate_id_t id,
map<dstate_id_t, AccelScheme> &accelInfo) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = reinterpret_cast<T *>(getMutableImplNfa(n));
u32 aux_base = s->aux_offset;
DEBUG_PRINTF("Filling tops for state %u\n", id);
- sstate_aux *aux = (sstate_aux *)((char *)n + aux_base) + id;
+ sstate_aux *aux =
+     reinterpret_cast<sstate_aux *>(reinterpret_cast<char *>(n) + aux_base) + id;
DEBUG_PRINTF("Aux structure for state %u, offset %zd\n", id,
- (char *)aux - (char *)n);
+ reinterpret_cast<char *>(aux) - reinterpret_cast<char *>(n));
/* we could conceivably end up in an accept/dead state on a top event,
* so mark top as accept/dead state if it indeed is.
static
void fillAux(NFA *n, dfa_info &info, dstate_id_t id, vector<u32> &reports,
vector<u32> &reports_eod, vector<u32> &report_offsets) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = reinterpret_cast<T *>(getMutableImplNfa(n));
u32 aux_base = s->aux_offset;
auto raw_id = info.raw_id(id);
auto &state = info[id];
- sstate_aux *aux = (sstate_aux *)((char *)n + aux_base) + id;
+ sstate_aux *aux =
+     reinterpret_cast<sstate_aux *>(reinterpret_cast<char *>(n) + aux_base) + id;
DEBUG_PRINTF("Filling aux and report structures for state %u\n", id);
DEBUG_PRINTF("Aux structure for state %u, offset %zd\n", id,
template <typename T>
static
void fillSingleReport(NFA *n, ReportID r_id) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = reinterpret_cast<T *>(getMutableImplNfa(n));
DEBUG_PRINTF("Single report ID: %u\n", r_id);
s->report = r_id;
fillAccelOut(accelInfo, accel_states);
}
- if (!createShuffleMasks<T>((T *)getMutableImplNfa(nfa.get()), info, accelInfo)) {
+ T *s = reinterpret_cast<T *>(getMutableImplNfa(nfa.get()));
+ if (!createShuffleMasks<T>(s, info, accelInfo)) {
return bytecode_ptr<NFA>(nullptr);
}
set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
- bytecode_ptr<NFA>(nullptr);
+ return bytecode_ptr<NFA>(nullptr);
}
#ifdef HAVE_SVE
if (svcntb()<32) {
DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n");
- bytecode_ptr<NFA>(nullptr);
+ return bytecode_ptr<NFA>(nullptr);
}
#else
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- bytecode_ptr<NFA>(nullptr);
+ return bytecode_ptr<NFA>(nullptr);
}
#endif
nfa->length = verify_u32(total_size);
nfa->queueIndex = queue;
- char *ptr = (char *)nfa.get() + sizeof(NFA);
+ char *ptr = reinterpret_cast<char *>(nfa.get()) + sizeof(NFA);
char *base_offset = ptr;
- Tamarama *t = (Tamarama *)ptr;
+ Tamarama *t = reinterpret_cast<Tamarama *>(ptr);
t->numSubEngines = verify_u32(subSize);
t->activeIdxSize = verify_u8(activeIdxSize);
copy_bytes(ptr, top_base);
ptr += byte_length(top_base);
- u32 *offsets = (u32 *)ptr;
+ u32 *offsets = reinterpret_cast<u32 *>(ptr);
char *sub_nfa_offset = ptr + sizeof(u32) * subSize;
copyInSubnfas(base_offset, *nfa, tamaInfo, offsets, sub_nfa_offset,
activeIdxSize);
- assert((size_t)(sub_nfa_offset - (char *)nfa.get()) <= total_size);
+ assert(static_cast<size_t>(sub_nfa_offset - reinterpret_cast<char *>(nfa.get()))
+        <= total_size);
return nfa;
}
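For orientation, the buffer assembled above has the following shape, reconstructed from the writes in this hunk (the region order is exactly what the code emits; sizes are schematic):

    /*
     *   NFA header            sizeof(NFA) bytes
     *   Tamarama header       numSubEngines, activeIdxSize
     *   top remapping table   copy_bytes(ptr, top_base)
     *   u32 offsets[subSize]  per-sub-engine offsets, filled by copyInSubnfas()
     *   sub-engine bytecode   copied in by copyInSubnfas()
     */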
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
is_reset);
auto nfa = makeLbrNfa<lbr_verm>(LBR_NFA_VERM, rtype, repeatMax);
- struct lbr_verm *lv = (struct lbr_verm *)getMutableImplNfa(nfa.get());
+ struct lbr_verm *lv = reinterpret_cast<struct lbr_verm *>(getMutableImplNfa(nfa.get()));
lv->c = escapes.find_first();
fillNfa<lbr_verm>(nfa.get(), &lv->common, report, repeatMin, repeatMax,
return true;
}
- const u8 *s = (const u8 *)expression;
+ const u8 *s = reinterpret_cast<const u8 *>(expression);
u32 val;
size_t i = 0;
#endif
void *aligned_malloc_internal(size_t size, size_t align) {
- // cppcheck-suppress cstyleCast
- void *mem= nullptr;;
+ void *mem = nullptr;
+ // cppcheck-suppress cstyleCast
int rv = posix_memalign(&mem, align, size);
if (rv != 0) {
DEBUG_PRINTF("posix_memalign returned %d when asked for %zu bytes\n",
#include "unaligned.h"
// Aligned loads
+#ifndef __cplusplus
#define load_u8(a) (*(const u8 *)(a))
#define load_u16(a) (*(const u16 *)(a))
#define load_u32(a) (*(const u32 *)(a))
#define load_u64a(a) (*(const u64a *)(a))
+#else
+#define load_u8(a) (*(reinterpret_cast<const u8 *>(a)))
+#define load_u16(a) (*(reinterpret_cast<const u16 *>(a)))
+#define load_u32(a) (*(reinterpret_cast<const u32 *>(a)))
+#define load_u64a(a) (*(reinterpret_cast<const u64a *>(a)))
+#endif // __cplusplus
#define load_m128(a) load128(a)
#define load_m256(a) load256(a)
#define load_m384(a) load384(a)
#define load_m512(a) load512(a)
// Unaligned loads
+#ifndef __cplusplus
#define loadu_u8(a) (*(const u8 *)(a))
#define loadu_u16(a) unaligned_load_u16((const u8 *)(a))
#define loadu_u32(a) unaligned_load_u32((const u8 *)(a))
#define loadu_u64a(a) unaligned_load_u64a((const u8 *)(a))
+#else
+#define loadu_u8(a) (*(reinterpret_cast<const u8 *>(a)))
+#define loadu_u16(a) unaligned_load_u16(reinterpret_cast<const u8 *>(a))
+#define loadu_u32(a) unaligned_load_u32(reinterpret_cast<const u8 *>(a))
+#define loadu_u64a(a) unaligned_load_u64a(reinterpret_cast<const u8 *>(a))
+#endif // __cplusplus
#define loadu_m128(a) loadu128(a)
#define loadu_m256(a) loadu256(a)
#define loadu_m384(a) loadu384(a)
#define loadu_m512(a) loadu512(a)
// Aligned stores
+#ifndef __cplusplus
#define store_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
#define store_u16(ptr, a) do { *(u16 *)(ptr) = (a); } while(0)
#define store_u32(ptr, a) do { *(u32 *)(ptr) = (a); } while(0)
#define store_u64a(ptr, a) do { *(u64a *)(ptr) = (a); } while(0)
+#else
+#define store_u8(ptr, a) do { *(reinterpret_cast<u8 *>(ptr)) = (a); } while(0)
+#define store_u16(ptr, a) do { *(reinterpret_cast<u16 *>(ptr)) = (a); } while(0)
+#define store_u32(ptr, a) do { *(reinterpret_cast<u32 *>(ptr)) = (a); } while(0)
+#define store_u64a(ptr, a) do { *(reinterpret_cast<u64a *>(ptr)) = (a); } while(0)
+#endif // __cplusplus
#define store_m128(ptr, a) store128(ptr, a)
#define store_m256(ptr, a) store256(ptr, a)
#define store_m384(ptr, a) store384(ptr, a)
#define store_m512(ptr, a) store512(ptr, a)
// Unaligned stores
+#ifndef __cplusplus
#define storeu_u8(ptr, a) do { *(u8 *)(ptr) = (a); } while(0)
+#else
+#define storeu_u8(ptr, a) do { *(reinterpret_cast<u8 *>(ptr)) = (a); } while(0)
+#endif // __cplusplus
#define storeu_u16(ptr, a) unaligned_store_u16(ptr, a)
#define storeu_u32(ptr, a) unaligned_store_u32(ptr, a)
#define storeu_u64a(ptr, a) unaligned_store_u64a(ptr, a)
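The guards above keep this header usable from both C and C++ translation units: __cplusplus is defined only by C++ compilers, so the C branch keeps the C-style casts while the C++ branch spells them as reinterpret_cast to satisfy cppcheck's cstyleCast checker. A minimal, self-contained illustration of the idiom (load_byte is a made-up macro, not part of this header):

    #include <stdint.h>

    /* Pick the cast spelling per language. */
    #ifndef __cplusplus
    #define load_byte(a) (*(const uint8_t *)(a))
    #else
    #define load_byte(a) (*(reinterpret_cast<const uint8_t *>(a)))
    #endif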
// Try the exact match.
corpus = "hatstand" + string(199983, '_') + "teakettle";
err = hs_scan(db, corpus.c_str(), corpus.length(), 0, scratch, record_cb,
- (void *)&c);
+ reinterpret_cast<void *>(&c));
ASSERT_EQ(HS_SUCCESS, err);
ASSERT_EQ(1U, c.matches.size());
ASSERT_EQ(MatchRecord(200000, 0), c.matches[0]);
int vectorCallback(unsigned id, unsigned long long from,
unsigned long long to, unsigned, void *ctx) {
//printf("match id %u at (%llu,%llu)\n", id, from, to);
- vector<Match> *matches = (vector<Match> *)ctx;
+ vector<Match> *matches = reinterpret_cast<vector<Match> *>(ctx);
matches->push_back(Match(id, from, to));
return 0;
}
const string corpus = matchingCorpus(params.max);
initQueue();
- q.buffer = (const u8 *)corpus.c_str();
+ q.buffer = reinterpret_cast<const u8 *>(corpus.c_str());
q.length = corpus.length();
u64a end = corpus.length();
}
size_t len = st.st_size;
- bytes = (char *)mmap(nullptr, len, PROT_READ, MAP_SHARED, fd, 0);
+ bytes = reinterpret_cast<char *>(mmap(nullptr, len, PROT_READ, MAP_SHARED, fd, 0));
if (bytes == MAP_FAILED) {
cout << "mmap failed" << endl;
close(fd);
static really_inline
char *makeHex(const unsigned char *pat, unsigned patlen) {
size_t hexlen = patlen * 4;
- char *hexbuf = (char *)malloc(hexlen + 1);
+ char *hexbuf = reinterpret_cast<char *>(malloc(hexlen + 1));
unsigned i;
char *buf;
for (i = 0, buf = hexbuf; i < patlen; i++, buf += 4) {