*c_16 = *(ptr + 15); \
*c_32 = *(ptr + 31); \
*c_48 = *(ptr + 47); \
- m512 r_msk = set512_64(0ULL, r_msk_base[*c_48], 0ULL, r_msk_base[*c_32],\
+ m512 r_msk = set8x64(0ULL, r_msk_base[*c_48], 0ULL, r_msk_base[*c_32],\
0ULL, r_msk_base[*c_16], 0ULL, r_msk_base[*c_0]);\
*c_0 = *(ptr + 63)
*/
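/*
 * The renames in this patch follow a set1_<lanes>x<bits> vs set<lanes>x<bits>
 * convention: set1_* broadcasts a single value to every lane, while the plain
 * set* form takes one argument per lane, highest lane first. A minimal sketch
 * of such wrappers over raw AVX-512 intrinsics (assumed shapes for
 * illustration, not the project's actual simd_utils definitions):
 */
#include <immintrin.h>

static inline __m512i sketch_set1_64x8(char c) {
    return _mm512_set1_epi8(c); /* splat one byte across all 64 lanes */
}

static inline __m512i sketch_set8x64(long long e7, long long e6, long long e5,
                                     long long e4, long long e3, long long e2,
                                     long long e1, long long e0) {
    /* one value per 64-bit lane; argument order matches _mm512_set_epi64 */
    return _mm512_set_epi64(e7, e6, e5, e4, e3, e2, e1, e0);
}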
#define PREPARE_FAT_MASKS(n) \
- m512 lo_mask = set64x8(0xf); \
+ m512 lo_mask = set1_64x8(0xf); \
m512 sl_msk[n - 1]; \
FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
static really_inline
m512 getMask(u8 c, bool noCase) {
u8 k = caseClear8(c, noCase);
- return set64x8(k);
+ return set1_64x8(k);
}
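/*
 * A plausible reading of the helpers above (an assumption based on
 * Hyperscan's usual conventions, where CASE_CLEAR is the byte mask 0xdf):
 * clearing bit 0x20 folds ASCII case, so 'A' (0x41) and 'a' (0x61) both
 * reduce to 0x41 and a single set1_64x8(k) mask matches either case.
 */
static inline unsigned char sketch_caseClear8(unsigned char c, int noCase) {
    return noCase ? (unsigned char)(c & 0xdf) : c;
}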
static really_inline
m512 getCaseMask(void) {
- return set64x8(CASE_CLEAR);
+ return set1_64x8(CASE_CLEAR);
}
// The short scan routine. It is used both to scan data up to an
#define AND_STATE JOIN(and_, STATE_T)
#define EQ_STATE(a, b) (!JOIN(noteq_, STATE_T)((a), (b)))
#define OR_STATE JOIN(or_, STATE_T)
-#define EXPAND_STATE JOIN(expand_, STATE_T)
+#define EXPAND_STATE JOIN(broadcast_, STATE_T)
#define SHUFFLE_BYTE_STATE JOIN(shuffle_byte_, STATE_T)
#define TESTBIT_STATE JOIN(testbit_, STATE_T)
#define EXCEPTION_T JOIN(struct NFAException, SIZE)
assert(s_in); /* should not already be dead */
assert(soft_c_end <= hard_c_end);
DEBUG_PRINTF("s_in = %u (adjusted %u)\n", s_in, s_in - 1);
- m512 s = set64x8(s_in - 1);
+ m512 s = set1_64x8(s_in - 1);
const u8 *c = *c_inout;
const u8 *c_end = hard_c_end - SHENG_CHUNK + 1;
if (!do_accel) {
#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
u32 sheng_stop_limit_x4 = sheng_stop_limit * 0x01010101;
- m512 simd_stop_limit = set16x32(sheng_stop_limit_x4);
- m512 accel_delta = set64x8(sheng_limit - sheng_stop_limit);
+ m512 simd_stop_limit = set1_16x32(sheng_stop_limit_x4);
+ m512 accel_delta = set1_64x8(sheng_limit - sheng_stop_limit);
DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
m->sheng_accel_limit, sheng_stop_limit);
#endif
}
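/*
 * Worked example of the x4 packing above (the limit value is hypothetical):
 * multiplying a byte by 0x01010101 replicates it into all four bytes of a
 * u32, so splatting the product with set1_16x32 fills the vector with the
 * same byte pattern as set1_64x8 of the original byte.
 */
#include <assert.h>
#include <stdint.h>

static void x4_packing_demo(void) {
    uint8_t stop_limit = 0x2a;              /* hypothetical limit byte */
    uint32_t x4 = stop_limit * 0x01010101u; /* replicate into 4 bytes */
    assert(x4 == 0x2a2a2a2aU);
}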
DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
- m512 cur_state = set64x8(*state);
+ m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
while (likely(cur_buf != end)) {
}
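/*
 * Conceptual model of the succ_masks loop above (a scalar sketch, not the
 * vectorised kernel): each input byte selects one row of s->succ_masks, and
 * the current state indexes into that row to give the successor state. The
 * vector code performs the same lookup for many positions at once with a
 * byte shuffle, which is why the state lives in every lane of cur_state.
 */
static inline uint8_t sketch_sheng_step(const uint8_t succ_masks[256][64],
                                        uint8_t state, uint8_t input) {
    return succ_masks[input][state]; /* row = input char, column = state */
}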
DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
- m512 cur_state = set64x8(*state);
+ m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
while (likely(cur_buf != end)) {
return MO_CONTINUE_MATCHING;
}
- m512 cur_state = set64x8(*state);
+ m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
while (likely(end - cur_buf >= 4)) {
return MO_CONTINUE_MATCHING;
}
- m512 cur_state = set64x8(*state);
+ m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
while (likely(end - cur_buf >= 4)) {
DEBUG_PRINTF("shufti %p len %zu\n", buf, buf_end - buf);
DEBUG_PRINTF("b %s\n", buf);
- const m512 low4bits = set64x8(0xf);
+ const m512 low4bits = set1_64x8(0xf);
const m512 zeroes = zeroes512();
- const m512 wide_mask_lo = set4x128(mask_lo);
- const m512 wide_mask_hi = set4x128(mask_hi);
+ const m512 wide_mask_lo = set1_4x128(mask_lo);
+ const m512 wide_mask_hi = set1_4x128(mask_hi);
const u8 *rv;
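/*
 * Sketch of the shufti test these masks feed (assumed from the standard
 * shufti scheme): each input byte is split into nibbles, each nibble
 * selects a table byte via shuffle, and the byte belongs to the character
 * class only when the two lookups share a set bit.
 */
static inline int sketch_shufti_hit(const unsigned char lo_tbl[16],
                                    const unsigned char hi_tbl[16],
                                    unsigned char c) {
    return (lo_tbl[c & 0xf] & hi_tbl[c >> 4]) != 0;
}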
// small cases.
assert(buf && buf_end);
assert(buf < buf_end);
- const m512 low4bits = set64x8(0xf);
+ const m512 low4bits = set1_64x8(0xf);
const m512 zeroes = zeroes512();
- const m512 wide_mask_lo = set4x128(mask_lo);
- const m512 wide_mask_hi = set4x128(mask_hi);
+ const m512 wide_mask_lo = set1_4x128(mask_lo);
+ const m512 wide_mask_hi = set1_4x128(mask_hi);
const u8 *rv;
if (buf_end - buf < 64) {
DEBUG_PRINTF("buf %p len %zu\n", buf, buf_end - buf);
const m512 ones = ones512();
- const m512 low4bits = set64x8(0xf);
- const m512 wide_mask1_lo = set4x128(mask1_lo);
- const m512 wide_mask1_hi = set4x128(mask1_hi);
- const m512 wide_mask2_lo = set4x128(mask2_lo);
- const m512 wide_mask2_hi = set4x128(mask2_hi);
+ const m512 low4bits = set1_64x8(0xf);
+ const m512 wide_mask1_lo = set1_4x128(mask1_lo);
+ const m512 wide_mask1_hi = set1_4x128(mask1_hi);
+ const m512 wide_mask2_lo = set1_4x128(mask2_lo);
+ const m512 wide_mask2_hi = set1_4x128(mask2_hi);
const u8 *rv;
if (buf_end - buf <= 64) {
static really_inline
u64a block(m512 shuf_mask_lo_highclear, m512 shuf_mask_lo_highset, m512 v) {
- m512 highconst = set64x8(0x80);
- m512 shuf_mask_hi = set8x64(0x8040201008040201);
+ m512 highconst = set1_64x8(0x80);
+ m512 shuf_mask_hi = set1_8x64(0x8040201008040201);
// and now do the real work
m512 shuf1 = pshufb_m512(shuf_mask_lo_highclear, v);
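/*
 * The set1_8x64(0x8040201008040201) constant above puts 1 << i into byte i
 * of every 8-byte group, so a shuffle against it turns a 3-bit index into
 * the corresponding single-bit byte. A scalar model of that lookup:
 */
static inline unsigned char sketch_truffle_bit(unsigned char idx) {
    static const unsigned char bits[8] = {0x01, 0x02, 0x04, 0x08,
                                          0x10, 0x20, 0x40, 0x80};
    return bits[idx & 0x7]; /* byte idx of 0x8040201008040201 */
}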
const u8 *truffleExec(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
const u8 *buf, const u8 *buf_end) {
DEBUG_PRINTF("len %zu\n", buf_end - buf);
- const m512 wide_clear = set4x128(shuf_mask_lo_highclear);
- const m512 wide_set = set4x128(shuf_mask_lo_highset);
+ const m512 wide_clear = set1_4x128(shuf_mask_lo_highclear);
+ const m512 wide_set = set1_4x128(shuf_mask_lo_highset);
assert(buf && buf_end);
assert(buf < buf_end);
const u8 *rtruffleExec(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
const u8 *buf, const u8 *buf_end) {
- const m512 wide_clear = set4x128(shuf_mask_lo_highclear);
- const m512 wide_set = set4x128(shuf_mask_lo_highset);
+ const m512 wide_clear = set1_4x128(shuf_mask_lo_highclear);
+ const m512 wide_set = set1_4x128(shuf_mask_lo_highset);
assert(buf && buf_end);
assert(buf < buf_end);
const u8 *rv;
uintptr_t len = buf_end - buf;
__mmask64 mask = (~0ULL) >> (64 - len);
m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 v = and512(casemask, data);
u64a z = eq512mask(chars, v);
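/*
 * Worked example for the masked tail load above: for len = 5,
 * (~0ULL) >> (64 - 5) == 0x1f, so loadu_maskz_m512 reads exactly five bytes
 * (zero-filling the other lanes) and eq512mask yields a 64-bit bitmap with
 * bit i set where byte i matched. The shift is only defined for len >= 1.
 */
#include <assert.h>

static void tail_mask_demo(void) {
    unsigned long long len = 5;
    unsigned long long mask = (~0ULL) >> (64 - len);
    assert(mask == 0x1fULL);
}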
const u8 *vermSearchAlignedNocase(m512 chars, const u8 *buf,
const u8 *buf_end, char negate) {
assert((size_t)buf % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
for (; buf + 63 < buf_end; buf += 64) {
m512 data = load512(buf);
// returns NULL if not found
static really_inline
const u8 *vermUnalignNocase(m512 chars, const u8 *buf, char negate) {
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 data = loadu512(buf); // unaligned
u64a z = eq512mask(chars, and512(casemask, data));
if (negate) {
uintptr_t len = buf_end - buf;
__mmask64 mask = (~0ULL) >> (64 - len);
m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 v = and512(casemask, data);
u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
const u8 *dvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
const u8 *buf, const u8 *buf_end) {
assert((size_t)buf % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
for (; buf + 64 < buf_end; buf += 64) {
m512 data = load512(buf);
static really_inline
const u8 *dvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
/* due to laziness, nonalphas and nocase have interesting behaviour */
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 data = loadu512(buf); // unaligned
m512 v = and512(casemask, data);
u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
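/*
 * The z computation above fuses two per-byte match bitmaps into a pair
 * match: shifting the chars2 bitmap right by one aligns "c2 at i + 1" with
 * "c1 at i", so each set bit in z marks the first byte of the pair. The
 * reverse scanners later in this patch shift left instead, anchoring z on
 * the second byte. A scalar model:
 */
static inline unsigned long long sketch_pair_bitmap(unsigned long long z1,
                                                    unsigned long long z2) {
    return z1 & (z2 >> 1); /* bit i set iff c1 at i and c2 at i + 1 */
}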
uintptr_t len = buf_end - buf;
__mmask64 mask = (~0ULL) >> (64 - len);
m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 v = and512(casemask, data);
u64a z = eq512mask(chars, v);
const u8 *rvermSearchAlignedNocase(m512 chars, const u8 *buf,
const u8 *buf_end, char negate) {
assert((size_t)buf_end % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
for (; buf + 63 < buf_end; buf_end -= 64) {
m512 data = load512(buf_end - 64);
// returns NULL if not found
static really_inline
const u8 *rvermUnalignNocase(m512 chars, const u8 *buf, char negate) {
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 data = loadu512(buf); // unaligned
u64a z = eq512mask(chars, and512(casemask, data));
if (negate) {
uintptr_t len = buf_end - buf;
__mmask64 mask = (~0ULL) >> (64 - len);
m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 v = and512(casemask, data);
u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
const u8 *rdvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
const u8 *buf, const u8 *buf_end) {
assert((size_t)buf_end % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
for (; buf + 64 < buf_end; buf_end -= 64) {
m512 data = load512(buf_end - 64);
static really_inline
const u8 *rdvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
// due to laziness, nonalphas and nocase have interesting behaviour
- m512 casemask = set64x8(CASE_CLEAR);
+ m512 casemask = set1_64x8(CASE_CLEAR);
m512 data = loadu512(buf);
m512 v = and512(casemask, data);
u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
int validateShuftiMask64x8(const m512 data, const m512 hi_mask,
const m512 lo_mask, const m512 and_mask,
const u64a neg_mask, const u64a valid_data_mask) {
- m512 low4bits = set64x8(0xf);
+ m512 low4bits = set1_64x8(0xf);
m512 c_lo = pshufb_m512(lo_mask, and512(data, low4bits));
m512 c_hi = pshufb_m512(hi_mask,
rshift64_m512(andnot512(low4bits, data), 4));
const m512 lo_mask_1, const m512 lo_mask_2,
const m512 and_mask_hi, const m512 and_mask_lo,
const u64a neg_mask, const u64a valid_data_mask) {
- m512 low4bits = set64x8(0xf);
+ m512 low4bits = set1_64x8(0xf);
m512 data_lo = and512(data, low4bits);
m512 data_hi = and512(rshift64_m512(data, 4), low4bits);
m512 c_lo_1 = pshufb_m512(lo_mask_1, data_lo);
expand64(v[6], m[6]), expand64(v[7], m[7]) };
#if defined(HAVE_AVX512)
- m512 xvec = set64x8(x[7], x[6], x[5], x[4],
+ m512 xvec = set8x64(x[7], x[6], x[5], x[4],
x[3], x[2], x[1], x[0]);
#elif defined(HAVE_AVX2)
m512 xvec = { .lo = set4x64(x[3], x[2], x[1], x[0]),
char cmp[sizeof(m256)];
for (unsigned i = 0; i < 256; i++) {
- m256 simd = set32x8(i);
+ m256 simd = set1_32x8(i);
memset(cmp, i, sizeof(simd));
ASSERT_EQ(0, memcmp(cmp, &simd, sizeof(simd)));
}
char cmp[sizeof(m256)];
for (unsigned i = 0; i < 256; i++) {
- m128 x = set16x8(i);
- m256 y = set32x8(i);
- m256 z = set2x128(x);
+ m128 x = set1_16x8(i);
+ m256 y = set1_32x8(i);
+ m256 z = set1_2x128(x);
memset(cmp, i, sizeof(z));
ASSERT_EQ(0, memcmp(cmp, &z, sizeof(z)));
ASSERT_EQ(0, memcmp(&y, &z, sizeof(z)));