} \
}
+
#define SINGLE_ZSCAN() \
do { \
while (unlikely(z)) { \
return HWLM_SUCCESS;
}
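+// single_zscan: report a match for each set bit of the match mask z; the bit
+// index is the byte offset of the match relative to d. Returns early if the
+// callback asks us to terminate.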
+static really_really_inline
+hwlm_error_t single_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+ Z_TYPE z, size_t len, const struct cb_info *cbi) {
+ while (unlikely(z)) {
+ Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
+ size_t matchPos = d - buf + pos;
+ DEBUG_PRINTF("match pos %zu\n", matchPos);
+ hwlmcb_rv_t rv = final(n, buf, len, 1, cbi, matchPos);
+ RETURN_IF_TERMINATED(rv);
+ }
+ return HWLM_SUCCESS;
+}
+
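+// double_zscan: as single_zscan, but each set bit in z marks the second byte
+// of a two-byte candidate, so the reported offset is stepped back by one.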
+static really_really_inline
+hwlm_error_t double_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+ Z_TYPE z, size_t len, const struct cb_info *cbi) {
+ while (unlikely(z)) {
+ Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
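+ // pos is the offset of the pair's second byte; report the first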
+ size_t matchPos = d - buf + pos - 1;
+ DEBUG_PRINTF("match pos %zu\n", matchPos);
+ hwlmcb_rv_t rv = final(n, buf, len, 0, cbi, matchPos);
+ RETURN_IF_TERMINATED(rv);
+ }
+ return HWLM_SUCCESS;
+}
+
#if defined(HAVE_AVX512)
#define CHUNKSIZE 64
#define MASK_TYPE m512
#include "noodle_engine_sse.c"
#endif
+
static really_inline
hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
size_t len, size_t start, bool noCase,
size_t end = len;
assert(offset < end);
-#if !defined(HAVE_AVX512)
hwlm_error_t rv;
-/* if (end - offset <= CHUNKSIZE) {
- rv = scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, offset,
- end);
- return rv;
- }*/
-
if (end - offset <= CHUNKSIZE) {
rv = scanSingleUnaligned(n, buf, len, offset, caseMask, mask1,
cbi, offset, end);
s2End, len);
return rv;
-#else // HAVE_AVX512
- return scanSingle512(n, buf, len, noCase, caseMask, mask1, cbi, offset,
- end);
-#endif
}
static really_inline
const MASK_TYPE mask1 = getMask(n->key0, noCase);
const MASK_TYPE mask2 = getMask(n->key1, noCase);
-#if !defined(HAVE_AVX512)
hwlm_error_t rv;
-/* if (end - offset <= CHUNKSIZE) {
- rv = scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- offset, end);
- return rv;
- }*/
if (end - offset <= CHUNKSIZE) {
rv = scanDoubleUnaligned(n, buf, len, offset, caseMask, mask1,
mask2, cbi, offset, end);
mask2, cbi, off, end);
return rv;
-#else // AVX512
- return scanDouble512(n, buf, len, caseMask, mask1, mask2, cbi,
- offset, end);
-#endif // AVX512
}
-
static really_inline
hwlm_error_t scanSingleNoCase(const struct noodTable *n, const u8 *buf,
size_t len, size_t start,
static really_inline
hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
- size_t len, size_t offset,
- m256 caseMask, m256 mask1,
+ size_t len, size_t offset, m256 caseMask, m256 mask1,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + offset;
z &= mask;
- SINGLE_ZSCAN();
-
- return HWLM_SUCCESS;
+ return single_zscan(n, d, buf, z, len, cbi);
}
static really_inline
hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
- size_t len, size_t offset,
- m256 caseMask, m256 mask1, m256 mask2,
+ size_t len, size_t offset, m256 caseMask, m256 mask1, m256 mask2,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + offset;
DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
z &= mask;
- DOUBLE_ZSCAN();
-
- return HWLM_SUCCESS;
-}
-/*
-// The short scan routine. It is used both to scan data up to an
-// alignment boundary if needed and to finish off data that the aligned scan
-// function can't handle (due to small/unaligned chunk at end)
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
- size_t len, m256 caseMask, m256 mask1,
- const struct cb_info *cbi, size_t start,
- size_t end) {
- const u8 *d = buf + start;
- size_t l = end - start;
- DEBUG_PRINTF("l %zu\n", l);
- assert(l <= 32);
- if (!l) {
- return HWLM_SUCCESS;
- }
- m256 v;
-
- if (l < 4) {
- u8 *vp = (u8*)&v;
- switch (l) {
- case 3: vp[2] = d[2]; // fallthrough
- case 2: vp[1] = d[1]; // fallthrough
- case 1: vp[0] = d[0]; // fallthrough
- }
- } else {
- v = masked_move256_len(d, l);
- }
-
- m256 v = and256(v, caseMask);
- // mask out where we can't match
- u32 mask = (0xFFFFFFFF >> (32 - l));
-
- u32 z = mask & movemask256(eq256(mask1, v));
-
- SINGLE_ZSCAN();
-
- return HWLM_SUCCESS;
+ return double_zscan(n, d, buf, z, len, cbi);
}
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
- size_t len, m256 caseMask, m256 mask1,
- m256 mask2, const struct cb_info *cbi,
- size_t start, size_t end) {
- const u8 *d = buf + start;
- size_t l = end - start;
- if (!l) {
- return HWLM_SUCCESS;
- }
- assert(l <= 32);
- u32 mask = (0xFFFFFFFF >> (32 - l));
-
- m256 v;
-
- DEBUG_PRINTF("d %zu\n", d - buf);
- if (l < 4) {
- u8 *vp = (u8*)&v;
- switch (l) {
- case 3: vp[2] = d[2]; // fallthrough
- case 2: vp[1] = d[1]; // fallthrough
- case 1: vp[0] = d[0]; // fallthrough
- }
- } else {
- v = masked_move256_len(d, l);
- }
-
- m256 v = and256(v, caseMask);
-
- u32 z0 = movemask256(eq256(mask1, v));
- u32 z1 = movemask256(eq256(mask2, v));
- u32 z = (z0 << 1) & z1;
-
- // mask out where we can't match
- z &= mask;
-
- DOUBLE_ZSCAN();
-
- return HWLM_SUCCESS;
-}*/
-
static really_inline
hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
size_t len, m256 caseMask, m256 mask1,
// On large packet buffers, this prefetch appears to get us about 2%.
__builtin_prefetch(d + 128);
- SINGLE_ZSCAN();
+ hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
return HWLM_SUCCESS;
}
// On large packet buffers, this prefetch appears to get us about 2%.
__builtin_prefetch(d + 128);
- DOUBLE_ZSCAN();
+ hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
return HWLM_SUCCESS;
// alignment boundary if needed and to finish off data that the aligned scan
// function can't handle (due to small/unaligned chunk at end)
static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
- size_t len, bool noCase, m512 caseMask, m512 mask1,
- const struct cb_info *cbi, size_t start,
- size_t end) {
- const u8 *d = buf + start;
- ptrdiff_t scan_len = end - start;
- DEBUG_PRINTF("scan_len %zu\n", scan_len);
- assert(scan_len <= 64);
- if (!scan_len) {
+hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t offset, m512 caseMask, m512 mask1,
+ const struct cb_info *cbi, size_t start,
+ size_t end) {
+ const u8 *d = buf + offset;
+ DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+ const size_t l = end - start;
+ assert(l <= 64);
+ if (!l) {
return HWLM_SUCCESS;
}
- __mmask64 k = (~0ULL) >> (64 - scan_len);
+ __mmask64 k = (~0ULL) >> (64 - l);
DEBUG_PRINTF("load mask 0x%016llx\n", k);
m512 v = loadu_maskz_m512(k, d);
-
- if (noCase) {
- v = and512(v, caseMask);
- }
+ v = and512(v, caseMask);
// reuse the load mask to indicate valid bytes
u64a z = masked_eq512mask(k, mask1, v);
- SINGLE_ZSCAN();
-
- return HWLM_SUCCESS;
+ return single_zscan(n, d, buf, z, len, cbi);
}
static really_inline
-hwlm_error_t scanSingle512(const struct noodTable *n, const u8 *buf, size_t len,
- bool noCase, m512 caseMask, m512 mask1,
- const struct cb_info *cbi, size_t start,
- size_t end) {
- const u8 *d = buf + start;
- const u8 *e = buf + end;
- DEBUG_PRINTF("start %p end %p \n", d, e);
+hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
+ size_t len, m512 caseMask, m512 mask1,
+ const struct cb_info *cbi, size_t start,
+ size_t end) {
+ const u8 *d = buf + start, *e = buf + end;
assert(d < e);
- if (d + 64 >= e) {
- goto tail;
- }
-
- // peel off first part to cacheline boundary
- const u8 *d1 = ROUNDUP_PTR(d, 64);
- if (scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, start,
- d1 - buf) == HWLM_TERMINATED) {
- return HWLM_TERMINATED;
- }
- d = d1;
- for (; d + 64 < e; d += 64) {
- DEBUG_PRINTF("d %p e %p \n", d, e);
- m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
+ for (; d < e; d += 64) {
+ m512 v = and512(load512(d), caseMask);
u64a z = eq512mask(mask1, v);
+
+ // On large packet buffers, this prefetch appears to get us about 2%.
__builtin_prefetch(d + 128);
- SINGLE_ZSCAN();
+ hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
-
-tail:
- DEBUG_PRINTF("d %p e %p \n", d, e);
- // finish off tail
-
- return scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, d - buf,
- e - buf);
+ return HWLM_SUCCESS;
}
static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
- size_t len, bool noCase, m512 caseMask, m512 mask1,
- m512 mask2, const struct cb_info *cbi,
- u64a *lastz0, size_t start, size_t end) {
- DEBUG_PRINTF("start %zu end %zu last 0x%016llx\n", start, end, *lastz0);
- const u8 *d = buf + start;
- ptrdiff_t scan_len = end - start;
- if (!scan_len) {
+hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t offset, m512 caseMask,
+ m512 mask1, m512 mask2,
+ const struct cb_info *cbi, size_t start,
+ size_t end) {
+ const u8 *d = buf + offset;
+ DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+ const size_t l = end - start;
+ assert(l <= 64);
+ if (!l) {
return HWLM_SUCCESS;
}
- assert(scan_len <= 64);
- __mmask64 k = (~0ULL) >> (64 - scan_len);
- DEBUG_PRINTF("load mask 0x%016llx scan_len %zu\n", k, scan_len);
+
+ __mmask64 k = (~0ULL) >> (64 - l);
+ DEBUG_PRINTF("load mask 0x%016llx\n", k);
m512 v = loadu_maskz_m512(k, d);
- if (noCase) {
- v = and512(v, caseMask);
- }
+ v = and512(v, caseMask);
u64a z0 = masked_eq512mask(k, mask1, v);
u64a z1 = masked_eq512mask(k, mask2, v);
- u64a z = (*lastz0 | (z0 << 1)) & z1;
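+ // bit i is set when byte i-1 matches key0 and byte i matches key1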
+ u64a z = (z0 << 1) & z1;
DEBUG_PRINTF("z 0x%016llx\n", z);
- DOUBLE_ZSCAN();
- *lastz0 = z0 >> (scan_len - 1);
- return HWLM_SUCCESS;
+ return double_zscan(n, d, buf, z, len, cbi);
}
static really_inline
-hwlm_error_t scanDouble512(const struct noodTable *n, const u8 *buf, size_t len,
- bool noCase, m512 caseMask, m512 mask1, m512 mask2,
- const struct cb_info *cbi, size_t start,
- size_t end) {
- const u8 *d = buf + start;
- const u8 *e = buf + end;
- u64a lastz0 = 0;
+hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
+ size_t len, m512 caseMask, m512 mask1,
+ m512 mask2, const struct cb_info *cbi, size_t start,
+ size_t end) {
+ const u8 *d = buf + start, *e = buf + end;
DEBUG_PRINTF("start %zu end %zu \n", start, end);
assert(d < e);
- if (d + 64 >= e) {
- goto tail;
- }
-
- // peel off first part to cacheline boundary
- const u8 *d1 = ROUNDUP_PTR(d, 64);
- if (scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- &lastz0, start, d1 - buf) == HWLM_TERMINATED) {
- return HWLM_TERMINATED;
- }
- d = d1;
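+ // carries the key0 match from the last byte of the previous 64-byte block
+ // so that pairs straddling a block boundary are still found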
+ u64a lastz0 = 0;
- for (; d + 64 < e; d += 64) {
- DEBUG_PRINTF("d %p e %p 0x%016llx\n", d, e, lastz0);
- m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
+ for (; d < e; d += 64) {
+ m512 v = and512(load512(d), caseMask);
- /* we have to pull the masks out of the AVX registers because we can't
- byte shift between the lanes */
+ // we have to pull the masks out of the AVX registers because we can't
+ // byte shift between the lanes
u64a z0 = eq512mask(mask1, v);
u64a z1 = eq512mask(mask2, v);
u64a z = (lastz0 | (z0 << 1)) & z1;
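+ // remember whether the last byte of this block matched key0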
lastz0 = z0 >> 63;
// On large packet buffers, this prefetch appears to get us about 2%.
- __builtin_prefetch(d + 256);
-
- DEBUG_PRINTF("z 0x%016llx\n", z);
+ __builtin_prefetch(d + 128);
- DOUBLE_ZSCAN();
+ hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
-
-tail:
- DEBUG_PRINTF("d %p e %p off %zu \n", d, e, d - buf);
- // finish off tail
-
- return scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- &lastz0, d - buf, end);
+ return HWLM_SUCCESS;
}
static really_inline m128 getCaseMask(void) {
return set1_16x8(0xdf);
}
-/*
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
- size_t len, m128 caseMask, m128 mask1,
- const struct cb_info *cbi, size_t start,
- size_t end) {
- const u8 *d = buf + start;
- size_t l = end - start;
- DEBUG_PRINTF("l %zu\n", l);
- assert(l <= 16);
- if (!l) {
- return HWLM_SUCCESS;
- }
- m128 v = and128(loadu128(d), caseMask);
-
- // mask out where we can't match
- u32 mask = (0xFFFF >> (16 - l));
- u32 z = mask & movemask128(eq128(mask1, v));
-
- SINGLE_ZSCAN();
-
- return HWLM_SUCCESS;
-}*/
static really_inline
hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
- size_t len, size_t offset,
- m128 caseMask, m128 mask1,
+ size_t len, size_t offset, m128 caseMask, m128 mask1,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + offset;
u32 buf_off = start - offset;
u32 mask = ((1 << l) - 1) << buf_off;
- DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
u32 z = mask & movemask128(eq128(mask1, v));
+ DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
-
- SINGLE_ZSCAN();
-
- return HWLM_SUCCESS;
+ return single_zscan(n, d, buf, z, len, cbi);
}
-/*
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
- size_t len, m128 caseMask, m128 mask1,
- m128 mask2, const struct cb_info *cbi,
- size_t start, size_t end) {
- const u8 *d = buf + start;
- size_t l = end - start;
- if (!l) {
- return HWLM_SUCCESS;
- }
- assert(l <= 32);
-
- DEBUG_PRINTF("d %zu\n", d - buf);
- m128 v = and128(loadu128(d), caseMask);
-
- // mask out where we can't match
- u32 mask = (0xFFFF >> (16 - l));
- u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
- eq128(mask2, v)));
-
- DOUBLE_ZSCAN();
-
- return HWLM_SUCCESS;
-}*/
static really_inline
hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
// mask out where we can't match
u32 mask = ((1 << l) - 1) << buf_off;
- DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
eq128(mask2, v)));
+ DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
- DOUBLE_ZSCAN();
-
- return HWLM_SUCCESS;
+ return double_zscan(n, d, buf, z, len, cbi);
}
static really_inline
// On large packet buffers, this prefetch appears to get us about 2%.
__builtin_prefetch(d + 128);
+ DEBUG_PRINTF("z 0x%08x\n", z);
- SINGLE_ZSCAN();
+ hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
return HWLM_SUCCESS;
}
// On large packet buffers, this prefetch appears to get us about 2%.
__builtin_prefetch(d + 128);
DEBUG_PRINTF("z 0x%08x\n", z);
- DOUBLE_ZSCAN();
+ hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+ if (unlikely(result != HWLM_SUCCESS))
+ return result;
}
return HWLM_SUCCESS;
}