/*
* Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2020-2024, VectorCamp PC
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
}
static really_inline
-void get_conf_stride_1(const u8 *itPtr, UNUSED const u8 *start_ptr,
- UNUSED const u8 *end_ptr, u32 domain_mask_flipped,
+void get_conf_stride(const u8 *itPtr, UNUSED const u8 *start_ptr,
+ UNUSED const u8 *end_ptr, u32 domain_mask, u8 stride,
const u64a *ft, u64a *conf0, u64a *conf8, m128 *s) {
- /* +1: the zones ensure that we can read the byte at z->end */
assert(itPtr >= start_ptr && itPtr + ITER_BYTES <= end_ptr);
- u64a domain_mask = ~domain_mask_flipped;
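+ // Merged stride handling: compute the stride-4 positions first, then
+ // stride-2, then stride-1, returning as soon as the requested stride
+ // has been covered.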
+ // Stride 4 positions (formerly get_conf_stride_4): bytes 0, 4, 8 and 12.
u64a it_hi = *(const u64a *)itPtr;
u64a it_lo = *(const u64a *)(itPtr + 8);
u64a reach0 = domain_mask & it_hi;
- u64a reach1 = domain_mask & (it_hi >> 8);
+ u64a reach4 = domain_mask & (it_hi >> 32);
+ u64a reach8 = domain_mask & it_lo;
+ u64a reach12 = domain_mask & (it_lo >> 32);
+
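+ // Each reach value is a domain-masked window of the input, used to
+ // index the lookup table ft.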
+ m128 st0 = load_m128_from_u64a(ft + reach0);
+ m128 st4 = load_m128_from_u64a(ft + reach4);
+ m128 st8 = load_m128_from_u64a(ft + reach8);
+ m128 st12 = load_m128_from_u64a(ft + reach12);
+
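+ // Shift the entries so they line up with their byte positions: byte 12
+ // lands at offset 4 of the conf8 window, which is extracted after the
+ // state has been shifted down by 8 bytes.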
+ st4 = lshiftbyte_m128(st4, 4);
+ st12 = lshiftbyte_m128(st12, 4);
+
+ *s = or128(*s, st0);
+ *s = or128(*s, st4);
+
+ if (stride == 4) {
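+ // movq extracts the low 64 bits of the accumulated state; inverting
+ // with ~0ULL leaves set bits at positions that still need confirmation.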
+ *conf0 = movq(*s);
+ *s = rshiftbyte_m128(*s, 8);
+ *conf0 ^= ~0ULL;
+
+ *s = or128(*s, st8);
+ *s = or128(*s, st12);
+ *conf8 = movq(*s);
+ *s = rshiftbyte_m128(*s, 8);
+ *conf8 ^= ~0ULL;
+ return;
+ }
+
+ // Stride 2 positions (formerly get_conf_stride_2): additionally bytes 2, 6, 10 and 14.
u64a reach2 = domain_mask & (it_hi >> 16);
+ u64a reach6 = domain_mask & (it_hi >> 48);
+ u64a reach10 = domain_mask & (it_lo >> 16);
+ u64a reach14 = domain_mask & (it_lo >> 48);
+
+ m128 st2 = load_m128_from_u64a(ft + reach2);
+ m128 st6 = load_m128_from_u64a(ft + reach6);
+ m128 st10 = load_m128_from_u64a(ft + reach10);
+ m128 st14 = load_m128_from_u64a(ft + reach14);
+
+ st2 = lshiftbyte_m128(st2, 2);
+ st6 = lshiftbyte_m128(st6, 6);
+ st10 = lshiftbyte_m128(st10, 2);
+ st14 = lshiftbyte_m128(st14, 6);
+
+ *s = or128(*s, st2);
+ *s = or128(*s, st6);
+
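+ // For stride 2 every even position has now been accumulated.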
+ if (stride == 2) {
+ *conf0 = movq(*s);
+ *s = rshiftbyte_m128(*s, 8);
+ *conf0 ^= ~0ULL;
+
+ *s = or128(*s, st8);
+ *s = or128(*s, st10);
+ *s = or128(*s, st12);
+ *s = or128(*s, st14);
+
+ *conf8 = movq(*s);
+ *s = rshiftbyte_m128(*s, 8);
+ *conf8 ^= ~0ULL;
+ return;
+ }
+
+ // Stride 1 positions (formerly get_conf_stride_1): the remaining odd bytes 1, 3, ..., 15.
+ u64a reach1 = domain_mask & (it_hi >> 8);
u64a reach3 = domain_mask & (it_hi >> 24);
- u64a reach4 = domain_mask & (it_hi >> 32);
u64a reach5 = domain_mask & (it_hi >> 40);
- u64a reach6 = domain_mask & (it_hi >> 48);
u64a reach7 = domain_mask & ((it_hi >> 56) | (it_lo << 8));
- u64a reach8 = domain_mask & it_lo;
u64a reach9 = domain_mask & (it_lo >> 8);
- u64a reach10 = domain_mask & (it_lo >> 16);
u64a reach11 = domain_mask & (it_lo >> 24);
- u64a reach12 = domain_mask & (it_lo >> 32);
u64a reach13 = domain_mask & (it_lo >> 40);
- u64a reach14 = domain_mask & (it_lo >> 48);
u64a reach15 = domain_mask & unaligned_load_u32(itPtr + 15);
- m128 st0 = load_m128_from_u64a(ft + reach0);
- m128 st1 = lshiftbyte_m128(load_m128_from_u64a(ft + reach1), 1);
- m128 st2 = lshiftbyte_m128(load_m128_from_u64a(ft + reach2), 2);
- m128 st3 = lshiftbyte_m128(load_m128_from_u64a(ft + reach3), 3);
- m128 st4 = lshiftbyte_m128(load_m128_from_u64a(ft + reach4), 4);
- m128 st5 = lshiftbyte_m128(load_m128_from_u64a(ft + reach5), 5);
- m128 st6 = lshiftbyte_m128(load_m128_from_u64a(ft + reach6), 6);
- m128 st7 = lshiftbyte_m128(load_m128_from_u64a(ft + reach7), 7);
- m128 st8 = load_m128_from_u64a(ft + reach8);
- m128 st9 = lshiftbyte_m128(load_m128_from_u64a(ft + reach9), 1);
- m128 st10 = lshiftbyte_m128(load_m128_from_u64a(ft + reach10), 2);
- m128 st11 = lshiftbyte_m128(load_m128_from_u64a(ft + reach11), 3);
- m128 st12 = lshiftbyte_m128(load_m128_from_u64a(ft + reach12), 4);
- m128 st13 = lshiftbyte_m128(load_m128_from_u64a(ft + reach13), 5);
- m128 st14 = lshiftbyte_m128(load_m128_from_u64a(ft + reach14), 6);
- m128 st15 = lshiftbyte_m128(load_m128_from_u64a(ft + reach15), 7);
+ m128 st1 = load_m128_from_u64a(ft + reach1);
+ m128 st3 = load_m128_from_u64a(ft + reach3);
+ m128 st5 = load_m128_from_u64a(ft + reach5);
+ m128 st7 = load_m128_from_u64a(ft + reach7);
+ m128 st9 = load_m128_from_u64a(ft + reach9);
+ m128 st11 = load_m128_from_u64a(ft + reach11);
+ m128 st13 = load_m128_from_u64a(ft + reach13);
+ m128 st15 = load_m128_from_u64a(ft + reach15);
+
+ st1 = lshiftbyte_m128(st1, 1);
+ st3 = lshiftbyte_m128(st3, 3);
+ st5 = lshiftbyte_m128(st5, 5);
+ st7 = lshiftbyte_m128(st7, 7);
+ st9 = lshiftbyte_m128(st9, 1);
+ st11 = lshiftbyte_m128(st11, 3);
+ st13 = lshiftbyte_m128(st13, 5);
+ st15 = lshiftbyte_m128(st15, 7);
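+ // Fold the odd positions into the OR reduction below.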
st0 = or128(st0, st1);
st2 = or128(st2, st3);
*s = rshiftbyte_m128(st, 8);
}
-static really_inline
-void get_conf_stride_2(const u8 *itPtr, UNUSED const u8 *start_ptr,
- UNUSED const u8 *end_ptr, u32 domain_mask_flipped,
- const u64a *ft, u64a *conf0, u64a *conf8, m128 *s) {
- assert(itPtr >= start_ptr && itPtr + ITER_BYTES <= end_ptr);
-
- u64a reach0 = andn(domain_mask_flipped, itPtr);
- u64a reach2 = andn(domain_mask_flipped, itPtr + 2);
- u64a reach4 = andn(domain_mask_flipped, itPtr + 4);
- u64a reach6 = andn(domain_mask_flipped, itPtr + 6);
-
- m128 st0 = load_m128_from_u64a(ft + reach0);
- m128 st2 = load_m128_from_u64a(ft + reach2);
- m128 st4 = load_m128_from_u64a(ft + reach4);
- m128 st6 = load_m128_from_u64a(ft + reach6);
-
- u64a reach8 = andn(domain_mask_flipped, itPtr + 8);
- u64a reach10 = andn(domain_mask_flipped, itPtr + 10);
- u64a reach12 = andn(domain_mask_flipped, itPtr + 12);
- u64a reach14 = andn(domain_mask_flipped, itPtr + 14);
-
- m128 st8 = load_m128_from_u64a(ft + reach8);
- m128 st10 = load_m128_from_u64a(ft + reach10);
- m128 st12 = load_m128_from_u64a(ft + reach12);
- m128 st14 = load_m128_from_u64a(ft + reach14);
-
- st2 = lshiftbyte_m128(st2, 2);
- st4 = lshiftbyte_m128(st4, 4);
- st6 = lshiftbyte_m128(st6, 6);
-
- *s = or128(*s, st0);
- *s = or128(*s, st2);
- *s = or128(*s, st4);
- *s = or128(*s, st6);
-
- *conf0 = movq(*s);
- *s = rshiftbyte_m128(*s, 8);
- *conf0 ^= ~0ULL;
-
- st10 = lshiftbyte_m128(st10, 2);
- st12 = lshiftbyte_m128(st12, 4);
- st14 = lshiftbyte_m128(st14, 6);
-
- *s = or128(*s, st8);
- *s = or128(*s, st10);
- *s = or128(*s, st12);
- *s = or128(*s, st14);
-
- *conf8 = movq(*s);
- *s = rshiftbyte_m128(*s, 8);
- *conf8 ^= ~0ULL;
-}
-
-static really_inline
-void get_conf_stride_4(const u8 *itPtr, UNUSED const u8 *start_ptr,
- UNUSED const u8 *end_ptr, u32 domain_mask_flipped,
- const u64a *ft, u64a *conf0, u64a *conf8, m128 *s) {
- assert(itPtr >= start_ptr && itPtr + ITER_BYTES <= end_ptr);
-
- u64a reach0 = andn(domain_mask_flipped, itPtr);
- u64a reach4 = andn(domain_mask_flipped, itPtr + 4);
- u64a reach8 = andn(domain_mask_flipped, itPtr + 8);
- u64a reach12 = andn(domain_mask_flipped, itPtr + 12);
-
- m128 st0 = load_m128_from_u64a(ft + reach0);
- m128 st4 = load_m128_from_u64a(ft + reach4);
- m128 st8 = load_m128_from_u64a(ft + reach8);
- m128 st12 = load_m128_from_u64a(ft + reach12);
-
- st4 = lshiftbyte_m128(st4, 4);
- st12 = lshiftbyte_m128(st12, 4);
-
- *s = or128(*s, st0);
- *s = or128(*s, st4);
- *conf0 = movq(*s);
- *s = rshiftbyte_m128(*s, 8);
- *conf0 ^= ~0ULL;
-
- *s = or128(*s, st8);
- *s = or128(*s, st12);
- *conf8 = movq(*s);
- *s = rshiftbyte_m128(*s, 8);
- *conf8 ^= ~0ULL;
-}
-
static really_inline
void do_confirm_fdr(u64a *conf, u8 offset, hwlmcb_rv_t *control,
const u32 *confBase, const struct FDR_Runtime_Args *a,
#define INVALID_MATCH_ID (~0U)
-#define FDR_MAIN_LOOP(zz, s, get_conf_fn) \
- do { \
- const u8 *tryFloodDetect = zz->floodPtr; \
- const u8 *start_ptr = zz->start; \
- const u8 *end_ptr = zz->end; \
- for (const u8 *itPtr = ROUNDDOWN_PTR(start_ptr, 64); itPtr + 4*ITER_BYTES <= end_ptr; \
- itPtr += 4*ITER_BYTES) { \
- __builtin_prefetch(itPtr); \
- } \
- \
- for (const u8 *itPtr = start_ptr; itPtr + ITER_BYTES <= end_ptr; \
- itPtr += ITER_BYTES) { \
- if (unlikely(itPtr > tryFloodDetect)) { \
- tryFloodDetect = floodDetect(fdr, a, &itPtr, tryFloodDetect,\
- &floodBackoff, &control, \
- ITER_BYTES); \
- if (unlikely(control == HWLM_TERMINATE_MATCHING)) { \
- return HWLM_TERMINATED; \
- } \
- } \
- __builtin_prefetch(itPtr + ITER_BYTES); \
- u64a conf0; \
- u64a conf8; \
- get_conf_fn(itPtr, start_ptr, end_ptr, domain_mask_flipped, \
- ft, &conf0, &conf8, &s); \
- do_confirm_fdr(&conf0, 0, &control, confBase, a, itPtr, \
- &last_match_id, zz); \
- do_confirm_fdr(&conf8, 8, &control, confBase, a, itPtr, \
- &last_match_id, zz); \
- if (unlikely(control == HWLM_TERMINATE_MATCHING)) { \
- return HWLM_TERMINATED; \
- } \
- } /* end for loop */ \
- } while (0) \
-
static never_inline
hwlm_error_t fdr_engine_exec(const struct FDR *fdr,
const struct FDR_Runtime_Args *a,
u32 floodBackoff = FLOOD_BACKOFF_START;
u32 last_match_id = INVALID_MATCH_ID;
- u32 domain_mask_flipped = ~fdr->domainMask;
- u8 stride = fdr->stride;
+
const u64a *ft =
(const u64a *)((const u8 *)fdr + ROUNDUP_CL(sizeof(struct FDR)));
assert(ISALIGNED_CL(ft));
for (size_t curZone = 0; curZone < numZone; curZone++) {
struct zone *z = &zones[curZone];
- dumpZoneInfo(z, curZone);
-
- /* When a zone contains less data than is processed in an iteration
- * of FDR_MAIN_LOOP(), we need to scan over some extra data.
- *
- * We have chosen to scan this extra data at the start of the
- * iteration. The extra data is either data we have already scanned or
- * garbage (if it is earlier than offset 0),
- *
- * As a result we need to shift the incoming state back so that it will
- * properly line up with the data being scanned.
- *
- * We also need to forbid reporting any matches in the data being
- * rescanned as they have already been reported (or are over garbage but
- * later stages should also provide that safety guarantee).
- */
-
- u8 shift = z->shift;
-
- state = variable_byte_shift_m128(state, shift);
-
- state = or128(state, load128(zone_or_mask[shift]));
-
- switch (stride) {
- case 1:
- FDR_MAIN_LOOP(z, state, get_conf_stride_1);
- break;
- case 2:
- FDR_MAIN_LOOP(z, state, get_conf_stride_2);
- break;
- case 4:
- FDR_MAIN_LOOP(z, state, get_conf_stride_4);
- break;
- default:
- break;
- }
+ m128 zone_mask = load128(zone_or_mask[z->shift]);
+
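+ // Prefetch the zone's first cache line; later lines are prefetched
+ // inside the scan loop.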
+ const u8 *cacheline = ROUNDDOWN_PTR(z->start, 64);
+ __builtin_prefetch(cacheline);
+
+ const u8 *tryFloodDetect = z->floodPtr;
+
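+ // Short zones rescan some data at the start of the iteration (bytes we
+ // have already scanned, or garbage before offset 0). Shift the incoming
+ // state back so it lines up with the data, and OR in the zone mask so
+ // no matches are reported over the rescanned region.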
+ state = variable_byte_shift_m128(state, z->shift);
+ state = or128(state, zone_mask);
+
+ for (const u8 *itPtr = z->start; itPtr + ITER_BYTES <= z->end;
+ itPtr += ITER_BYTES) {
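+ // Periodically check for literal floods; floodDetect may skip ahead
+ // and update the backoff.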
+ if (unlikely(itPtr > tryFloodDetect)) {
+ tryFloodDetect = floodDetect(fdr, a, &itPtr, tryFloodDetect,
+ &floodBackoff, &control,
+ ITER_BYTES);
+ if (unlikely(control == HWLM_TERMINATE_MATCHING)) {
+ return HWLM_TERMINATED;
+ }
+ }
+ u64a conf0;
+ u64a conf8;
+
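+ // Advance the prefetch one cache line per iteration, keeping it ahead
+ // of itPtr.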
+ cacheline += 64;
+ __builtin_prefetch(cacheline);
+
+ get_conf_stride(itPtr, z->start, z->end, fdr->domainMask, fdr->stride,
+ ft, &conf0, &conf8, &state);
+
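+ // conf0 carries candidate bits for bytes 0-7 of this block, conf8 for
+ // bytes 8-15.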
+ do_confirm_fdr(&conf0, 0, &control, confBase, a, itPtr,
+ &last_match_id, z);
+ do_confirm_fdr(&conf8, 8, &control, confBase, a, itPtr,
+ &last_match_id, z);
+ if (unlikely(control == HWLM_TERMINATE_MATCHING)) {
+ return HWLM_TERMINATED;
+ }
+ } /* end for loop */
}
return HWLM_SUCCESS;