#include "rose_internal.h"
#include "rose_program.h"
#include "rose_types.h"
+#include "validate_mask.h"
#include "runtime.h"
#include "scratch.h"
#include "ue2common.h"
return !!(reach[c / 8U] & (u8)1U << (c % 8U));
}
+/*
+ * Build an 8-byte validity mask: the lowest #low bytes and the highest
+ * #high bytes are zeroed, and the (8 - high - low) bytes in between are
+ * set to 0xff.
+ */
+static rose_inline
+u64a generateValidMask(const s32 high, const s32 low) {
+    assert(high + low < 8);
+    DEBUG_PRINTF("high %d low %d\n", high, low);
+    const u64a all_ones = ~0ull;
+    // Trim (high + low) bytes off the top, then slide up past the low
+    // bytes; equivalent to masking both ends off a run of ones.
+    return (all_ones >> ((high + low) * 8)) << (low * 8);
+}
+
+/*
+ * Do the single-byte check if only one lookaround entry exists
+ * and it's a single mask.
+ * Return success (1) when the byte satisfies the and/cmp comparison
+ * (sense flipped by 'negation'), and also when the byte lies beyond the
+ * current buffer or before the start of the history buffer, since
+ * nothing can be proven about bytes we cannot see.
+ */
+static rose_inline
+int roseCheckByte(const struct core_info *ci, u8 and_mask, u8 cmp_mask,
+                  u8 negation, s32 checkOffset, u64a end) {
+    DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
+                 ci->buf_offset, ci->buf_offset + ci->len);
+    // A negative checkOffset reaching back past the start of the stream
+    // can never be satisfied.
+    if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+        DEBUG_PRINTF("too early, fail\n");
+        return 0;
+    }
+
+    // Offset of the byte to test relative to the start of the current
+    // buffer; negative values index into the history buffer.
+    const s64a base_offset = end - ci->buf_offset;
+    s64a offset = base_offset + checkOffset;
+    DEBUG_PRINTF("checkOffset=%d offset=%lld\n", checkOffset, offset);
+    u8 c;
+    if (offset >= 0) {
+        if (offset >= (s64a)ci->len) {
+            DEBUG_PRINTF("in the future\n");
+            return 1;
+        } else {
+            assert(offset < (s64a)ci->len);
+            DEBUG_PRINTF("check byte in buffer\n");
+            c = ci->buf[offset];
+        }
+    } else {
+        if (offset >= -(s64a) ci->hlen) {
+            DEBUG_PRINTF("check byte in history\n");
+            c = ci->hbuf[ci->hlen + offset];
+        } else {
+            DEBUG_PRINTF("before history and return\n");
+            return 1;
+        }
+    }
+
+    // 'negation' (0 or 1) flips the sense of the comparison for reaches
+    // that were encoded in negated form.
+    if (((and_mask & c) != cmp_mask) ^ negation) {
+        DEBUG_PRINTF("char 0x%02x at offset %lld failed byte check\n",
+                     c, offset);
+        return 0;
+    }
+
+    DEBUG_PRINTF("real offset=%lld char=%02x\n", offset, c);
+    DEBUG_PRINTF("OK :)\n");
+    return 1;
+}
+
+/*
+ * Gather up to 8 bytes around the check offset (spanning the history and
+ * current buffers as needed) and validate them against the and/cmp/neg
+ * masks in one shot. Bytes that fall before the history buffer or after
+ * the current buffer are excluded via valid_data_mask.
+ * Returns 1 on success, 0 on failure.
+ */
+static rose_inline
+int roseCheckMask(const struct core_info *ci, u64a and_mask, u64a cmp_mask,
+                  u64a neg_mask, s32 checkOffset, u64a end) {
+    const s64a base_offset = (s64a)end - ci->buf_offset;
+    s64a offset = base_offset + checkOffset;
+    DEBUG_PRINTF("rel offset %lld\n", base_offset);
+    DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+    // A negative checkOffset reaching back past the start of the stream
+    // can never be satisfied.
+    if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+        DEBUG_PRINTF("too early, fail\n");
+        return 0;
+    }
+
+    u64a data = 0;
+    u64a valid_data_mask = ~0ULL; // mask for validate check.
+    // A 0xff byte means that this byte is in the buffer.
+    s32 shift_l = 0; // size of bytes in the future.
+    s32 shift_r = 0; // size of bytes before the history.
+    s32 h_len = 0;   // size of bytes in the history buffer.
+    s32 c_len = 8;   // size of bytes in the current buffer.
+    if (offset < 0) {
+        // in or before history buffer.
+        if (offset + 8 <= -(s64a)ci->hlen) {
+            DEBUG_PRINTF("before history and return\n");
+            return 1;
+        }
+        const u8 *h_start = ci->hbuf; // start pointer in history buffer.
+        if (offset < -(s64a)ci->hlen) {
+            // some bytes are before history.
+            shift_r = -(offset + (s64a)ci->hlen);
+            DEBUG_PRINTF("shift_r %d\n", shift_r);
+        } else {
+            h_start += ci->hlen + offset;
+        }
+        if (offset + 7 < 0) {
+            DEBUG_PRINTF("all in history buffer\n");
+            data = partial_load_u64a(h_start, 8 - shift_r);
+        } else {
+            // history part
+            c_len = offset + 8;
+            h_len = -offset - shift_r;
+            DEBUG_PRINTF("%d bytes in history\n", h_len);
+            u64a data_h = partial_load_u64a(h_start, h_len);
+            // current part
+            if (c_len > (s64a)ci->len) {
+                shift_l = c_len - ci->len;
+                c_len = ci->len;
+            }
+            data = partial_load_u64a(ci->buf, c_len);
+            // splice the current-buffer bytes above the history bytes.
+            data <<= h_len << 3;
+            data |= data_h;
+        }
+        if (shift_r) {
+            // re-align so missing leading bytes sit at the low end.
+            data <<= shift_r << 3;
+        }
+    } else {
+        // current buffer.
+        if (offset + c_len > (s64a)ci->len) {
+            if (offset >= (s64a)ci->len) {
+                DEBUG_PRINTF("all in the future\n");
+                return 1;
+            }
+            // some bytes in the future.
+            shift_l = offset + c_len - ci->len;
+            c_len = ci->len - offset;
+            data = partial_load_u64a(ci->buf + offset, c_len);
+        } else {
+            data = unaligned_load_u64a(ci->buf + offset);
+        }
+    }
+
+    if (shift_l || shift_r) {
+        valid_data_mask = generateValidMask(shift_l, shift_r);
+    }
+    DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
+
+    if (validateMask(data, valid_data_mask,
+                     and_mask, cmp_mask, neg_mask)) {
+        DEBUG_PRINTF("check mask successfully\n");
+        return 1;
+    } else {
+        return 0;
+    }
+}
/**
 * \brief Scan around a literal, checking that the "lookaround" reach masks
* are satisfied.
}
PROGRAM_NEXT_INSTRUCTION
+ PROGRAM_CASE(CHECK_MASK) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ DEBUG_PRINTF("failed mask check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ continue;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_BYTE) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckByte(ci, ri->and_mask, ri->cmp_mask,
+ ri->negation, ri->offset, end)) {
+ DEBUG_PRINTF("failed byte check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ continue;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
PROGRAM_CASE(CHECK_INFIX) {
if (!roseTestInfix(t, scratch, ri->queue, ri->lag, ri->report,
end)) {
#include "util/make_unique.h"
#include "util/multibit_build.h"
#include "util/order_check.h"
+#include "util/popcount.h"
#include "util/queue_index_factory.h"
#include "util/report_manager.h"
#include "util/ue2string.h"
case ROSE_INSTR_CHECK_BOUNDS: return &u.checkBounds;
case ROSE_INSTR_CHECK_NOT_HANDLED: return &u.checkNotHandled;
case ROSE_INSTR_CHECK_LOOKAROUND: return &u.checkLookaround;
+ case ROSE_INSTR_CHECK_MASK: return &u.checkMask;
+ case ROSE_INSTR_CHECK_BYTE: return &u.checkByte;
case ROSE_INSTR_CHECK_INFIX: return &u.checkInfix;
case ROSE_INSTR_CHECK_PREFIX: return &u.checkPrefix;
case ROSE_INSTR_ANCHORED_DELAY: return &u.anchoredDelay;
case ROSE_INSTR_CHECK_BOUNDS: return sizeof(u.checkBounds);
case ROSE_INSTR_CHECK_NOT_HANDLED: return sizeof(u.checkNotHandled);
case ROSE_INSTR_CHECK_LOOKAROUND: return sizeof(u.checkLookaround);
+ case ROSE_INSTR_CHECK_MASK: return sizeof(u.checkMask);
+ case ROSE_INSTR_CHECK_BYTE: return sizeof(u.checkByte);
case ROSE_INSTR_CHECK_INFIX: return sizeof(u.checkInfix);
case ROSE_INSTR_CHECK_PREFIX: return sizeof(u.checkPrefix);
case ROSE_INSTR_ANCHORED_DELAY: return sizeof(u.anchoredDelay);
ROSE_STRUCT_CHECK_BOUNDS checkBounds;
ROSE_STRUCT_CHECK_NOT_HANDLED checkNotHandled;
ROSE_STRUCT_CHECK_LOOKAROUND checkLookaround;
+ ROSE_STRUCT_CHECK_MASK checkMask;
+ ROSE_STRUCT_CHECK_BYTE checkByte;
ROSE_STRUCT_CHECK_INFIX checkInfix;
ROSE_STRUCT_CHECK_PREFIX checkPrefix;
ROSE_STRUCT_ANCHORED_DELAY anchoredDelay;
case ROSE_INSTR_CHECK_LOOKAROUND:
ri.u.checkLookaround.fail_jump = jump_val;
break;
+ case ROSE_INSTR_CHECK_MASK:
+ ri.u.checkMask.fail_jump = jump_val;
+ break;
+ case ROSE_INSTR_CHECK_BYTE:
+ ri.u.checkByte.fail_jump = jump_val;
+ break;
case ROSE_INSTR_CHECK_INFIX:
ri.u.checkInfix.fail_jump = jump_val;
break;
return verify_u32(idx);
}
+static
+bool checkReachMask(const CharReach &cr, u8 &andmask, u8 &cmpmask) {
+    const size_t sz = cr.count();
+    assert(sz > 0);
+    // An and/cmp mask pair can only represent a reach whose size is a
+    // power of two.
+    if (sz & (sz - 1)) {
+        return false;
+    }
+    make_and_cmp_mask(cr, &andmask, &cmpmask);
+    // Every "don't care" bit in andmask doubles the set of bytes the
+    // pair accepts; that set must be exactly the reach.
+    const size_t representable = (size_t)1 << popcount32((u8)(~andmask));
+    return representable == sz;
+}
+
+static
+bool checkReachWithFlip(const CharReach &cr, u8 &andmask,
+                        u8 &cmpmask, u8 &flip) {
+    // Try the reach as-is first, then its complement; record which form
+    // produced a usable and/cmp mask pair in 'flip'.
+    for (u8 negated = 0; negated < 2; negated++) {
+        if (checkReachMask(negated ? ~cr : cr, andmask, cmpmask)) {
+            flip = negated;
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Emit a single CHECK_BYTE instruction when the lookaround consists of
+ * exactly one entry whose reach is expressible as an and/cmp mask pair
+ * (possibly negated). Returns true if the instruction was emitted.
+ */
+static
+bool makeRoleByte(const vector<LookEntry> &look,
+                  vector<RoseInstruction> &program) {
+    if (look.size() != 1) {
+        return false;
+    }
+    const auto &entry = look[0];
+    u8 and_u8, cmp_u8;
+    u8 negate;
+    if (!checkReachWithFlip(entry.reach, and_u8, cmp_u8, negate)) {
+        return false;
+    }
+    const s32 check_offset = verify_s32(entry.offset);
+    DEBUG_PRINTF("CHECK BYTE offset=%d\n", check_offset);
+    auto ri = RoseInstruction(ROSE_INSTR_CHECK_BYTE,
+                              JumpTarget::NEXT_BLOCK);
+    ri.u.checkByte.and_mask = and_u8;
+    ri.u.checkByte.cmp_mask = cmp_u8;
+    ri.u.checkByte.negation = negate;
+    ri.u.checkByte.offset = check_offset;
+    program.push_back(ri);
+    return true;
+}
+
+/*
+ * Emit a single CHECK_MASK instruction when the whole lookaround fits in
+ * an 8-byte window and every entry's reach is expressible as an and/cmp
+ * byte pair (possibly negated). Returns true if the instruction was
+ * emitted.
+ */
+static
+bool makeRoleMask(const vector<LookEntry> &look,
+                  vector<RoseInstruction> &program) {
+    if (look.back().offset >= look.front().offset + 8) {
+        return false;
+    }
+    const s32 base_offset = verify_s32(look.front().offset);
+    u64a and_mask = 0;
+    u64a cmp_mask = 0;
+    u64a neg_mask = 0;
+    for (const auto &entry : look) {
+        u8 and_u8, cmp_u8, negate;
+        if (!checkReachWithFlip(entry.reach, and_u8, cmp_u8, negate)) {
+            return false;
+        }
+        DEBUG_PRINTF("entry offset %d\n", entry.offset);
+        // Place each entry's byte masks at its position in the window.
+        const u32 shift = (entry.offset - base_offset) << 3;
+        and_mask |= (u64a)and_u8 << shift;
+        cmp_mask |= (u64a)cmp_u8 << shift;
+        if (negate) {
+            neg_mask |= 0xffLLU << shift;
+        }
+    }
+    DEBUG_PRINTF("CHECK MASK and_mask=%llx cmp_mask=%llx\n",
+                 and_mask, cmp_mask);
+    auto ri = RoseInstruction(ROSE_INSTR_CHECK_MASK,
+                              JumpTarget::NEXT_BLOCK);
+    ri.u.checkMask.and_mask = and_mask;
+    ri.u.checkMask.cmp_mask = cmp_mask;
+    ri.u.checkMask.neg_mask = neg_mask;
+    ri.u.checkMask.offset = base_offset;
+    program.push_back(ri);
+    return true;
+}
+
static
void makeRoleLookaround(RoseBuildImpl &build, build_context &bc, RoseVertex v,
vector<RoseInstruction> &program) {
return;
}
+ if (makeRoleByte(look, program)) {
+ return;
+ }
+
+ if (makeRoleMask(look, program)) {
+ return;
+ }
+
DEBUG_PRINTF("role has lookaround\n");
u32 look_idx = addLookaround(bc, look);
u32 look_count = verify_u32(look.size());
--- /dev/null
+/*
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ue2common.h"
+
+// Check the bytes flagged in pos_mask: each must have compared equal,
+// i.e. produced a zero byte in cmp_result.
+// Returns one if the check passed, zero otherwise.
+static really_inline
+int posValidateMask(const u64a cmp_result, const u64a pos_mask) {
+    return (cmp_result & pos_mask) == 0;
+}
+
+/*
+ * Check the bytes flagged in neg_mask: each such byte of cmp_result must
+ * be nonzero, i.e. the masked comparison must NOT have matched there.
+ * Returns one if all flagged bytes are nonzero, zero otherwise.
+ * Uses the classic "word contains a zero byte" bit trick, testing the
+ * lowest 7 bits and the highest bit of every byte respectively.
+ */
+static really_inline
+int negValidateMask(const u64a cmp_result, const u64a neg_mask) {
+    const u64a count_mask = 0x7f7f7f7f7f7f7f7f;
+    // adding 0x7f to the low 7 bits of a byte carries into bit 7 iff
+    // any of those bits is set.
+    u64a check_low = (cmp_result & count_mask) + count_mask;
+    // a byte of check_all becomes 0x80 iff the corresponding byte of
+    // cmp_result is 0x00 (its low bits produced no carry and its high
+    // bit is clear); all other bytes become 0x00.
+    u64a check_all = ~(check_low | cmp_result | count_mask);
+    return !(check_all & neg_mask);
+}
+
+/*
+ * Validate up to 8 bytes of input ('data') against the lookaround masks.
+ * valid_data_mask holds 0xff for every byte of 'data' that was actually
+ * present in the buffers; absent bytes are excluded from all checks.
+ * A live byte passes if (data & and_mask) == cmp_mask there, with the
+ * sense flipped for bytes flagged in neg_mask (those must NOT be equal).
+ * Returns one if all live bytes pass, zero otherwise.
+ */
+static really_inline
+int validateMask(u64a data, u64a valid_data_mask, u64a and_mask,
+                 u64a cmp_mask, u64a neg_mask) {
+    // skip some byte where valid_data_mask is 0x00 there.
+    and_mask &= valid_data_mask;
+    cmp_mask &= valid_data_mask;
+    neg_mask &= valid_data_mask;
+    // a zero byte in cmp_result means that byte compared equal.
+    u64a cmp_result = (data & and_mask) ^ cmp_mask;
+    /* do the positive check first since it's cheaper */
+    if (posValidateMask(cmp_result, ~neg_mask)
+        && negValidateMask(cmp_result, neg_mask)) {
+        return 1;
+    } else {
+        DEBUG_PRINTF("data %llx valid_data_mask(vdm) %llx\n",
+                     data, valid_data_mask);
+        DEBUG_PRINTF("and_mask & vdm %llx cmp_mask & vdm %llx\n", and_mask,
+                     cmp_mask);
+        DEBUG_PRINTF("cmp_result %llx neg_mask & vdm %llx\n",
+                     cmp_result, neg_mask);
+        return 0;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "rose/validate_mask.h"
+#include "gtest/gtest.h"
+
+#define ONES64 0xffffffffffffffffULL
+
+/* valid_data_mask is flexible, don't need to be fixed in Info:
+ * each case pins down only the input data and the three masks. */
+struct ValidateMaskTestInfo {
+    u64a data;     // the 8 input bytes under test.
+    u64a and_mask; // bits of interest in each byte of data.
+    u64a cmp_mask; // expected value of (data & and_mask).
+    u64a neg_mask; // bytes where the comparison is expected to differ.
+};
+
+static const ValidateMaskTestInfo testBasic[] = {
+    /* data is randomly picked */
+    {0x1234abcd4321dcbaULL, 0xff09bbdd7f7ffeffULL,
+     0x1200abcd4561dcbbULL, 0xffff00ffULL},
+    /* data = "VaLiDaTe" */
+    {0x56614c6944615465ULL, 0xe0feffffdf7b5480ULL,
+     0x40614c6946615400ULL, 0xff0000ff000000ULL},
+    /* data = "\0\0\0MASK\0" */
+    {0x4d41534b00ULL, 0xfffffefebfdf002cULL,
+     0x5536344c0173002cULL, 0xffffff0000ff00ffULL},
+    /* data = "FOo14foo" */
+    {0x464f6f3134666f6fULL, 0xdfdffffef8c0f000ULL,
+     0x46466f3030406000ULL, 0xff000000000000ULL},
+    /* data = "FOo14foo" with different cmp_mask and neg_mask */
+    {0x464f6f3134666f6fULL, 0xdfdffffef8c0f000ULL,
+     0x44464f3034606f60ULL, 0xffffff00ffffffffULL},
+};
+
+/*
+ * Fill validMasks with the 37 legal validity masks: for each run width
+ * w in [1, 8] bytes there are (9 - w) possible positions of a
+ * contiguous 0xff run
+ * (0xff ... 0xff00000000000000, 0xffff ... 0xffff000000000000, ...),
+ * plus 0xffffffffffffffff and the empty mask 0.
+ * Returns the number of masks written.
+ */
+static int initLegalValidMasks(u64a validMasks[]) {
+    int num = 0;
+    for (int width = 8; width >= 1; width--) {
+        const u64a run = ONES64 >> (8 * (8 - width));
+        for (int shift = 0; shift + width <= 8; shift++) {
+            validMasks[num++] = run << (8 * shift);
+        }
+    }
+    validMasks[num++] = 0;
+    return num;
+}
+
+/*
+ * Fill negMasks with all 256 byte-granular masks, where each of the 8
+ * bytes is independently 0x00 or 0xff
+ * (0, 0xff, 0xff00, 0xffff, ..., 0xffffffffffffffff).
+ * Returns the number of masks written.
+ */
+static int initLegalNegMasks(u64a negMasks[]) {
+    int num = 0;
+    for (int pattern = 0; pattern < 256; pattern++) {
+        // bit b of 'pattern' selects whether byte b is 0xff.
+        u64a mask = 0;
+        for (int b = 0; b < 8; b++) {
+            if (pattern & (1 << b)) {
+                mask |= (u64a)0xff << (8 * b);
+            }
+        }
+        negMasks[num++] = mask;
+    }
+    return num;
+}
+
+
+/*
+ * Check that every test case passes validateMask under all 37 legal
+ * valid_data_mask values: masking bytes off as "unseen" must never turn
+ * a passing case into a failure.
+ */
+TEST(ValidateMask, ValidMaskTests) {
+    u64a validMasks[256];
+    int num = initLegalValidMasks(validMasks);
+
+    for (const auto &t : testBasic) {
+        for (int i = 0; i < num; i++) {
+            EXPECT_EQ(1, validateMask(t.data,
+                                      validMasks[i],
+                                      t.and_mask,
+                                      t.cmp_mask,
+                                      t.neg_mask));
+        }
+    }
+}
+
+/*
+ * Fix neg_mask to 0 and to ONES64 and check validateMask's output for
+ * every legal valid_mask:
+ * - with neg_mask = 0, the check passes iff no live byte is one the
+ *   case expects to mismatch (t.neg_mask);
+ * - with neg_mask = ONES64, it passes iff every live byte is expected
+ *   to mismatch (valid_mask is a subset of t.neg_mask).
+ */
+TEST(ValidateMask, AdvancedValidMaskTests) {
+    u64a validMasks[256];
+    int num = initLegalValidMasks(validMasks);
+    int bool_result;
+    for (const auto &t: testBasic) {
+        for (int i = 0; i < num; i++) {
+            bool_result = !(validMasks[i] & t.neg_mask);
+            EXPECT_EQ(bool_result, validateMask(t.data,
+                                                validMasks[i],
+                                                t.and_mask,
+                                                t.cmp_mask,
+                                                0));
+            bool_result = (validMasks[i] | t.neg_mask) == t.neg_mask;
+            EXPECT_EQ(bool_result, validateMask(t.data,
+                                                validMasks[i],
+                                                t.and_mask,
+                                                t.cmp_mask,
+                                                ONES64));
+        }
+    }
+}
+
+/*
+ * Test every pair of valid_data_mask and neg_mask
+ * and compute the expected output by a formula (see inline comment).
+ */
+TEST(ValidateMask, FullTests) {
+    u64a validMasks[256];
+    u64a negMasks[256];
+    int vm_num = initLegalValidMasks(validMasks);
+    int nm_num = initLegalNegMasks(negMasks);
+    int bool_result;
+    for (const auto &t: testBasic) {
+        for (int i = 0; i < vm_num; i++) {
+            for (int j = 0; j < nm_num; j++) {
+                /*
+                 * treat t.neg_mask as a truth table (a negative truth
+                 * table): we expect validateMask to output 1 if and only
+                 * if the truth table (tt) and neg_mask (nm) look the same
+                 * under "&" with valid_data_mask (vdm), that is
+                 *     output = (tt & vdm) == (nm & vdm) ? 1 : 0;
+                 */
+                bool_result = (t.neg_mask & validMasks[i]) ==
+                              (negMasks[j] & validMasks[i]);
+                EXPECT_EQ(bool_result, validateMask(t.data,
+                                                    validMasks[i],
+                                                    t.and_mask,
+                                                    t.cmp_mask,
+                                                    negMasks[j]));
+            }
+        }
+    }
+}
+
+/*
+ * Ignore testBasic[0]'s own neg_mask and exercise hand-picked
+ * neg_mask / valid_mask combinations against it.
+ */
+TEST(ValidateMask, ManualTest_0) {
+    const auto &t = testBasic[0];
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 8,
+                              t.and_mask, t.cmp_mask, 0xffff0000ULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 16) >> 8,
+                              t.and_mask, t.cmp_mask, 0xffff0000ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 16,
+                              t.and_mask, t.cmp_mask, 0xffffff00ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 24,
+                              t.and_mask, t.cmp_mask, 0xff00ffffULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 32,
+                              t.and_mask, t.cmp_mask, 0xffffffff00ffULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 40,
+                              t.and_mask, t.cmp_mask, 0xff00ffULL));
+    // with no live bytes, any neg_mask passes trivially.
+    EXPECT_EQ(1, validateMask(t.data, 0,
+                              t.and_mask, t.cmp_mask, ONES64));
+    EXPECT_EQ(1, validateMask(t.data, 0,
+                              t.and_mask, t.cmp_mask, ~t.neg_mask));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 16,
+                              t.and_mask, t.cmp_mask, 0xff0000ffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64,
+                              t.and_mask, t.cmp_mask, 0xffff0000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 32,
+                              t.and_mask, t.cmp_mask, 0xff00ffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 8,
+                              t.and_mask, t.cmp_mask, 0xffffffffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 16,
+                              t.and_mask, t.cmp_mask, 0xff0000ffULL));
+}
+
+/* Hand-picked neg_mask / valid_mask combinations for testBasic[1]. */
+TEST(ValidateMask, ManualTest_1) {
+    const auto &t = testBasic[1];
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 16,
+                              t.and_mask, t.cmp_mask, 0xff0000ff00ffffULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 32,
+                              t.and_mask, t.cmp_mask, 0xff000000000000ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 32,
+                              t.and_mask, t.cmp_mask, 0xff0000ffff00ffULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 56,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 8,
+                              t.and_mask, t.cmp_mask, 0xffff0000ff000000ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 16,
+                              t.and_mask, t.cmp_mask, 0xff000000ULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 32) >> 16,
+                              t.and_mask, t.cmp_mask, 0xff00ff00));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 40,
+                              t.and_mask, t.cmp_mask, 0xff00000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 48,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 56,
+                              t.and_mask, t.cmp_mask, 0xff00000000000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 16,
+                              t.and_mask, t.cmp_mask, 0xff0000ffff0000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 8,
+                              t.and_mask, t.cmp_mask, 0xff000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 16,
+                              t.and_mask, t.cmp_mask, 0xffff000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 40) >> 16,
+                              t.and_mask, t.cmp_mask, 0xff000000000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 8,
+                              t.and_mask, t.cmp_mask, ONES64));
+}
+
+/* Hand-picked neg_mask / valid_mask combinations for testBasic[2]. */
+TEST(ValidateMask, ManualTest_2) {
+    const auto &t = testBasic[2];
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 24,
+                              t.and_mask, t.cmp_mask, 0xffffff0000000000ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 56,
+                              t.and_mask, t.cmp_mask, 0xff00000000000000ULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 << 56,
+                              t.and_mask, t.cmp_mask, 0xff00ffffff00ffffULL));
+    EXPECT_EQ(1, validateMask(t.data, 0,
+                              t.and_mask, t.cmp_mask, ONES64));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 24,
+                              t.and_mask, t.cmp_mask, 0xff00ffULL));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 32,
+                              t.and_mask, t.cmp_mask, 0xffff00ff00ffULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 32) >> 24,
+                              t.and_mask, t.cmp_mask, 0xff0000ULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 32) >> 24,
+                              t.and_mask, t.cmp_mask, 0xff00ffULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 56) >> 40,
+                              t.and_mask, t.cmp_mask, 0xff0000ULL));
+    EXPECT_EQ(1, validateMask(t.data, (ONES64 << 56) >> 32,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(1, validateMask(t.data, ONES64 >> 40,
+                              t.and_mask, t.cmp_mask, 0xffffffff00ffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, ONES64,
+                              t.and_mask, t.cmp_mask, ONES64));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 56,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 48,
+                              t.and_mask, t.cmp_mask, 0xff00000000000000ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 << 8,
+                              t.and_mask, t.cmp_mask, 0xffffff00000000ffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 32,
+                              t.and_mask, t.cmp_mask, 0xffff00ULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 32,
+                              t.and_mask, t.cmp_mask, 0xffffffULL));
+    EXPECT_EQ(0, validateMask(t.data, ONES64 >> 16,
+                              t.and_mask, t.cmp_mask, 0xff00ffULL));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 32) >> 24,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 32) >> 24,
+                              t.and_mask, t.cmp_mask, 0xffffff00000000ffULL));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 32) >> 24,
+                              t.and_mask, t.cmp_mask, 0xffffff000000ff00ULL));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 56) >> 40,
+                              t.and_mask, t.cmp_mask, 0));
+    EXPECT_EQ(0, validateMask(t.data, (ONES64 << 56) >> 48,
+                              t.and_mask, t.cmp_mask, 0xff00ULL));
+}