/** \brief get the block for this key on the current level as a u8 ptr */
static really_inline
u8 *mmbit_get_block_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
- return mmbit_get_level_root(bits, level) +
- (key >> (mmbit_get_ks(max_level, level) + MMB_KEY_SHIFT)) *
- sizeof(MMB_TYPE);
+ u8 *level_root = mmbit_get_level_root(bits, level);
+ u32 ks = mmbit_get_ks(max_level, level);
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
}
/** \brief get the block for this key on the current level as a const u8 ptr */
static really_inline
const u8 *mmbit_get_block_ptr_const(const u8 *bits, u32 max_level, u32 level,
u32 key) {
- return mmbit_get_level_root_const(bits, level) +
- (key >> (mmbit_get_ks(max_level, level) + MMB_KEY_SHIFT)) *
- sizeof(MMB_TYPE);
+ const u8 *level_root = mmbit_get_level_root_const(bits, level);
+ u32 ks = mmbit_get_ks(max_level, level);
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT)) * sizeof(MMB_TYPE);
}
/** \brief get the _byte_ for this key on the current level as a u8 ptr */
static really_inline
u8 *mmbit_get_byte_ptr(u8 *bits, u32 max_level, u32 level, u32 key) {
u8 *level_root = mmbit_get_level_root(bits, level);
u32 ks = mmbit_get_ks(max_level, level);
- return level_root + (key >> (ks + MMB_KEY_SHIFT - 3));
+ return level_root + ((u64a)key >> (ks + MMB_KEY_SHIFT - 3));
}
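For intuition, here is a tiny standalone check of the offset arithmetic above (my own sketch, not part of the patch; it assumes MMB_KEY_SHIFT == 6 and sizeof(MMB_TYPE) == 8, i.e. 64-bit blocks, and uses stdint types in place of u8/u32/u64a):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint32_t key = 70; /* bit index within the multibit */
        uint32_t ks = 0;   /* keyshift at the leaf level */

        /* Block pointer: each 64-bit block covers 64 keys, so key 70 is
         * in block 1, at byte offset 8 from the level root. */
        uint64_t block_off = ((uint64_t)key >> (ks + 6)) * 8;
        assert(block_off == 8);

        /* Byte pointer: key 70 is bit 6 of byte 8 (bits 64..71). */
        uint64_t byte_off = (uint64_t)key >> (ks + 6 - 3);
        assert(byte_off == 8);
        return 0;
    }

The two offsets agree here because block 1 begins at byte 8 and bit 70 lives in byte 8, so both expressions land on the same byte of the level.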
/** \brief get our key value for the current level */
static really_inline
u32 mmbit_get_key_val(u32 max_level, u32 level, u32 key) {
return (key >> mmbit_get_ks(max_level, level)) & MMB_KEY_MASK;
}
static really_inline
-MMB_TYPE get_lowhi_masks(u32 level, u32 max_level, u32 block_min, u32 block_max,
- u32 block_base) {
+MMB_TYPE get_lowhi_masks(u32 level, u32 max_level, u64a block_min, u64a block_max,
+ u64a block_base) {
const u32 level_shift = (max_level - level) * MMB_KEY_SHIFT;
- u32 lshift = (block_min - block_base) >> level_shift;
- u32 ushift = (block_max - block_base) >> level_shift;
+ u64a lshift = (block_min - block_base) >> level_shift;
+ u64a ushift = (block_max - block_base) >> level_shift;
MMB_TYPE lmask = lshift < 64 ? ~mmb_mask_zero_to_nocheck(lshift) : 0;
MMB_TYPE umask =
ushift < 63 ? mmb_mask_zero_to_nocheck(ushift + 1) : MMB_ALL_ONES;
return lmask & umask;
}
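A quick worked example of the mask construction (again my own sketch, not part of the patch; mask_lt() is a stand-in for mmb_mask_zero_to_nocheck(), assumed to set bits [0, n)):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in: bits [0, n) set, no bounds checking. */
    static uint64_t mask_lt(uint32_t n) { return (1ULL << n) - 1; }

    int main(void) {
        /* One summary block whose 64 bits each cover 64 keys
         * (level_shift == 6), rooted at key 0. */
        uint64_t block_base = 0, block_min = 100, block_max = 500;
        uint32_t level_shift = 6;

        uint64_t lshift = (block_min - block_base) >> level_shift; /* 1 */
        uint64_t ushift = (block_max - block_base) >> level_shift; /* 7 */

        uint64_t lmask = lshift < 64 ? ~mask_lt((uint32_t)lshift) : 0;
        uint64_t umask = ushift < 63 ? mask_lt((uint32_t)ushift + 1)
                                     : ~0ULL;

        /* Only summary bits 1..7 survive. */
        assert((lmask & umask) == 0xfeULL);
        return 0;
    }

Summary bits 1..7 cover keys 64..511, which is exactly the part of [100, 500] this block can see.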
static really_inline
u32 mmbit_iterate_bounded_big(const u8 *bits, u32 total_bits, u32 it_start,
u32 it_end) {
- u32 key = 0;
+ u64a key = 0;
u32 ks = mmbit_keyshift(total_bits);
const u32 max_level = mmbit_maxlevel_from_keyshift(ks);
u32 level = 0;
assert(level <= max_level);
- u32 block_width = MMB_KEY_BITS << ks;
+ u64a block_width = MMB_KEY_BITS << ks;
- u32 block_base = key*block_width;
- u32 block_min = MAX(it_start, block_base);
- u32 block_max = MIN(it_end, block_base + block_width - 1);
+ u64a block_base = key * block_width;
+ u64a block_min = MAX(it_start, block_base);
+ u64a block_max = MIN(it_end, block_base + block_width - 1);
const u8 *block_ptr =
mmbit_get_level_root_const(bits, level) + key * sizeof(MMB_TYPE);
MMB_TYPE block = mmb_load(block_ptr);
// No bit found, go up a level
// we know that this block didn't have any answers, so we can push
// our start iterator forward.
- it_start = block_base + block_width;
- if (it_start > it_end) {
+ u64a next_start = block_base + block_width;
+ if (next_start > it_end) {
break;
}
if (level-- == 0) {
break;
}
+ it_start = next_start;
key >>= MMB_KEY_SHIFT;
ks += MMB_KEY_SHIFT;
}
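Widening block_width and block_base matters for exactly the large cases this patch enables: if I'm reading mmbit_keyshift() right, a 2^31-bit multibit has a root keyshift of 30, and 64 << 30 no longer fits in 32 bits. A standalone demonstration (illustrative constants, stdint types in place of u64a):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t ks = 30; /* assumed root keyshift for 2^31 bits */

        uint32_t narrow = (uint32_t)(UINT64_C(64) << ks); /* wraps to 0 */
        uint64_t wide = UINT64_C(64) << ks;               /* 2^36 */

        /* With a u32 block_width, block_base = key * block_width
         * collapses to 0 for every key, so the bounded iterator can
         * never advance past the first block. */
        printf("u32 block_width:  %" PRIu32 "\n", narrow); /* 0 */
        printf("u64a block_width: %" PRIu64 "\n", wide); /* 68719476736 */
        return 0;
    }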
ASSERT_TRUE(ba != nullptr);
// Set one bit on and run some checks.
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
+ SCOPED_TRACE(i);
+
mmbit_clear(ba, test_size);
mmbit_set(ba, test_size, i);
// Scanning from one past our bit to the end should find nothing.
if (i != test_size - 1) {
- ASSERT_EQ(MMB_INVALID, mmbit_iterate_bounded(ba, test_size, i + 1, test_size));
+ // Ordinary iterator.
+ ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, i));
+
+ // Bounded iterator.
+ ASSERT_EQ(MMB_INVALID,
+ mmbit_iterate_bounded(ba, test_size, i + 1, test_size));
}
}
}
// Switch everything on.
fill_mmbit(ba, test_size);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
if (i != 0) {
ASSERT_EQ(0U, mmbit_iterate_bounded(ba, test_size, 0, i));
}
// Set every even-numbered bit and see what we can see.
mmbit_clear(ba, test_size);
- for (u32 i = 0; i < test_size; i += 2) {
+ for (u64a i = 0; i < test_size; i += 2) {
mmbit_set(ba, test_size, i);
}
u32 even_stride = stride % 2 ? stride + 1 : stride;
- for (u32 i = 0; i < test_size; i += even_stride) {
+ for (u64a i = 0; i < test_size; i += even_stride) {
// Scanning from each even bit to the end should find itself.
ASSERT_EQ(i, mmbit_iterate_bounded(ba, test_size, i, test_size));
// Set every odd-numbered bit and see what we can see.
mmbit_clear(ba, test_size);
- for (u32 i = 1; i < test_size; i += 2) {
+ for (u64a i = 1; i < test_size; i += 2) {
mmbit_set(ba, test_size, i);
}
u32 even_stride = stride % 2 ? stride + 1 : stride;
- for (u32 i = 0; i < test_size; i += even_stride) {
+ for (u64a i = 0; i < test_size; i += even_stride) {
// Scanning from each even bit to the end should find i+1.
if (i+1 < test_size) {
ASSERT_EQ(i+1, mmbit_iterate_bounded(ba, test_size, i, test_size));
mmbit_clear(ba, test_size);
ASSERT_FALSE(mmbit_any(ba, test_size));
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
// Set a bit that wasn't set before.
mmbit_clear(ba, test_size);
ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
mmbit_clear(ba, test_size);
mmbit_set(ba, test_size, i);
ASSERT_EQ(MMB_INVALID, mmbit_iterate(ba, test_size, MMB_INVALID));
// Set all bits.
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
mmbit_set(ba, test_size, i);
}
// Find all bits.
u32 it = MMB_INVALID;
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
ASSERT_EQ(i, mmbit_iterate(ba, test_size, it));
it = i;
}
mmbit_clear(ba, test_size);
ASSERT_FALSE(mmbit_any_precise(ba, test_size));
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
mmbit_clear(ba, test_size);
mmbit_set(ba, test_size, i);
mmbit_clear(ba, test_size);
ASSERT_FALSE(mmbit_any(ba, test_size));
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
mmbit_clear(ba, test_size);
mmbit_set(ba, test_size, i);
fill_mmbit(ba, test_size);
// Use mmbit_unset_range to switch off any single bit.
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
ASSERT_TRUE(mmbit_isset(ba, test_size, i));
mmbit_unset_range(ba, test_size, i, i + 1);
// Use mmbit_unset_range to switch off all bits.
mmbit_unset_range(ba, test_size, 0, test_size);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
ASSERT_FALSE(mmbit_isset(ba, test_size, i));
}
ASSERT_TRUE(ba != nullptr);
// Use mmbit_unset_range to switch off bits in chunks of 3.
- for (u32 i = 0; i < test_size - 3; i += stride) {
+ for (u64a i = 0; i < test_size - 3; i += stride) {
// Switch on the bit before, the bits in question, and the bit after.
if (i > 0) {
mmbit_set(ba, test_size, i - 1);
}
- for (u32 j = i; j < min(i + 4, test_size); j++) {
+ for (u64a j = i; j < min(i + 4, (u64a)test_size); j++) {
mmbit_set(ba, test_size, j);
}
mmbit_init_range(ba, test_size, 0, test_size);
// Make sure they're all set.
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
SCOPED_TRACE(i);
ASSERT_TRUE(mmbit_isset(ba, test_size, i));
}
SCOPED_TRACE(test_size);
ASSERT_TRUE(ba != nullptr);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
mmbit_init_range(ba, test_size, i, i + 1);
// Only bit 'i' should be on.
ASSERT_EQ(chunk_begin, mmbit_iterate(ba, test_size, MMB_INVALID));
// All bits in the chunk should be on.
- for (u32 i = chunk_begin; i < chunk_end; i += stride) {
+ for (u64a i = chunk_begin; i < chunk_end; i += stride) {
SCOPED_TRACE(i);
ASSERT_TRUE(mmbit_isset(ba, test_size, i));
}
vector<mmbit_sparse_iter> it;
vector<u32> bits;
bits.reserve(test_size / stride);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
bits.push_back(i);
}
mmbBuildSparseIterator(it, bits, test_size);
// Switch every third bit on in the state.
mmbit_clear(ba, test_size);
ASSERT_FALSE(mmbit_any(ba, test_size));
- for (u32 i = 0; i < test_size; i += 3) {
+ for (u64a i = 0; i < test_size; i += 3) {
mmbit_set(ba, test_size, i);
}
ASSERT_EQ(0U, val);
ASSERT_EQ(0U, idx);
- for (u32 i = 0; i < test_size - 3; i += 3) {
+ for (u64a i = 0; i < test_size - 3; i += 3) {
mmbit_unset(ba, test_size, i);
val = mmbit_sparse_iter_begin(ba, test_size, &idx, &it[0], &state[0]);
ASSERT_EQ(i+3, val);
vector<mmbit_sparse_iter> it;
vector<u32> bits;
bits.reserve(test_size / stride);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
bits.push_back(i);
}
mmbBuildSparseIterator(it, bits, test_size);
vector<mmbit_sparse_iter> it;
vector<u32> bits;
bits.reserve(test_size / stride);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
bits.push_back(i);
mmbit_set(ba, test_size, i);
}
vector<mmbit_sparse_iter> it;
vector<u32> bits;
bits.reserve(test_size / stride);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
bits.push_back(i);
}
mmbBuildSparseIterator(it, bits, test_size);
vector<mmbit_sparse_iter> it;
vector<u32> bits;
bits.reserve(test_size / stride);
- for (u32 i = 0; i < test_size; i += stride) {
+ for (u64a i = 0; i < test_size; i += stride) {
bits.push_back(i);
}
mmbBuildSparseIterator(it, bits, test_size);
// Two sparse iterators: one for even bits, one for odd ones
vector<u32> even, odd;
- for (u32 i = 0; i < test_size; i += 2) {
+ for (u64a i = 0; i < test_size; i += 2) {
even.push_back(i);
}
- for (u32 i = 1; i < test_size; i += 2) {
+ for (u64a i = 1; i < test_size; i += 2) {
odd.push_back(i);
}
{ 1U << 28, 15073 },
{ 1U << 29, 24413 },
{ 1U << 30, 50377 },
- // XXX: cases this large segfault in mmbit_set, FIXME NOW
- //{ 1U << 31, 3701 },
+ { 1U << 31, 104729 },
+ // { UINT32_MAX, 104729 }, // Very slow
};
INSTANTIATE_TEST_CASE_P(MultiBit, MultiBitTest, ValuesIn(multibitTests));