if (s.readLevel > 0) {
    assert(s.state.swap_if(Slot::Freeing, Slot::WaitingToBeFreed));
} else {
-     memset(s.key, 0, sizeof(s.key));
+     memset(s.key_, 0, sizeof(s.key_));
    memset(&s.seBasics, 0, sizeof(s.seBasics));
    --shared->count;
    assert(s.state.swap_if(Slot::Freeing, Slot::Empty));
void
Rock::DirMap::Slot::setKey(const cache_key *const aKey)
{
-     memcpy(key, aKey, sizeof(key));
+     memcpy(key_, aKey, sizeof(key_));
}
bool
Rock::DirMap::Slot::checkKey(const cache_key *const aKey) const
{
-     const uint64_t *const k = reinterpret_cast<const uint64_t *>(&key);
-     return k[0] == key[0] && k[1] == key[1];
+     const uint32_t *const k = reinterpret_cast<const uint32_t *>(aKey);
+     return k[0] == key_[0] && k[1] == key_[1] &&
+         k[2] == key_[2] && k[3] == key_[3];
}
Rock::DirMap::Shared::Shared(const int aLimit): limit(aLimit), count(0)
    AtomicWordT<uint8_t> state; ///< slot state
    AtomicWord readLevel; ///< read level
-     AtomicWordT<uint64_t> key[2]; ///< MD5 entry key
+
+     // we want two uint64_t, but older GCCs lack __sync_fetch_and_add_8
+     AtomicWordT<uint32_t> key_[4]; ///< MD5 entry key
+
    StoreEntryBasics seBasics; ///< basic store entry data
};
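
For context on the new comment: GCC's pre-C++11 atomics are the __sync builtin family, and each generic call resolves to a size-specific primitive such as __sync_fetch_and_add_4 or __sync_fetch_and_add_8. Older GCC releases and 32-bit targets often provide only the 4-byte forms, so a 64-bit atomic word fails to build or link, while four 32-bit words work everywhere. The sketch below assumes AtomicWordT is roughly a thin wrapper over those builtins; the class name SketchAtomicWordT and its exact interface are illustrative, not Squid's actual AtomicWord.h.

#include <stdint.h>

/// Illustrative stand-in for AtomicWordT: a word whose operations are
/// implemented with GCC __sync builtins. Each call resolves to a primitive
/// sized to sizeof(Value), e.g. the 8-byte form for uint64_t, which is
/// exactly the primitive missing on older GCCs and 32-bit targets.
template <class Value>
class SketchAtomicWordT
{
public:
    SketchAtomicWordT(): value(0) {}

    /// atomic increment, returning the new value
    Value operator ++() { return __sync_add_and_fetch(&value, 1); }

    /// atomic decrement, returning the new value
    Value operator --() { return __sync_sub_and_fetch(&value, 1); }

    /// atomically replace oldV with newV; returns false if value != oldV
    bool swap_if(const Value oldV, const Value newV) {
        return __sync_bool_compare_and_swap(&value, oldV, newV);
    }

private:
    Value value;
};

int main()
{
    // four 32-bit words only require the 4-byte __sync builtins,
    // which older GCC releases generally do provide
    SketchAtomicWordT<uint32_t> key[4];
    ++key[0];
    return key[0].swap_if(1, 2) ? 0 : 1;
}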