{
Ipc::Mem::PageId slot;
if (freeSlots->pop(slot)) {
- debugs(20, 5, "got a previously free slot: " << slot);
+ // convert the one-based page number into the zero-based slot/slice ID
+ const auto slotId = slot.number - 1;
+ debugs(20, 5, "got a previously free slot: " << slotId);
if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
debugs(20, 5, "and got a previously free page: " << page);
- return slot.number - 1;
+ // reset the slice (size = 0, next = -1) so it carries no leftovers
+ // from a previous entry chain before we hand it out
+ map->prepFreeSlice(slotId);
+ return slotId;
} else {
- debugs(20, 3, "but there is no free page, returning " << slot);
+ debugs(20, 3, "but there is no free page, returning " << slotId);
freeSlots->push(slot);
}
}
assert(!waitingFor); // noteFreeMapSlice() should have cleared it
assert(slot.set());
assert(page.set());
- debugs(20, 5, "got previously busy " << slot << " and " << page);
- return slot.number - 1;
+ // same ID conversion and slice preparation as in the free-slot case above
+ const auto slotId = slot.number - 1;
+ map->prepFreeSlice(slotId);
+ debugs(20, 5, "got previously busy " << slotId << " and " << page);
+ return slotId;
}
assert(waitingFor.slot == &slot && waitingFor.page == &page);
waitingFor.slot = NULL;
// allocate the first slice during the first write
if (!coreOff) {
assert(sidCurrent < 0);
- sidCurrent = reserveSlotForWriting(); // throws on failures
+ // slot reservation moved from IoState into SwapDir::reserveSlotForWriting()
+ sidCurrent = dir->reserveSlotForWriting(); // throws on failures
assert(sidCurrent >= 0);
writeAnchor().start = sidCurrent;
}
// We do not write a full buffer without overflow because
// we would not yet know what to set the nextSlot to.
if (overflow) {
- const SlotId sidNext = reserveSlotForWriting(); // throws
+ const auto sidNext = dir->reserveSlotForWriting(); // throws
assert(sidNext >= 0);
writeToDisk(sidNext);
} else if (Store::Root().transientReaders(*e)) {
theFile->write(r);
}
-/// finds and returns a free db slot to fill or throws
-Rock::SlotId
-Rock::IoState::reserveSlotForWriting()
-{
- Ipc::Mem::PageId pageId;
- if (dir->useFreeSlot(pageId))
- return pageId.number-1;
-
- // This may happen when the number of available db slots is close to the
- // number of concurrent requests reading or writing those slots, which may
- // happen when the db is "small" compared to the request traffic OR when we
- // are rebuilding and have not loaded "many" entries or empty slots yet.
- throw TexcHere("ran out of free db slots");
-}
-
void
Rock::IoState::finishedWriting(const int errFlag)
{
size_t writeToBuffer(char const *buf, size_t size);
void writeToDisk(const SlotId nextSlot);
void writeBufToDisk(const SlotId nextSlot, const bool eof, const bool lastWrite);
- SlotId reserveSlotForWriting();
void callBack(int errflag);
return diskOffset(map->sliceLimit());
}
-bool
-Rock::SwapDir::useFreeSlot(Ipc::Mem::PageId &pageId)
+Rock::SlotId
+Rock::SwapDir::reserveSlotForWriting()
{
+ Ipc::Mem::PageId pageId;
+
if (freeSlots->pop(pageId)) {
- debugs(47, 5, "got a previously free slot: " << pageId);
- return true;
+ // convert the one-based page number into the zero-based slot ID
+ const auto slotId = pageId.number - 1;
+ debugs(47, 5, "got a previously free slot: " << slotId);
+ // clear stale slice state (size, next link) before the slot is reused
+ map->prepFreeSlice(slotId);
+ return slotId;
}
// catch free slots delivered to noteFreeMapSlice()
if (map->purgeOne()) {
assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
assert(pageId.set());
- debugs(47, 5, "got a previously busy slot: " << pageId);
- return true;
+ // same conversion and preparation as in the free-slot branch above
+ const auto slotId = pageId.number - 1;
+ debugs(47, 5, "got a previously busy slot: " << slotId);
+ map->prepFreeSlice(slotId);
+ return slotId;
}
assert(waitingForPage == &pageId);
waitingForPage = NULL;
+ // This may happen when the number of available db slots is close to the
+ // number of concurrent requests reading or writing those slots, which may
+ // happen when the db is "small" compared to the request traffic OR when we
+ // are rebuilding and have not loaded "many" entries or empty slots yet.
debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
- return false;
+ throw TexcHere("ran out of free db slots");
}
bool
int64_t slotLimitAbsolute() const; ///< Rock store implementation limit
int64_t slotLimitActual() const; ///< total number of slots in this db
- /// removes a slot from a list of free slots or returns false
- bool useFreeSlot(Ipc::Mem::PageId &pageId);
/// whether the given slot ID may point to a slot in this db
bool validSlotId(const SlotId slotId) const;
+
+ /// finds and returns a free db slot to fill or throws
+ SlotId reserveSlotForWriting();
+
/// purges one or more entries to make full() false and free some slots
void purgeSome();
while (sliceId >= 0) {
Slice &slice = sliceAt(sliceId);
const SliceId nextId = slice.next;
- slice.size = 0;
- slice.next = -1;
+ slice.clear();
if (cleaner)
cleaner->noteFreeMapSlice(sliceId); // might change slice state
if (sliceId == splicingPoint) {
debugs(54, 7, "freed chain #" << chainId << " in " << path);
}
+void
+Ipc::StoreMap::prepFreeSlice(const SliceId sliceId)
+{
+ // TODO: Move freeSlots here, along with reserveSlotForWriting() logic.
+ assert(validSlice(sliceId));
+ // restore default-constructed state (size = 0, next = -1) so that a
+ // chain-unaffiliated slice carries no leftovers from its previous chain
+ sliceAt(sliceId).clear();
+}
+
Ipc::StoreMap::SliceId
Ipc::StoreMap::sliceContaining(const sfileno fileno, const uint64_t bytesNeeded) const
{
return *this;
}
+ /// restore default-constructed state
+ void clear() { size = 0; next = -1; }
+
std::atomic<Size> size; ///< slice contents size
std::atomic<StoreMapSliceId> next; ///< ID of the next entry slice
};
/// readable anchor for the entry created by openForReading()
const Anchor &readableEntry(const AnchorId anchorId) const;
+ /// prepare a chain-unaffiliated slice for being added to an entry chain
+ void prepFreeSlice(const SliceId sliceId);
+
/// Returns the ID of the entry slice containing n-th byte or
/// a negative ID if the entry does not store that many bytes (yet).
/// Requires a read lock.