Allow STORE_MEM_CLIENTs to open disk files if needed and possible.
The STORE_*_CLIENT designation is rather buggy (several known XXXs). Some
collapsed clients are marked as STORE_MEM_CLIENTs (due to the lack of
information at determination time), but their hit content may actually come
from a disk cache.
Do not abandon writing a collapsed cache entry when we cannot cache the entry
in RAM but can still cache it on disk. Both the shared memory cache and the
disk cache have to refuse the entry before it becomes non-collapsible. This
dual refusal is difficult to detect because each cache may make its caching
decision at a different time. Added StoreEntry methods to track those
decisions and to react to them.
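A condensed sketch of that decision-tracking idea, using a hypothetical
CollapsedEntry class with plain boolean flags in place of the real MemObject
indexes and SwapOut::Decision values (the actual StoreEntry code appears in
the patch below):

    class CollapsedEntry {
    public:
        // the shared memory cache reports its verdict (cf. StoreEntry::memOutDecision)
        void memOutDecision(const bool willCacheInRam) {
            ramRefused = !willCacheInRam;
            abandonmentCheck();
        }

        // the disk cache reports its verdict (cf. StoreEntry::swapOutDecision)
        void swapOutDecision(const bool willCacheOnDisk) {
            diskRefused = !willCacheOnDisk;
            abandonmentCheck();
        }

    private:
        // only a dual refusal makes the entry non-collapsible
        void abandonmentCheck() {
            if (ramRefused && diskRefused)
                abandonTransients(); // collapsed requests switch to Plan B
        }

        void abandonTransients() { /* tell other workers to stop waiting */ }

        bool ramRefused = false;
        bool diskRefused = false;
    };

Whichever cache decides last triggers the abandonment; the order of the two
calls does not matter.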
Recognize the disk cache as a potential source of a collapsed entry even when
the memory cache is configured. While collapsed entries would normally be
found in the shared memory cache, caching policies and other factors may
prohibit memory caching while still allowing disk caching. The memory cache is
still preferred.
e.mem_obj->memCache.io = MemObject::ioWriting;
slot->set(e);
map->startAppending(index);
+ e.memOutDecision(true);
return true;
}
case MemObject::ioUndecided:
if (!shouldCache(e) || !startCaching(e)) {
e.mem_obj->memCache.io = MemObject::ioDone;
- Store::Root().transientsAbandon(e);
+ e.memOutDecision(false);
return;
}
break;
/// whether we may start writing to disk (now or in the future)
virtual bool mayStartSwapOut();
virtual void trimMemory(const bool preserveSwappable);
+
+ /// called when a decision to cache in memory has been made
+ void memOutDecision(const bool willCacheInRam);
+ /// called when a decision to cache on disk has been made
+ void swapOutDecision(const MemObject::SwapOut::Decision &decision);
+
void abort();
void unlink();
void makePublic();
void kickProducer();
#endif
+protected:
+ void transientsAbandonmentCheck();
+
private:
static MemAllocator *pool;
void scheduleDiskRead();
void scheduleMemRead();
void scheduleRead();
- void startSwapin();
+ bool startSwapin();
void unpackHeader(char const *buf, ssize_t len);
int type;
return result;
}
+/**
+ * Abandon the transient entry our worker has created if neither the shared
+ * memory cache nor the disk cache wants to store it. Collapsed requests, if
+ * any, should notice and use Plan B instead of getting stuck waiting for us
+ * to start swapping the entry out.
+ */
+void
+StoreEntry::transientsAbandonmentCheck()
+{
+ if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
+ mem_obj->xitTable.index >= 0 && // other workers may be interested
+ mem_obj->memCache.index < 0 && // rejected by the shared memory cache
+ mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
+ debugs(20, 7, "cannot be shared: " << *this);
+ if (!shutting_down) // Store::Root() is FATALly missing during shutdown
+ Store::Root().transientsAbandon(*this);
+ }
+}
+
+void
+StoreEntry::memOutDecision(const bool willCacheInRam)
+{
+ transientsAbandonmentCheck();
+}
+
+void
+StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
+{
+ // Abandon our transient entry if neither shared memory nor disk wants it.
+ assert(mem_obj);
+ mem_obj->swapout.decision = decision;
+ transientsAbandonmentCheck();
+}
+
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
const int64_t len = entry->objectLen();
- // If we do not know the entry length, then we have to open the swap file,
- // which is only possible if there is one AND if we are allowed to use it.
- const bool canSwapIn = entry->swap_filen >= 0 &&
- getType() == STORE_DISK_CLIENT;
+ // If we do not know the entry length, then we have to open the swap file.
+ const bool canSwapIn = entry->swap_filen >= 0;
if (len < 0)
return canSwapIn;
return true; // if we lack prefix, we can swap it in
// If we cannot swap in, make sure we have what we want in RAM. Otherwise,
- // scheduleRead calls scheduleDiskRead which asserts on STORE_MEM_CLIENTs.
+ // scheduleRead calls scheduleDiskRead which asserts without a swap file.
const MemObject *mem = entry->mem_obj;
return mem &&
mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
* if needed.
*/
- if (STORE_DISK_CLIENT == getType() && swapin_sio == NULL)
- startSwapin();
- else
- scheduleRead();
+ if (STORE_DISK_CLIENT == getType() && swapin_sio == NULL) {
+ if (!startSwapin())
+ return; // failure
+ }
+ scheduleRead();
}
-void
+/// opens the swapin "file" if possible; otherwise, fail()s and returns false
+bool
store_client::startSwapin()
{
debugs(90, 3, "store_client::doCopy: Need to open swap in file");
/* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
fail();
flags.store_copying = false;
- return;
+ return false;
} else if (!flags.disk_io_pending) {
/* Don't set store_io_pending here */
storeSwapInStart(this);
if (swapin_sio == NULL) {
fail();
flags.store_copying = false;
- return;
+ return false;
}
- /*
- * If the open succeeds we either copy from memory, or
- * schedule a disk read in the next block.
- */
- scheduleRead();
-
- return;
+ return true;
} else {
debugs(90, DBG_IMPORTANT, "WARNING: Averted multiple fd operation (1)");
flags.store_copying = false;
- return;
+ return false;
}
}
store_client::scheduleDiskRead()
{
/* What the client wants is not in memory. Schedule a disk read */
- assert(STORE_DISK_CLIENT == getType());
+ if (getType() == STORE_DISK_CLIENT) {
+ // we should have called startSwapin() already
+ assert(swapin_sio != NULL);
+ } else if (!swapin_sio && !startSwapin()) {
+ debugs(90, 3, "bailing after swapin start failure for " << *entry);
+ assert(!flags.store_copying);
+ return;
+ }
assert(!flags.disk_io_pending);
- debugs(90, 3, "store_client::doCopy: reading from STORE");
+ debugs(90, 3, "reading " << *entry << " from disk");
fileRead();
bool found = false;
if (memStore)
found = memStore->anchorCollapsed(collapsed, inSync);
- else if (Config.cacheSwap.n_configured)
+ if (!found && Config.cacheSwap.n_configured)
found = anchorCollapsedOnDisk(collapsed, inSync);
if (found) {
e->swap_dirn << ", fileno " << std::hex << std::setw(8) << std::setfill('0') <<
std::uppercase << e->swap_filen);
e->swap_status = SWAPOUT_WRITING;
- mem->swapout.decision = MemObject::SwapOut::swStarted;
+ e->swapOutDecision(MemObject::SwapOut::swStarted);
/* If we start swapping out objects with OutOfBand Metadata,
* then this code needs changing
*/
if (sio == NULL) {
e->swap_status = SWAPOUT_NONE;
- mem->swapout.decision = MemObject::SwapOut::swImpossible;
+ e->swapOutDecision(MemObject::SwapOut::swImpossible);
delete c;
xfree((char*)buf);
storeLog(STORE_LOG_SWAPOUTFAIL, e);
return false;
assert(mem_obj);
- MemObject::SwapOut::Decision &decision = mem_obj->swapout.decision;
+ const MemObject::SwapOut::Decision &decision = mem_obj->swapout.decision;
// if we decided that starting is not possible, do not repeat same checks
if (decision == MemObject::SwapOut::swImpossible) {
// if we swapped out already, do not start over
if (swap_status == SWAPOUT_DONE) {
debugs(20, 3, "already did");
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
// if we started swapping out already, do not start over
if (decision == MemObject::SwapOut::swStarted) {
debugs(20, 3, "already started");
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
if (!checkCachable()) {
debugs(20, 3, HERE << "not cachable");
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
debugs(20, 3, HERE << url() << " SPECIAL");
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
if (mem_obj->inmem_lo > 0) {
debugs(20, 3, "storeSwapOut: (inmem_lo > 0) imem_lo:" << mem_obj->inmem_lo);
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
if (!mem_obj->isContiguous()) {
debugs(20, 3, "storeSwapOut: not Contiguous");
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false;
}
if (expectedEnd > store_maxobjsize) {
debugs(20, 3, HERE << "will not fit: " << expectedEnd <<
" > " << store_maxobjsize);
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false; // known to outgrow the limit eventually
}
if (currentEnd > store_maxobjsize) {
debugs(20, 3, HERE << "does not fit: " << currentEnd <<
" > " << store_maxobjsize);
- decision = MemObject::SwapOut::swImpossible;
+ swapOutDecision(MemObject::SwapOut::swImpossible);
return false; // already does not fit and may only get bigger
}
}
}
- decision = MemObject::SwapOut::swPossible;
+ swapOutDecision(MemObject::SwapOut::swPossible);
return true;
}