/*
- * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
+ * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
*/
+/// the cache_dir selection function currently in use;
+/// initialized to the least-load algorithm (storeDirSelectSwapDirLeastLoad)
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
+/// The entry size to use for Disk::canStore() size limit checks.
+/// This is an optimization to avoid similar calculations in every cache_dir.
+/// \param entry the entry being considered for swapout; must have a mem_obj
+/// \returns the smallest known entry size, including swap_hdr_sz (never negative)
+static int64_t
+objectSizeForDirSelection(const StoreEntry &entry)
+{
+ // entry.objectLen() is negative here when we are still STORE_PENDING
+ int64_t minSize = entry.mem_obj->expectedReplySize();
+
+ // If entry size is unknown, use already accumulated bytes as an estimate.
+ // Controller::accumulateMore() guarantees that there are enough of them.
+ if (minSize < 0)
+ minSize = entry.mem_obj->endOffset();
+
+ assert(minSize >= 0);
+ minSize += entry.mem_obj->swap_hdr_sz;
+ return minSize;
+}
+
/**
* This new selection scheme simply does round-robin on all SwapDirs.
* A SwapDir is skipped if it is over the max_size (100%) limit, or
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
- // e->objectLen() is negative at this point when we are still STORE_PENDING
- ssize_t objsize = e->mem_obj->expectedReplySize();
- if (objsize != -1)
- objsize += e->mem_obj->swap_hdr_sz;
+ const int64_t objsize = objectSizeForDirSelection(*e);
// Increment the first candidate once per selection (not once per
// iteration) to reduce bias when some disk(s) attract more entries.
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
int64_t most_free = 0;
- ssize_t least_objsize = -1;
+ int64_t best_objsize = -1;
+ // best_objsize caches the max-size limit of the best candidate seen so
+ // far; stays -1 until the first eligible cache_dir is found
int least_load = INT_MAX;
int load;
int dirn = -1;
int i;
RefCount<SwapDir> SD;
- // e->objectLen() is negative at this point when we are still STORE_PENDING
- ssize_t objsize = e->mem_obj->expectedReplySize();
-
- if (objsize != -1)
- objsize += e->mem_obj->swap_hdr_sz;
+ const int64_t objsize = objectSizeForDirSelection(*e);
+ // NOTE(review): objectSizeForDirSelection() asserts a non-negative size
+ // and then adds swap_hdr_sz, so objsize cannot be -1 here; the
+ // unknown-size (objsize == -1) branch below looks unreachable -- confirm
for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
SD = dynamic_cast<SwapDir *>(INDEXSD(i));
/* If the load is equal, then look in more details */
if (load == least_load) {
- /* closest max-size fit */
-
- if (least_objsize != -1)
- if (SD->maxObjectSize() > least_objsize)
+ /* best max-size fit */
+ if (best_objsize != -1) {
+ // cache_dir with the smallest max-size gets the known-size object
+ // cache_dir with the largest max-size gets the unknown-size object
+ if ((objsize != -1 && SD->maxObjectSize() > best_objsize) ||
+ (objsize == -1 && SD->maxObjectSize() < best_objsize))
continue;
+ }
/* most free */
if (cur_free < most_free)
}
least_load = load;
- least_objsize = SD->maxObjectSize();
+ best_objsize = SD->maxObjectSize();
most_free = cur_free;
dirn = i;
}
return dirn;
}
+/// All cached cache_dir limits start as "unknown" (-1); updateLimits()
+/// recomputes them from the active, configured cache_dirs.
+Store::Disks::Disks():
+ largestMinimumObjectSize(-1),
+ largestMaximumObjectSize(-1),
+ secondLargestMaximumObjectSize(-1)
+{
+}
+
SwapDir *
Store::Disks::store(int const x) const
{
int64_t
Store::Disks::maxObjectSize() const
{
- int64_t result = -1;
+ // cached by updateLimits(); remains -1 when no active cache_dir set it
+ return largestMaximumObjectSize;
+}
+
+/// Recomputes the cached cache_dir size limits: the largest min-size limit
+/// plus the largest and second-largest max-size limits among active
+/// cache_dirs. The cached values are read by maxObjectSize() and
+/// accumulateMore().
+void
+Store::Disks::updateLimits()
+{
+ // forget previous limits; inactive cache_dirs must not contribute
+ largestMinimumObjectSize = -1;
+ largestMaximumObjectSize = -1;
+ secondLargestMaximumObjectSize = -1;
for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (dir(i).active() && store(i)->maxObjectSize() > result)
- result = store(i)->maxObjectSize();
+ const auto &disk = dir(i);
+ if (!disk.active())
+ continue;
+
+ if (disk.minObjectSize() > largestMinimumObjectSize)
+ largestMinimumObjectSize = disk.minObjectSize();
+
+ const auto diskMaxObjectSize = disk.maxObjectSize();
+ if (diskMaxObjectSize > largestMaximumObjectSize) {
+ if (largestMaximumObjectSize >= 0) // was set
+ secondLargestMaximumObjectSize = largestMaximumObjectSize;
+ largestMaximumObjectSize = diskMaxObjectSize;
+ }
}
+}
- return result;
+/// \returns how many more bytes the entry should accumulate before cache_dir
+/// selection, or zero when it is OK to attempt swapout now
+int64_t
+Store::Disks::accumulateMore(const StoreEntry &entry) const
+{
+ const auto accumulated = entry.mem_obj->availableForSwapOut();
+
+ /*
+ * Keep accumulating more bytes until the set of disks eligible to accept
+ * the entry becomes stable, and, hence, accumulating more is not going to
+ * affect the cache_dir selection. A stable set is usually reached
+ * immediately (or soon) because most configurations either do not use
+ * cache_dirs with explicit min-size/max-size limits or use the same
+ * max-size limit for all cache_dirs (and low min-size limits).
+ */
+
+ // Can the set of min-size cache_dirs accepting this entry change?
+ if (accumulated < largestMinimumObjectSize)
+ return largestMinimumObjectSize - accumulated;
+
+ // Can the set of max-size cache_dirs accepting this entry change
+ // (other than when the entry exceeds the largest maximum; see below)?
+ if (accumulated <= secondLargestMaximumObjectSize)
+ return secondLargestMaximumObjectSize - accumulated + 1;
+
+ /*
+ * Checking largestMaximumObjectSize instead eliminates the risk of starting
+ * to swap out an entry that later grows too big, but also implies huge
+ * accumulation in most environments. Accumulating huge entries not only
+ * consumes lots of RAM but also creates a burst of doPages() write requests
+ * that overwhelm the disk. To avoid these problems, we take the risk and
+ * allow swap out now. The disk will quit swapping out if the entry
+ * eventually grows too big for its selected cache_dir.
+ */
+ debugs(20, 3, "no: " << accumulated << '>' <<
+ secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
+ return 0;
}
void
dir(collapsed.swap_dirn).updateCollapsed(collapsed);
}
+/// whether any configured cache_dir reports itself as SMP-aware
+bool
+Store::Disks::smpAware() const
+{
+ for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
+ // A mix is not supported, but we conservatively check every
+ // dir because features like collapsed revalidation should
+ // currently be disabled if any dir is SMP-aware
+ if (dir(i).smpAware())
+ return true;
+ }
+ return false;
+}
+
/* Store::Disks globals that should be converted to use RegisteredRunner */
void
// Check for store_dirs_rebuilding because fatal() often calls us in early
// initialization phases, before store log is initialized and ready. Also,
- // some stores probably do not support log cleanup during Store rebuilding.
+ // some stores do not support log cleanup during Store rebuilding.
if (StoreController::store_dirs_rebuilding) {
debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");