disks. This algorithm does not spread objects by size, so any
I/O loading per-disk may appear very unbalanced and volatile.
+ If several cache_dirs use similar min-size, max-size, or other
+ limits to reject certain responses, then do not group such
+ cache_dir lines together, to avoid round-robin selection bias
+ towards the first cache_dir after the group. Instead, interleave
+ cache_dir lines from different groups. For example:
+
+ store_dir_select_algorithm round-robin
+ cache_dir rock /hdd1 ... min-size=100000
+ cache_dir rock /ssd1 ... max-size=99999
+ cache_dir rock /hdd2 ... min-size=100000
+ cache_dir rock /ssd2 ... max-size=99999
+ cache_dir rock /hdd3 ... min-size=100000
+ cache_dir rock /ssd3 ... max-size=99999
DOC_END
NAME: max_open_disk_fds
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
- static int dirn = 0;
- int i;
- int load;
- RefCount<SwapDir> sd;
-
// e->objectLen() is negative at this point when we are still STORE_PENDING
ssize_t objsize = e->mem_obj->expectedReplySize();
if (objsize != -1)
objsize += e->mem_obj->swap_hdr_sz;
- for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
- if (++dirn >= Config.cacheSwap.n_configured)
- dirn = 0;
+ // Increment the first candidate once per selection (not once per
+ // iteration) to reduce bias when some disk(s) attract more entries.
+ static int firstCandidate = 0;
+ if (++firstCandidate >= Config.cacheSwap.n_configured)
+ firstCandidate = 0;
- sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
+ for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
+ const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
+ const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));
+ int load = 0;
if (!sd->canStore(*e, objsize, load))
continue;