/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 47    Store Directory Routines */

#include "squid.h"
#include "globals.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "Store.h"
#include "store_key_md5.h"
#include "StoreHashIndex.h"
#include "swap_log_op.h"
#include "SwapDir.h"
#include "tools.h"
#include "Transients.h"

#include <cerrno>
#include <climits>
#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif
#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL), transients(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
    delete transients;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;

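/* For example, a squid.conf line such as
 *     store_dir_select_algorithm round-robin
 * makes init() below switch this pointer to storeDirSelectSwapDirRoundRobin;
 * when the directive is absent, the least-load policy remains in effect.
 */
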
void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }

    if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding) {
        transients = new Transients;
        transients->init();
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running on the native Windows port.
     */
#if !_SQUID_WINDOWS_

    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

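/* Note: on non-Windows builds, create() above reaps the children forked by
 * createOneStore(): each child builds one cache_dir and exits, and the
 * waitpid() loop blocks until every builder has finished.
 */
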
/**
 * Determine whether the given directory can handle this object
 * size.
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * i.e. any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}

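/* A worked example, in terms of the usual cache_dir min-size/max-size
 * options: a dir with min_objsize 0 and max_objsize -1 accepts any size,
 * including the unknown size -1; a dir with min_objsize 0 and max_objsize
 * 1048576 accepts objsize 4096 (0 <= 4096 < 1048576) but rejects both
 * 1048576 itself and the unknown size -1.
 */
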
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // Increment the first candidate once per selection (not once per
    // iteration) to reduce bias when some disk(s) attract more entries.
    static int firstCandidate = 0;
    if (++firstCandidate >= Config.cacheSwap.n_configured)
        firstCandidate = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
        const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));

        int load = 0;
        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

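/* Example of the bias reduction above: with three dirs and firstCandidate
 * advancing once per selection, successive selections probe the dirs in the
 * orders 1,2,0 then 2,0,1 then 0,1,2 and so on, so a dir that keeps
 * rejecting entries does not push all subsequent traffic onto one neighbour.
 */
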
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more detail */
        if (load == least_load) {
            /* closest max-size fit */

            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}

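/* Tie-breaking example for the loop above: if two dirs report the same
 * load, the one with the smaller max-size= (the closest fit) wins; if
 * their max-size= values also match, the dir with more free space is
 * chosen.
 */
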
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1. It MUST be a public key. It does no good to log
 *      a public ADD, change the key, then log a private
 *      DEL. So we need to log a DEL before we change a
 *      key from public to private.
 *   2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}

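/* Typical callers elsewhere in Squid (an illustration, not an exhaustive
 * list): a completed swapout is recorded with storeDirSwapLog(e,
 * SWAP_LOG_ADD), while releasing a cached object logs SWAP_LOG_DEL so a
 * later index rebuild does not resurrect the entry.
 */
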
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks go to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // report progress every 64K entries written
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, "  Finished.  Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

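/* The macro below converts a count of filesystem blocks into a count of
 * display blocks. A worked example: with 4096-byte fragments reported by
 * statvfs() and 1024-byte display blocks, fsbtoblk(1000, 4096, 1024)
 * yields 1000 * (4096/1024) = 4000 one-KB blocks.
 */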
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}

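/* Growth example: the swapDirs array starts with room for 4 entries and
 * doubles (4, 8, 16, ...) whenever n_allocated == n_configured, so parsing
 * five cache_dir lines triggers exactly one reallocation.
 */
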
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = find(key)) {
        // this is not very precise: some get()s are not initiated by clients
        e->touch();
        return e;
    }
    return NULL;
}

/// Internal method that implements the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
StoreController::find(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

/// updates the collapsed entry with the corresponding on-disk entry, if any
/// In other words, the SwapDir::anchorCollapsed() API applied to all disks.
bool
StoreController::anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync)
{
    // TODO: move this loop to StoreHashIndex, just like the one in get().
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (sd->anchorCollapsed(collapsed, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << collapsed);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << collapsed);
    return false;
}

void StoreController::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);
    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);
    if (e.swap_filen >= 0)
        e.store()->markForUnlink(e);
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // do the current and expected sizes obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

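/* Arithmetic example for the limit above: with cache_mem 256 MB and
 * maximum_object_size_in_memory 512 KB, ramLimit is 512 KB, so an entry
 * whose loaded or expected size exceeds 512 KB is not kept in local RAM.
 */
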
void
StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
StoreController::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}

void
StoreController::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
StoreController::transientsAbandon(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->abandon(e);
    }
}

void
StoreController::transientsCompleteWriting(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->completeWriting(e);
    }
}

int
StoreController::transientReaders(const StoreEntry &e) const
{
    return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
           transients->readers(e) : 0;
}

void
StoreController::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}

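/* Decision summary for handleIdleEntry() above (a restatement of the code):
 * ENTRY_SPECIAL entries always stay in store_table; with a shared memory
 * cache the entry is left to memStore; otherwise it stays in local RAM only
 * while it is memory-cachable and mem_node usage is within store_pages_max.
 */
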
void
StoreController::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                 const HttpRequestMethod &reqMethod)
{
    e->makePublic(); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}

void
StoreController::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (collapsed->swap_filen >= 0) {
        found = true;
        inSync = collapsed->store()->updateCollapsed(*collapsed);
    } else {
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}

/// Called for in-transit entries that are not yet anchored to a cache.
/// For cached entries, return true after synchronizing them with their cache
/// (making inSync true on success). For not-yet-cached entries, return false.
bool
StoreController::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    // this method is designed to work with collapsed transients only
    assert(collapsed.mem_obj);
    assert(collapsed.mem_obj->xitTable.index >= 0);
    assert(collapsed.mem_obj->smpCollapsed);

    debugs(20, 7, "anchoring " << collapsed);

    bool found = false;
    if (memStore)
        found = memStore->anchorCollapsed(collapsed, inSync);
    if (!found && Config.cacheSwap.n_configured)
        found = anchorCollapsedOnDisk(collapsed, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << collapsed);
        else
            debugs(20, 5, "failed to anchor " << collapsed);
    } else {
        debugs(20, 7, "skipping not yet cached " << collapsed);
    }

    return found;
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}

void
StoreHashIndex::create()
{
    if (Config.cacheSwap.n_configured == 0) {
        debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
    }

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, it's specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

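/* Sizing example for init() above, assuming the defaults
 * store_avg_object_size=13 KB and store_objects_per_bucket=20: a 10 GB
 * cache_dir plus 256 MB of cache_mem estimates (10 GB + 256 MB) / 13 KB,
 * roughly 827k objects, giving a target of about 41k hash buckets before
 * storeKeyHashBuckets() rounds it.
 */
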
uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    return e.store()->dereference(e, wantsLocalMemory);
}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
    sd(aSwapDir),
    callback(NULL),
    cbdata(NULL),
    _done(false),
    bucket(0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (!entries.empty())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47,3, "got entries: " << entries.size());
}
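
/* A minimal usage sketch for the search API implemented above (assuming a
 * caller with access to the root Store; StoreSearchPointer is the RefCount
 * typedef used elsewhere in Squid for StoreSearch):
 *
 *     StoreSearchPointer search = Store::Root().search(String(), NULL);
 *     while (search->next()) {
 *         StoreEntry *e = search->currentItem();
 *         // ... inspect e; entries are walked one hash bucket at a time
 *     }
 */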