]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1/*
bbc27441 2 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
e25c139f 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
f1dc9b30 7 */
8
bbc27441
AJ
9/* DEBUG: section 47 Store Directory Routines */
10
582c2af2
FC
11#include "squid.h"
12#include "globals.h"
13#include "mem_node.h"
528b2c61 14#include "MemObject.h"
9487bae9 15#include "MemStore.h"
582c2af2 16#include "profiler/Profiler.h"
4d5904f7 17#include "SquidConfig.h"
a98bcbee 18#include "SquidMath.h"
985c86bc 19#include "SquidTime.h"
582c2af2 20#include "Store.h"
fb548aaf 21#include "store_key_md5.h"
21d845b1 22#include "StoreHashIndex.h"
4b981814 23#include "swap_log_op.h"
602d9612 24#include "SwapDir.h"
5bed43d6 25#include "tools.h"
9a9954ba 26#include "Transients.h"
ed6e9fb9
AJ
27// for tvSubDsec() which should be in SquidTime.h
28#include "util.h"
85407535 29
074d6a40
AJ
30#include <cerrno>
31#include <climits>
582c2af2
FC
32#if HAVE_SYS_WAIT_H
33#include <sys/wait.h>
34#endif
c0db87f2 35
65a53c8e 36static STDIRSELECT storeDirSelectSwapDirRoundRobin;
37static STDIRSELECT storeDirSelectSwapDirLeastLoad;
c8f4eac4 38
b07b21cc 39/*
40 * store_dirs_rebuilding is initialized to _1_ as a hack so that
41 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
42 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
44 * calls fatal()).
45 */
46int StoreController::store_dirs_rebuilding = 1;
bef81ea5 47
/// Builds the controller around a disk-index root store; the optional
/// memory cache and transients stores are created later, in init().
StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL), transients(NULL)
{}
51
StoreController::~StoreController()
{
    // release the optional caches created in init(), if any
    delete memStore;
    delete transients;
}
65a53c8e 57
58/*
59 * This function pointer is set according to 'store_dir_select_algorithm'
60 * in squid.conf.
61 */
62STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 63
/// Initializes all configured stores and selects the cache_dir
/// selection algorithm configured via store_dir_select_algorithm.
void
StoreController::init()
{
    // the shared memory cache is only created in workers when enabled
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        // least-load is the default selection algorithm
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }

    // the shared transients table supports SMP collapsed forwarding
    if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding) {
        transients = new Transients;
        transients->init();
    }
}
87
/// Creates one store, forking a child process to do the work where
/// fork() is available so that multiple stores can be created in parallel.
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_WINDOWS_

    // the parent returns immediately; the child performs the (slow) creation
    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    // child exits when done; the parent reaps it in StoreController::create()
    exit(0);

#endif
}
111
/// Creates all configured stores and then waits for every child
/// process forked by createOneStore() to finish.
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    // reap children until none are left; retry waits interrupted by signals
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
135
a8a33c46 136/**
cd748f27 137 * Determine whether the given directory can handle this object
138 * size
139 *
140 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 141 * will return true here are ones that have min and max unset,
cd748f27 142 * ie any-sized-object swapdirs. This is a good thing.
143 */
c8f4eac4 144bool
3e62bd58 145SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 146{
a8a33c46 147 // If the swapdir has no range limits, then it definitely can
b6662ffd 148 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 149 return true;
d68f43a0 150
151 /*
a8a33c46
A
152 * If the object size is -1 and the storedir has limits we
153 * can't store it there.
d68f43a0 154 */
a8a33c46 155 if (objsize == -1)
c8f4eac4 156 return false;
d68f43a0 157
a8a33c46 158 // Else, make sure that the object size will fit.
b475997c
AJ
159 if (max_objsize == -1 && min_objsize <= objsize)
160 return true;
161 else
162 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 163}
164
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    // when the reply size is known, account for the swap metadata header
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // Increment the first candidate once per selection (not once per
    // iteration) to reduce bias when some disk(s) attract more entries.
    static int firstCandidate = 0;
    if (++firstCandidate >= Config.cacheSwap.n_configured)
        firstCandidate = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
        const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));

        int load = 0;
        if (!sd->canStore(*e, objsize, load))
            continue;

        // skip dirs reporting a load outside the expected 0..1000 range
        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1; // no cache_dir can accept this entry
}
960a01e3 201
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1; // index of the best candidate so far; -1 = none found
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz; // include swap metadata header

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false; // cleared for all; set on the winner below

        if (!SD->canStore(*e, objsize, load))
            continue;

        // skip dirs reporting a load outside the expected 0..1000 range
        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max-size fit */

            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}
271
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 * a public ADD, change the key, then log a private
 * DEL. So we need to log a DEL before we change a
 * key from public to private.
 * 2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual log write to the cache_dir that owns the entry
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
304
93bc1434
AR
/// Fills the given structure with memory-cache, disk-cache, and
/// store-wide statistics.
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
324
/// Appends a human-readable statistics report to the given entry,
/// covering the whole store hierarchy (totals, memory cache, cache_dirs).
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
345
/* if needed, this could be taught to cache the result */
/// total capacity of all cache_dirs, in bytes
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
62e76326 353
/// minimum capacity reserved across all cache_dirs, in bytes
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
360
/// current disk usage across all cache_dirs, in bytes
uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}
366
/// number of entries currently cached across all cache_dirs
uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}
372
/// largest object size acceptable by any cache_dir
int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
378
/// Reacts to an out-of-disk-space condition by shrinking the configured
/// maximum size down to the current usage, so admission stops.
void
SwapDir::diskFull()
{
    // nothing to shrink if the limit does not exceed actual usage
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
95dcd2b8 389
390void
391storeDirOpenSwapLogs(void)
392{
d3b3ab85 393 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 394 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 395}
396
397void
398storeDirCloseSwapLogs(void)
399{
d3b3ab85 400 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 401 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 402}
403
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
/// \returns the number of entries written across all cache_dirs
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0; // entries written so far

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to dump logs while cache_dir indexes are still being rebuilt
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // phase 1: open a clean log on every cache_dir that supports it
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    // phase 2: round-robin one entry per dir until every dir is exhausted
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // periodic progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 495
/// Creates a search iterator over cached entries matching the given URL.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
502
/// returns the cache_dir with the given index
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
508
/// returns the cache_dir with the given index as a SwapDir reference;
/// asserts that the stored object really is a SwapDir
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
516
/// Synchronizes all stores (memory cache first, then disks).
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
524
/*
 * handle callbacks all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 541
/// Determines the filesystem block size for the given path.
/// \retval 0 success, *blksize set from statvfs
/// \retval 1 statvfs failed; *blksize set to a 2048-byte fallback
int
storeDirGetBlkSize(const char *path, int *blksize)
{
    struct statvfs sfs;

    if (xstatvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;

    // Sanity check; make sure we have a meaningful value.
    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 561
/// converts num filesystem blocks of size fsbs into bs-sized blocks
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Reports total and free disk space (in KB) and inode counts for path.
/// \retval 0 success
/// \retval 1 statvfs failed; outputs are left unchanged
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
    struct statvfs sfs;

    if (xstatvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
    return 0;
}
c8f4eac4 581
582void
e1f7507e 583allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 584{
585 if (swap->swapDirs == NULL) {
586 swap->n_allocated = 4;
7d3c4ca1 587 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 588 }
589
590 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 591 swap->n_allocated <<= 1;
7d3c4ca1 592 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 593 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 594 xfree(swap->swapDirs);
595 swap->swapDirs = tmp;
596 }
597}
598
599void
e1f7507e 600free_cachedir(SquidConfig::_cacheSwap * swap)
c8f4eac4 601{
602 int i;
603 /* DON'T FREE THESE FOR RECONFIGURE */
604
605 if (reconfiguring)
606 return;
607
5db6bf73 608 for (i = 0; i < swap->n_configured; ++i) {
c8f4eac4 609 /* TODO XXX this lets the swapdir free resources asynchronously
610 * swap->swapDirs[i]->deactivate();
26ac0430 611 * but there may be such a means already.
c8f4eac4 612 * RBC 20041225
613 */
614 swap->swapDirs[i] = NULL;
615 }
616
617 safe_free(swap->swapDirs);
618 swap->swapDirs = NULL;
619 swap->n_allocated = 0;
620 swap->n_configured = 0;
621}
622
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
649
/// Notifies each involved Store that the entry is no longer referenced.
/// \returns whether somebody still needs the entry in the global store_table
bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}
679
/// Returns the in-transit or cached entry with the given key, if any,
/// updating its last-reference time.
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = find(key)) {
        // this is not very precise: some get()s are not initiated by clients
        e->touch();
        return e;
    }
    return NULL;
}
690
/// Internal method to implements the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
StoreController::find(const cache_key *key)
{
    // check the global store_table (local in-transit entries) first
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            // anchored to a cache but could not be synced: the entry is stale
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
749
/// Asynchronous lookup by URL string; not supported at the controller level.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
755
/// updates the collapsed entry with the corresponding on-disk entry, if any
/// In other words, the SwapDir::anchorCollapsed() API applied to all disks.
bool
StoreController::anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync)
{
    // TODO: move this loop to StoreHashIndex, just like the one in get().
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (sd->anchorCollapsed(collapsed, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << collapsed);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << collapsed);
    return false;
}
784
/// Marks the entry for deletion in every store that may hold a copy:
/// transients, the shared memory cache, and the owning cache_dir.
void StoreController::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);
    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);
    if (e.swap_filen >= 0)
        e.store()->markForUnlink(e);
}
794
// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    // the entry must fit within both the total and per-object RAM limits
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
813
/// Gives the memory cache a chance to cache the entry body; trims the
/// in-RAM copy when no memory cache wants to keep it.
void
StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}
828
/// Removes the entry's data from the memory cache (shared or local).
void
StoreController::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}
837
/// Detaches the entry from the shared memory cache without purging it.
void
StoreController::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}
845
846void
847StoreController::transientsAbandon(StoreEntry &e)
848{
849 if (transients) {
850 assert(e.mem_obj);
851 if (e.mem_obj->xitTable.index >= 0)
852 transients->abandon(e);
853 }
854}
855
99921d9d
AR
856void
857StoreController::transientsCompleteWriting(StoreEntry &e)
858{
859 if (transients) {
860 assert(e.mem_obj);
861 if (e.mem_obj->xitTable.index >= 0)
862 transients->completeWriting(e);
863 }
864}
865
d366a7fa
AR
866int
867StoreController::transientReaders(const StoreEntry &e) const
868{
869 return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
9d4e9cfb 870 transients->readers(e) : 0;
d366a7fa
AR
871}
872
/// Detaches the given memory object from the transients table.
void
StoreController::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}
879
/// Decides the fate of an entry that just became idle (unlocked):
/// keep it in local RAM, keep it only in the store_table, or destroy it.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
916
/// Makes the entry eligible for collapsing concurrent requests onto it,
/// locally and (when transients are enabled) across SMP workers.
void
StoreController::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                 const HttpRequestMethod &reqMethod)
{
    e->makePublic(); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}
927
/// Brings the local collapsed entry with the given transients index up to
/// date with its backing cache (memory or disk), aborting it on failure.
void
StoreController::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        // already fully loaded from the memory cache; nothing to sync
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (collapsed->swap_filen >= 0) {
        found = true;
        inSync = collapsed->store()->updateCollapsed(*collapsed);
    } else {
        // not yet anchored to any cache; try to anchor it now
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}
9a9954ba 976
99921d9d
AR
977/// Called for in-transit entries that are not yet anchored to a cache.
978/// For cached entries, return true after synchronizing them with their cache
979/// (making inSync true on success). For not-yet-cached entries, return false.
980bool
981StoreController::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
982{
983 // this method is designed to work with collapsed transients only
984 assert(collapsed.mem_obj);
985 assert(collapsed.mem_obj->xitTable.index >= 0);
986 assert(collapsed.mem_obj->smpCollapsed);
987
988 debugs(20, 7, "anchoring " << collapsed);
989
990 bool found = false;
991 if (memStore)
992 found = memStore->anchorCollapsed(collapsed, inSync);
0cdcf3d7 993 if (!found && Config.cacheSwap.n_configured)
99921d9d
AR
994 found = anchorCollapsedOnDisk(collapsed, inSync);
995
996 if (found) {
997 if (inSync)
998 debugs(20, 7, "anchored " << collapsed);
999 else
1000 debugs(20, 5, "failed to anchor " << collapsed);
1001 } else {
1002 debugs(20, 7, "skipping not yet cached " << collapsed);
1003 }
1004
1005 return found;
1006}
1007
c8f4eac4 1008StoreHashIndex::StoreHashIndex()
1009{
47f6e231 1010 if (store_table)
26ac0430 1011 abort();
c8f4eac4 1012 assert (store_table == NULL);
1013}
1014
1015StoreHashIndex::~StoreHashIndex()
1016{
1017 if (store_table) {
1018 hashFreeItems(store_table, destroyStoreEntry);
1019 hashFreeMemory(store_table);
1020 store_table = NULL;
1021 }
1022}
1023
1024int
1025StoreHashIndex::callback()
1026{
1027 int result = 0;
1028 int j;
1029 static int ndir = 0;
1030
1031 do {
1032 j = 0;
1033
5db6bf73 1034 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1035 if (ndir >= Config.cacheSwap.n_configured)
1036 ndir = ndir % Config.cacheSwap.n_configured;
1037
1038 int temp_result = store(ndir)->callback();
1039
1040 ++ndir;
1041
1042 j += temp_result;
1043
1044 result += temp_result;
1045
1046 if (j > 100)
1047 fatal ("too much io\n");
1048 }
1049 } while (j > 0);
1050
5db6bf73 1051 ++ndir;
c8f4eac4 1052
1053 return result;
1054}
1055
1056void
1057StoreHashIndex::create()
1058{
608622b8 1059 if (Config.cacheSwap.n_configured == 0) {
a8163539
TX
1060 debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
1061 }
1062
5db6bf73 1063 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
14911a4e
AR
1064 if (dir(i).active())
1065 store(i)->create();
1066 }
c8f4eac4 1067}
1068
1069/* Lookup an object in the cache.
1070 * return just a reference to object, don't start swapping in yet. */
1071StoreEntry *
6ca34f6f 1072StoreHashIndex::get(const cache_key *key)
c8f4eac4 1073{
1074 PROF_start(storeGet);
bf8fe701 1075 debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
c8f4eac4 1076 StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
1077 PROF_stop(storeGet);
1078 return p;
1079}
1080
1081void
6ca34f6f 1082StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
c8f4eac4 1083{
1084 fatal("not implemented");
1085}
1086
1087void
1088StoreHashIndex::init()
1089{
90d881c4
TX
1090 if (Config.Store.objectsPerBucket <= 0)
1091 fatal("'store_objects_per_bucket' should be larger than 0.");
1092
1093 if (Config.Store.avgObjectSize <= 0)
1094 fatal("'store_avg_object_size' should be larger than 0.");
1095
c8f4eac4 1096 /* Calculate size of hash table (maximum currently 64k buckets). */
1097 /* this is very bogus, its specific to the any Store maintaining an
1098 * in-core index, not global */
58d5c5dd 1099 size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
e0236918 1100 debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
13a07022 1101 " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
c8f4eac4 1102 buckets /= Config.Store.objectsPerBucket;
e0236918 1103 debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
c8f4eac4 1104 /* ideally the full scan period should be configurable, for the
1105 * moment it remains at approximately 24 hours. */
1106 store_hash_buckets = storeKeyHashBuckets(buckets);
e0236918
FC
1107 debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
1108 debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
9199139f 1109 (Config.memShared ? " [shared]" : ""));
e0236918 1110 debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
c8f4eac4 1111
1112 store_table = hash_create(storeKeyHashCmp,
1113 store_hash_buckets, storeKeyHashHash);
1114
5db6bf73 1115 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1116 /* this starts a search of the store dirs, loading their
1117 * index. under the new Store api this should be
1118 * driven by the StoreHashIndex, not by each store.
bef81ea5 1119 *
1120 * That is, the HashIndex should perform a search of each dir it is
26ac0430 1121 * indexing to do the hash insertions. The search is then able to
bef81ea5 1122 * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
1123 * 'from-no-log'.
1124 *
c8f4eac4 1125 * Step 1: make the store rebuilds use a search internally
bef81ea5 1126 * Step 2: change the search logic to use the four modes described
1127 * above
1128 * Step 3: have the hash index walk the searches itself.
c8f4eac4 1129 */
14911a4e
AR
1130 if (dir(i).active())
1131 store(i)->init();
13a07022 1132 }
c8f4eac4 1133}
1134
12e11a5c 1135uint64_t
c8f4eac4 1136StoreHashIndex::maxSize() const
1137{
12e11a5c 1138 uint64_t result = 0;
c8f4eac4 1139
5db6bf73 1140 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1141 if (dir(i).doReportStat())
1142 result += store(i)->maxSize();
1143 }
c8f4eac4 1144
1145 return result;
1146}
1147
12e11a5c 1148uint64_t
c8f4eac4 1149StoreHashIndex::minSize() const
1150{
12e11a5c 1151 uint64_t result = 0;
c8f4eac4 1152
5db6bf73 1153 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1154 if (dir(i).doReportStat())
1155 result += store(i)->minSize();
1156 }
1157
1158 return result;
1159}
1160
1161uint64_t
1162StoreHashIndex::currentSize() const
1163{
1164 uint64_t result = 0;
1165
5db6bf73 1166 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1167 if (dir(i).doReportStat())
1168 result += store(i)->currentSize();
1169 }
1170
1171 return result;
1172}
1173
1174uint64_t
1175StoreHashIndex::currentCount() const
1176{
1177 uint64_t result = 0;
1178
5db6bf73 1179 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1180 if (dir(i).doReportStat())
1181 result += store(i)->currentCount();
1182 }
c8f4eac4 1183
1184 return result;
1185}
1186
af2fda07
DK
1187int64_t
1188StoreHashIndex::maxObjectSize() const
1189{
1190 int64_t result = -1;
1191
5db6bf73 1192 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1193 if (dir(i).active() && store(i)->maxObjectSize() > result)
1194 result = store(i)->maxObjectSize();
1195 }
1196
1197 return result;
1198}
1199
93bc1434
AR
1200void
1201StoreHashIndex::getStats(StoreInfoStats &stats) const
1202{
1203 // accumulate per-disk cache stats
5db6bf73 1204 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1205 StoreInfoStats dirStats;
1206 store(i)->getStats(dirStats);
1207 stats += dirStats;
1208 }
1209
1210 // common to all disks
1211 stats.swap.open_disk_fd = store_open_disk_fd;
1212
1213 // memory cache stats are collected in StoreController::getStats(), for now
1214}
1215
c8f4eac4 1216void
1217StoreHashIndex::stat(StoreEntry & output) const
1218{
1219 int i;
1220
1221 /* Now go through each store, calling its stat routine */
1222
5db6bf73 1223 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1224 storeAppendPrintf(&output, "\n");
1225 store(i)->stat(output);
1226 }
1227}
1228
1229void
4c973beb
AR
1230StoreHashIndex::reference(StoreEntry &e)
1231{
1232 e.store()->reference(e);
1233}
c8f4eac4 1234
4c973beb 1235bool
54347cbd 1236StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
4c973beb 1237{
54347cbd 1238 return e.store()->dereference(e, wantsLocalMemory);
4c973beb 1239}
c8f4eac4 1240
1241void
1242StoreHashIndex::maintain()
1243{
1244 int i;
1245 /* walk each fs */
1246
5db6bf73 1247 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1248 /* XXX FixMe: This should be done "in parallell" on the different
1249 * cache_dirs, not one at a time.
1250 */
1251 /* call the maintain function .. */
1252 store(i)->maintain();
1253 }
1254}
1255
c8f4eac4 1256void
1257StoreHashIndex::sync()
1258{
1259 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1260 store(i)->sync();
1261}
1262
1263StoreSearch *
30abd221 1264StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1265{
1266 if (url.size())
1267 fatal ("Cannot search by url yet\n");
1268
1269 return new StoreSearchHashIndex (this);
1270}
1271
1272CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 1273
6fd5ccc3 1274StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
f53969cc
SM
1275 sd(aSwapDir),
1276 callback(NULL),
1277 cbdata(NULL),
1278 _done(false),
1279 bucket(0)
c8f4eac4 1280{}
1281
1282/* do not link
1283StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
1284*/
1285
1286StoreSearchHashIndex::~StoreSearchHashIndex()
1287{}
1288
1289void
70efcae0 1290StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
c8f4eac4 1291{
1292 next();
70efcae0 1293 aCallback (aCallbackData);
c8f4eac4 1294}
1295
1296bool
1297StoreSearchHashIndex::next()
1298{
cfb88efb 1299 if (!entries.empty())
c8f4eac4 1300 entries.pop_back();
1301
1302 while (!isDone() && !entries.size())
1303 copyBucket();
1304
1305 return currentItem() != NULL;
1306}
1307
1308bool
1309StoreSearchHashIndex::error() const
1310{
1311 return false;
1312}
1313
1314bool
1315StoreSearchHashIndex::isDone() const
1316{
1317 return bucket >= store_hash_buckets || _done;
1318}
1319
1320StoreEntry *
1321StoreSearchHashIndex::currentItem()
1322{
1323 if (!entries.size())
1324 return NULL;
1325
1326 return entries.back();
1327}
1328
1329void
1330StoreSearchHashIndex::copyBucket()
1331{
1332 /* probably need to lock the store entries...
1333 * we copy them all to prevent races on the links. */
1334 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1335 assert (!entries.size());
1336 hash_link *link_ptr = NULL;
1337 hash_link *link_next = NULL;
1338 link_next = hash_get_bucket(store_table, bucket);
1339
1340 while (NULL != (link_ptr = link_next)) {
1341 link_next = link_ptr->next;
1342 StoreEntry *e = (StoreEntry *) link_ptr;
1343
1344 entries.push_back(e);
1345 }
1346
5db6bf73 1347 ++bucket;
c8f4eac4 1348 debugs(47,3, "got entries: " << entries.size());
1349}
f53969cc 1350