/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 47 Store Directory Routines */

#include "squid.h"
#include "globals.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "Store.h"
#include "store_key_md5.h"
#include "StoreHashIndex.h"
#include "swap_log_op.h"
#include "SwapDir.h"
#include "tools.h"
#include "Transients.h"

#include <cerrno>
#include <climits>
#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL), transients(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
    delete transients;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;

void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }

    if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding) {
        transients = new Transients;
        transients->init();
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running on the native Windows port.
     */
#if !_SQUID_WINDOWS_

    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

/**
 * Determine whether the given directory can handle this object
 * size.
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * i.e., any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}
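
/*
 * Worked example (hypothetical limits): with min_objsize = 512 and
 * max_objsize = 4096, an objsize of 2048 is accepted (512 <= 2048 and
 * 4096 > 2048), an objsize of 4096 is rejected (max_objsize is treated
 * as exclusive above), and an unknown size (-1) is rejected because
 * this directory has limits.
 */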

/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // Increment the first candidate once per selection (not once per
    // iteration) to reduce bias when some disk(s) attract more entries.
    static int firstCandidate = 0;
    if (++firstCandidate >= Config.cacheSwap.n_configured)
        firstCandidate = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
        const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));

        int load = 0;
        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}
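
/*
 * Selection sketch: with three configured cache_dirs, successive calls
 * advance firstCandidate 1, 2, 0, 1, ... so each selection starts from a
 * different disk; candidates that refuse the object (canStore) or report
 * an out-of-range load (load < 0 or load > 1000) are skipped.
 */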

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then compare in more detail */
        if (load == least_load) {
            /* closest max-size fit */

            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}
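
/*
 * Per the note above, a hypothetical squid.conf ordering sketch
 * (smallest max-size first):
 *
 *   cache_dir ufs /cache/small 1024 16 256 max-size=65536
 *   cache_dir ufs /cache/large 8192 16 256
 *
 * On a load tie, the code prefers the dir with the smallest max-size
 * (maxObjectSize) that still fits, then the one with the most free space.
 */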

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1. It MUST be a public key. It does no good to log
 *      a public ADD, change the key, then log a private
 *      DEL. So we need to log a DEL before we change a
 *      key from public to private.
 *   2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
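
/*
 * For example, when a public entry's key must turn private, callers are
 * expected to log a SWAP_LOG_DEL while the key is still public (property
 * 1 above) before rekeying; entries without an assigned swap_filen never
 * reach this function (property 2 is asserted above).
 */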

void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient, as CPU-wise it is more efficient to do this
     * sequentially, but I/O-wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, "  Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
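
/*
 * Note: the ((++n & 0xFFFF) == 0) test above reports progress every
 * 65,536 entries written, and the final entries/sec figure substitutes
 * 1.0 for a non-positive elapsed time to avoid dividing by zero on very
 * fast runs.
 */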

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
    struct statvfs sfs;

    if (xstatvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;

    // Sanity check; make sure we have a meaningful value.
    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
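/*
 * fsbtoblk() converts num filesystem blocks of size fsbs into blocks of
 * size bs, dividing first when fsbs < bs (which avoids a larger
 * intermediate product). Worked example: with f_frsize = 512,
 * fsbtoblk(2048, 512, 1024) = 2048 / (1024 / 512) = 1024 one-KB blocks;
 * with f_frsize = 4096, fsbtoblk(2048, 4096, 1024) = 2048 * 4 = 8192.
 */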
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
    struct statvfs sfs;

    if (xstatvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
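
/*
 * Growth sketch: the swapDirs array starts with room for 4 entries and
 * doubles (4, 8, 16, ...) whenever n_configured catches up with
 * n_allocated. The raw memcpy above moves the SwapDir::Pointer slots
 * bitwise, so the reference counts appear to be transferred rather than
 * re-taken when the old array is xfree()d.
 */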

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = find(key)) {
        // this is not very precise: some get()s are not initiated by clients
        e->touch();
        return e;
    }
    return NULL;
}

/// Internal method to implement the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
StoreController::find(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

/// updates the collapsed entry with the corresponding on-disk entry, if any
/// In other words, the SwapDir::anchorCollapsed() API applied to all disks.
bool
StoreController::anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync)
{
    // TODO: move this loop to StoreHashIndex, just like the one in get().
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (sd->anchorCollapsed(collapsed, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << collapsed);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << collapsed);
    return false;
}

void StoreController::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);
    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);
    if (e.swap_filen >= 0)
        e.store()->markForUnlink(e);
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
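
/*
 * Worked example (hypothetical configuration): with cache_mem 256 MB and
 * maximum_object_size_in_memory 512 KB, ramLimit above is 512 KB, so an
 * entry with 100 KB loaded but a 1 MB expected reply size is rejected:
 * ramSize = max(100 KB, 1 MB) exceeds the limit.
 */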

void
StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
StoreController::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}

void
StoreController::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
StoreController::transientsAbandon(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->abandon(e);
    }
}

void
StoreController::transientsCompleteWriting(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->completeWriting(e);
    }
}

int
StoreController::transientReaders(const StoreEntry &e) const
{
    return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
           transients->readers(e) : 0;
}

void
StoreController::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
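
/*
 * Decision sketch for the idle entry above:
 *   - ENTRY_SPECIAL: keep in local memory (icons and the like);
 *   - shared memory cache present: it manages RAM, keep nothing locally;
 *   - otherwise: keep iff keepForLocalMemoryCache() approves and
 *     mem_node::InUseCount() is within store_pages_max.
 * Entries that no Store wants in store_table are destroyed outright.
 */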

void
StoreController::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                 const HttpRequestMethod &reqMethod)
{
    e->makePublic(); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}

void
StoreController::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (collapsed->swap_filen >= 0) {
        found = true;
        inSync = collapsed->store()->updateCollapsed(*collapsed);
    } else {
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}

/// Called for in-transit entries that are not yet anchored to a cache.
/// For cached entries, return true after synchronizing them with their cache
/// (making inSync true on success). For not-yet-cached entries, return false.
bool
StoreController::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    // this method is designed to work with collapsed transients only
    assert(collapsed.mem_obj);
    assert(collapsed.mem_obj->xitTable.index >= 0);
    assert(collapsed.mem_obj->smpCollapsed);

    debugs(20, 7, "anchoring " << collapsed);

    bool found = false;
    if (memStore)
        found = memStore->anchorCollapsed(collapsed, inSync);
    if (!found && Config.cacheSwap.n_configured)
        found = anchorCollapsedOnDisk(collapsed, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << collapsed);
        else
            debugs(20, 5, "failed to anchor " << collapsed);
    } else {
        debugs(20, 7, "skipping not yet cached " << collapsed);
    }

    return found;
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}

void
StoreHashIndex::create()
{
    if (Config.cacheSwap.n_configured == 0) {
        debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
    }

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus: it is specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << (Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable; for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << (Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
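
/*
 * Worked example (hypothetical sizes): a 10 GB swap plus 256 MB of
 * cache_mem with store_avg_object_size 13 KB yields roughly 827,000
 * estimated objects; dividing by store_objects_per_bucket 20 targets
 * about 41,000 buckets before storeKeyHashBuckets() rounds that to a
 * suitable hash size.
 */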

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    return e.store()->dereference(e, wantsLocalMemory);
}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
    sd(aSwapDir),
    callback(NULL),
    cbdata(NULL),
    _done(false),
    bucket(0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (!entries.empty())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47,3, "got entries: " << entries.size());
}