]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
Polishing touches to address PREVIEW review concerns dated 2013/07/03.
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
f1dc9b30 3 * DEBUG: section 47 Store Directory Routines
4 * AUTHOR: Duane Wessels
5 *
2b6662ba 6 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 7 * ----------------------------------------------------------
f1dc9b30 8 *
2b6662ba 9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
f1dc9b30 17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
26ac0430 22 *
f1dc9b30 23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
26ac0430 27 *
f1dc9b30 28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
cbdec147 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 31 *
f1dc9b30 32 */
33
582c2af2
FC
34#include "squid.h"
35#include "globals.h"
36#include "mem_node.h"
528b2c61 37#include "MemObject.h"
9487bae9 38#include "MemStore.h"
582c2af2 39#include "profiler/Profiler.h"
4d5904f7 40#include "SquidConfig.h"
a98bcbee 41#include "SquidMath.h"
985c86bc 42#include "SquidTime.h"
582c2af2 43#include "Store.h"
fb548aaf 44#include "store_key_md5.h"
21d845b1 45#include "StoreHashIndex.h"
4b981814 46#include "swap_log_op.h"
602d9612 47#include "SwapDir.h"
5bed43d6 48#include "tools.h"
9a9954ba 49#include "Transients.h"
85407535 50
c0db87f2 51#if HAVE_STATVFS
52#if HAVE_SYS_STATVFS_H
53#include <sys/statvfs.h>
54#endif
ec15e022 55#endif /* HAVE_STATVFS */
56/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
57#if HAVE_SYS_PARAM_H
58#include <sys/param.h>
203526a1 59#endif
411abf44
A
60#if HAVE_LIMITS_H
61#include <limits.h>
62#endif
ec15e022 63#if HAVE_SYS_MOUNT_H
64#include <sys/mount.h>
65#endif
66/* Windows and Linux use sys/vfs.h */
6c86a065 67#if HAVE_SYS_VFS_H
68#include <sys/vfs.h>
69#endif
582c2af2
FC
70#if HAVE_SYS_WAIT_H
71#include <sys/wait.h>
72#endif
21d845b1
FC
73#if HAVE_ERRNO_H
74#include <errno.h>
75#endif
c0db87f2 76
// Forward declarations of the two selectable cache_dir selection policies.
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

// The disk-store index always exists; memStore and transients are only
// created later, in init(), when the configuration calls for them.
StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL), transients(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
    delete transients;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 104
9838d6c8 105void
c8f4eac4 106StoreController::init()
596dddc1 107{
57af1e3f 108 if (Config.memShared && IamWorkerProcess()) {
60be8b2d
AR
109 memStore = new MemStore;
110 memStore->init();
111 }
9487bae9 112
c8f4eac4 113 swapDir->init();
62e76326 114
65a53c8e 115 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 116 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
e0236918 117 debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
65a53c8e 118 } else {
62e76326 119 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
e0236918 120 debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
65a53c8e 121 }
9a9954ba
AR
122
123 if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding) {
124 transients = new Transients;
125 transients->init();
126 }
85407535 127}
128
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_WINDOWS_

    if (fork())
        return; // parent: the forked child performs the (slow) disk work

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    exit(0); // child: done creating this store; parent reaps us in create()

#endif
}
152
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    // reap every child forked by createOneStore(), retrying on EINTR
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
176
a8a33c46 177/**
cd748f27 178 * Determine whether the given directory can handle this object
179 * size
180 *
181 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 182 * will return true here are ones that have min and max unset,
cd748f27 183 * ie any-sized-object swapdirs. This is a good thing.
184 */
c8f4eac4 185bool
3e62bd58 186SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 187{
a8a33c46 188 // If the swapdir has no range limits, then it definitely can
b6662ffd 189 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 190 return true;
d68f43a0 191
192 /*
a8a33c46
A
193 * If the object size is -1 and the storedir has limits we
194 * can't store it there.
d68f43a0 195 */
a8a33c46 196 if (objsize == -1)
c8f4eac4 197 return false;
d68f43a0 198
a8a33c46 199 // Else, make sure that the object size will fit.
b475997c
AJ
200 if (max_objsize == -1 && min_objsize <= objsize)
201 return true;
202 else
203 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 204}
205
d141c677 206/*
207 * This new selection scheme simply does round-robin on all SwapDirs.
8e8d4f30 208 * A SwapDir is skipped if it is over the max_size (100%) limit, or
209 * overloaded.
d141c677 210 */
211static int
8e8d4f30 212storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
d141c677 213{
214 static int dirn = 0;
215 int i;
8e8d4f30 216 int load;
c8f4eac4 217 RefCount<SwapDir> sd;
62e76326 218
aa1a691e
AR
219 // e->objectLen() is negative at this point when we are still STORE_PENDING
220 ssize_t objsize = e->mem_obj->expectedReplySize();
246e6cc1
AJ
221 if (objsize != -1)
222 objsize += e->mem_obj->swap_hdr_sz;
223
5db6bf73 224 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
62e76326 225 if (++dirn >= Config.cacheSwap.n_configured)
226 dirn = 0;
227
c8f4eac4 228 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 229
aa1a691e 230 if (!sd->canStore(*e, objsize, load))
62e76326 231 continue;
232
62e76326 233 if (load < 0 || load > 1000) {
234 continue;
235 }
236
237 return dirn;
d141c677 238 }
62e76326 239
8e8d4f30 240 return -1;
d141c677 241}
960a01e3 242
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false; // cleared for all dirs; set for the winner below

        if (!SD->canStore(*e, objsize, load))
            continue;

        // reject dirs reporting an out-of-range load
        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max-size fit */

            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the best candidate so far; remember its stats
        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}
312
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual log write to the cache_dir that owns the entry
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
345
// Collects cache statistics into stats: memory-cache figures (shared or
// local), disk-store figures, and global entry/object counts.
void
StoreController::getStats(StoreInfoStats &stats) const
{
    // the shared memory cache reports its own stats; otherwise compute
    // local (non-shared) memory cache figures here
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
365
// Appends human-readable store statistics to output (cachemgr report).
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
386
/* if needed, this could be taught to cache the result */
// The size accessors below simply forward to the disk-store index;
// none of them include the memory cache in their totals.
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
419
// Called when the underlying disk fills up before reaching the configured
// limit: shrink this dir's max_size down to what is actually stored.
void
SwapDir::diskFull()
{
    // nothing to shrink if we are already at (or above) the recorded limit
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
95dcd2b8 430
431void
432storeDirOpenSwapLogs(void)
433{
d3b3ab85 434 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 435 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 436}
437
438void
439storeDirCloseSwapLogs(void)
440{
d3b3ab85 441 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 442 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 443}
444
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written (0 if aborted).
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to rewrite logs while cache_dirs are still being rebuilt
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // start a clean-log writer on every cache_dir that supports one
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        // write one entry per dir per pass, round-robin across the dirs
        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries written
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 536
// Returns an iterator over cached entries matching url/request.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
543
// Returns the store at index x of the configured cache_dir array.
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
549
14911a4e
AR
550SwapDir &
551StoreHashIndex::dir(const int i) const
552{
553 SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
554 assert(sd);
555 return *sd;
556}
557
// Flushes pending state to stable storage: memory cache first (if any),
// then all disk stores.
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
565
/*
 * handle callbacks all avaliable fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 582
// Determines the filesystem block size for path via statvfs()/statfs().
// On failure, logs the error, falls back to 2048 and returns 1; on
// success stores the size in *blksize and returns 0.
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 618
/* convert a filesystem-block count (of size fsbs) into blocks of size bs */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
// Fills in total/free KB and total/free inode counts for the filesystem
// holding path. Returns 0 on success, 1 on stat failure.
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 655
656void
e1f7507e 657allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 658{
659 if (swap->swapDirs == NULL) {
660 swap->n_allocated = 4;
7d3c4ca1 661 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 662 }
663
664 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 665 swap->n_allocated <<= 1;
7d3c4ca1 666 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 667 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 668 xfree(swap->swapDirs);
669 swap->swapDirs = tmp;
670 }
671}
672
// Releases the cache_dir array (startup/shutdown only; reconfigure keeps it).
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        // assigning NULL drops this slot's reference (SwapDir::Pointer is
        // reference-counted), possibly destroying the SwapDir
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
696
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    // NOTE(review): dereference() tests swap_filen while we test swap_dirn
    // here — confirm the two conditions are meant to differ.
    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
723
// Notifies all stores that e is no longer referenced. Returns whether the
// entry should remain in the global store_table.
bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}
753
754StoreEntry *
6ca34f6f 755StoreController::get(const cache_key *key)
1bfe9ade
AR
756{
757 if (StoreEntry *e = find(key)) {
758 // this is not very precise: some get()s are not initiated by clients
759 e->touch();
760 return e;
761 }
762 return NULL;
763}
764
/// Internal method to implements the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
StoreController::find(const cache_key *key)
{
    // ask the store index first; a hit here is an in-transit entry
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
823
// Asynchronous get() API; intentionally unsupported at the controller level.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
829
/// updates the collapsed entry with the corresponding on-disk entry, if any
/// In other words, the SwapDir::anchorCollapsed() API applied to all disks.
bool
StoreController::anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync)
{
    // TODO: move this loop to StoreHashIndex, just like the one in get().
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (sd->anchorCollapsed(collapsed, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << collapsed);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << collapsed);
    return false;
}
858
// Marks e for deletion in every store that may currently hold it:
// transients, the shared memory cache, and its on-disk store.
void StoreController::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);
    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);
    if (e.swap_filen >= 0)
        e.store()->markForUnlink(e);
}
868
96a7de88
DK
869// move this into [non-shared] memory cache class when we have one
870/// whether e should be kept in local RAM for possible future caching
871bool
872StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
873{
874 if (!e.memoryCachable())
875 return false;
876
877 // does the current and expected size obey memory caching limits?
878 assert(e.mem_obj);
879 const int64_t loadedSize = e.mem_obj->endOffset();
880 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
881 const int64_t ramSize = max(loadedSize, expectedSize);
882 const int64_t ramLimit = min(
817138f8
A
883 static_cast<int64_t>(Config.memMaxSize),
884 static_cast<int64_t>(Config.Store.maxInMemObjSize));
96a7de88
DK
885 return ramSize <= ramLimit;
886}
887
// Routes entry data being appended: shared memory cache writes, or local
// memory retention, trimming memory we decided not to keep.
void
StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // free what we are not keeping, possibly preserving enough to swap out
    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}
902
// Removes e from the memory cache (shared or local).
void
StoreController::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}

// Detaches e from the shared memory cache without purging its data.
void
StoreController::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}
919
// Tells the transients table that the entry writer gave up; no-op when the
// entry was never registered there (xitTable.index < 0).
void
StoreController::transientsAbandon(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->abandon(e);
    }
}

// Tells the transients table that the entry writer finished successfully.
void
StoreController::transientsCompleteWriting(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->completeWriting(e);
    }
}

// Number of transient readers of e; zero when e is not a transient entry.
int
StoreController::transientReaders(const StoreEntry &e) const
{
    return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
           transients->readers(e) : 0;
}

// Detaches mem_obj from the transients table.
void
StoreController::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}
953
// Decides the fate of an entry that just became idle (unlocked): keep it in
// local memory, keep it only in a store-managed index, or destroy it.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
990
/// Marks the entry as eligible for request collapsing: concurrent requests
/// for the same object may attach to this entry instead of fetching anew.
void
StoreController::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                 const HttpRequestMethod &reqMethod)
{
    e->makePublic(); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    // SMP-collapsible only if the entry got a transients table slot above
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}
1001
/// Synchronizes a locally-collapsed entry with the cache (memory or disk)
/// that another worker has been writing it to, waking local readers or
/// aborting the entry when syncing is impossible.
void
StoreController::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    // map the transients table slot back to the local StoreEntry, if any
    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    // check early whether the remote writer quit before finishing the entry
    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        // already fully copied from the shared memory cache; nothing to sync
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        // anchored to the shared memory cache; pull in new content
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (collapsed->swap_filen >= 0) {
        // anchored to a cache_dir; pull in new content from disk
        found = true;
        inSync = collapsed->store()->updateCollapsed(*collapsed);
    } else {
        // not anchored anywhere yet; try to anchor (and sync) now
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}
9a9954ba 1050
99921d9d
AR
1051/// Called for in-transit entries that are not yet anchored to a cache.
1052/// For cached entries, return true after synchronizing them with their cache
1053/// (making inSync true on success). For not-yet-cached entries, return false.
1054bool
1055StoreController::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
1056{
1057 // this method is designed to work with collapsed transients only
1058 assert(collapsed.mem_obj);
1059 assert(collapsed.mem_obj->xitTable.index >= 0);
1060 assert(collapsed.mem_obj->smpCollapsed);
1061
1062 debugs(20, 7, "anchoring " << collapsed);
1063
1064 bool found = false;
1065 if (memStore)
1066 found = memStore->anchorCollapsed(collapsed, inSync);
1067 else if (Config.cacheSwap.n_configured)
1068 found = anchorCollapsedOnDisk(collapsed, inSync);
1069
1070 if (found) {
1071 if (inSync)
1072 debugs(20, 7, "anchored " << collapsed);
1073 else
1074 debugs(20, 5, "failed to anchor " << collapsed);
1075 } else {
1076 debugs(20, 7, "skipping not yet cached " << collapsed);
1077 }
1078
1079 return found;
1080}
1081
StoreHashIndex::StoreHashIndex()
{
    // The global store_table must not exist before the index is created.
    // abort() also protects builds where assert() is compiled out.
    if (store_table)
        abort();
    assert (store_table == NULL);
}
1088
1089StoreHashIndex::~StoreHashIndex()
1090{
1091 if (store_table) {
1092 hashFreeItems(store_table, destroyStoreEntry);
1093 hashFreeMemory(store_table);
1094 store_table = NULL;
1095 }
1096}
1097
/// Gives each cache_dir a chance to run its pending I/O callbacks,
/// round-robin, until a full pass produces no work. Returns the total
/// number of callbacks serviced.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j; // work done during the current pass over all dirs
    static int ndir = 0; // persists across calls so service starts where we left off

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            // wrap the round-robin cursor back into range
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // a runaway dir producing endless work indicates a bug
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0); // repeat until a whole pass is idle

    ++ndir;

    return result;
}
1129
1130void
1131StoreHashIndex::create()
1132{
608622b8 1133 if (Config.cacheSwap.n_configured == 0) {
a8163539
TX
1134 debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
1135 }
1136
5db6bf73 1137 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
14911a4e
AR
1138 if (dir(i).active())
1139 store(i)->create();
1140 }
c8f4eac4 1141}
1142
1143/* Lookup an object in the cache.
1144 * return just a reference to object, don't start swapping in yet. */
1145StoreEntry *
6ca34f6f 1146StoreHashIndex::get(const cache_key *key)
c8f4eac4 1147{
1148 PROF_start(storeGet);
bf8fe701 1149 debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
c8f4eac4 1150 StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
1151 PROF_stop(storeGet);
1152 return p;
1153}
1154
1155void
6ca34f6f 1156StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
c8f4eac4 1157{
1158 fatal("not implemented");
1159}
1160
/// Validates store sizing configuration, creates the global store_table
/// hash sized from the configured cache capacities, and initializes every
/// active cache_dir (which starts their index rebuilds).
void
StoreHashIndex::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
1208
12e11a5c 1209uint64_t
c8f4eac4 1210StoreHashIndex::maxSize() const
1211{
12e11a5c 1212 uint64_t result = 0;
c8f4eac4 1213
5db6bf73 1214 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1215 if (dir(i).doReportStat())
1216 result += store(i)->maxSize();
1217 }
c8f4eac4 1218
1219 return result;
1220}
1221
12e11a5c 1222uint64_t
c8f4eac4 1223StoreHashIndex::minSize() const
1224{
12e11a5c 1225 uint64_t result = 0;
c8f4eac4 1226
5db6bf73 1227 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1228 if (dir(i).doReportStat())
1229 result += store(i)->minSize();
1230 }
1231
1232 return result;
1233}
1234
1235uint64_t
1236StoreHashIndex::currentSize() const
1237{
1238 uint64_t result = 0;
1239
5db6bf73 1240 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1241 if (dir(i).doReportStat())
1242 result += store(i)->currentSize();
1243 }
1244
1245 return result;
1246}
1247
1248uint64_t
1249StoreHashIndex::currentCount() const
1250{
1251 uint64_t result = 0;
1252
5db6bf73 1253 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1254 if (dir(i).doReportStat())
1255 result += store(i)->currentCount();
1256 }
c8f4eac4 1257
1258 return result;
1259}
1260
af2fda07
DK
1261int64_t
1262StoreHashIndex::maxObjectSize() const
1263{
1264 int64_t result = -1;
1265
5db6bf73 1266 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1267 if (dir(i).active() && store(i)->maxObjectSize() > result)
1268 result = store(i)->maxObjectSize();
1269 }
1270
1271 return result;
1272}
1273
93bc1434
AR
1274void
1275StoreHashIndex::getStats(StoreInfoStats &stats) const
1276{
1277 // accumulate per-disk cache stats
5db6bf73 1278 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1279 StoreInfoStats dirStats;
1280 store(i)->getStats(dirStats);
1281 stats += dirStats;
1282 }
1283
1284 // common to all disks
1285 stats.swap.open_disk_fd = store_open_disk_fd;
1286
1287 // memory cache stats are collected in StoreController::getStats(), for now
1288}
1289
c8f4eac4 1290void
1291StoreHashIndex::stat(StoreEntry & output) const
1292{
1293 int i;
1294
1295 /* Now go through each store, calling its stat routine */
1296
5db6bf73 1297 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1298 storeAppendPrintf(&output, "\n");
1299 store(i)->stat(output);
1300 }
1301}
1302
1303void
4c973beb
AR
1304StoreHashIndex::reference(StoreEntry &e)
1305{
1306 e.store()->reference(e);
1307}
c8f4eac4 1308
4c973beb 1309bool
54347cbd 1310StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
4c973beb 1311{
54347cbd 1312 return e.store()->dereference(e, wantsLocalMemory);
4c973beb 1313}
c8f4eac4 1314
1315void
1316StoreHashIndex::maintain()
1317{
1318 int i;
1319 /* walk each fs */
1320
5db6bf73 1321 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1322 /* XXX FixMe: This should be done "in parallell" on the different
1323 * cache_dirs, not one at a time.
1324 */
1325 /* call the maintain function .. */
1326 store(i)->maintain();
1327 }
1328}
1329
c8f4eac4 1330void
1331StoreHashIndex::sync()
1332{
1333 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1334 store(i)->sync();
1335}
1336
1337StoreSearch *
30abd221 1338StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1339{
1340 if (url.size())
1341 fatal ("Cannot search by url yet\n");
1342
1343 return new StoreSearchHashIndex (this);
1344}
1345
CBDATA_CLASS_INIT(StoreSearchHashIndex);

/// Begins a bucket-by-bucket walk of store_table on behalf of aSwapDir.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
    sd(aSwapDir),
    callback(NULL),
    cbdata(NULL),
    _done(false),
    bucket(0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}
1362
1363void
70efcae0 1364StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
c8f4eac4 1365{
1366 next();
70efcae0 1367 aCallback (aCallbackData);
c8f4eac4 1368}
1369
1370bool
1371StoreSearchHashIndex::next()
1372{
1373 if (entries.size())
1374 entries.pop_back();
1375
1376 while (!isDone() && !entries.size())
1377 copyBucket();
1378
1379 return currentItem() != NULL;
1380}
1381
1382bool
1383StoreSearchHashIndex::error() const
1384{
1385 return false;
1386}
1387
1388bool
1389StoreSearchHashIndex::isDone() const
1390{
1391 return bucket >= store_hash_buckets || _done;
1392}
1393
1394StoreEntry *
1395StoreSearchHashIndex::currentItem()
1396{
1397 if (!entries.size())
1398 return NULL;
1399
1400 return entries.back();
1401}
1402
1403void
1404StoreSearchHashIndex::copyBucket()
1405{
1406 /* probably need to lock the store entries...
1407 * we copy them all to prevent races on the links. */
1408 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1409 assert (!entries.size());
1410 hash_link *link_ptr = NULL;
1411 hash_link *link_next = NULL;
1412 link_next = hash_get_bucket(store_table, bucket);
1413
1414 while (NULL != (link_ptr = link_next)) {
1415 link_next = link_ptr->next;
1416 StoreEntry *e = (StoreEntry *) link_ptr;
1417
1418 entries.push_back(e);
1419 }
1420
5db6bf73 1421 ++bucket;
c8f4eac4 1422 debugs(47,3, "got entries: " << entries.size());
1423}