]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
262a0e14 3 * $Id$
f1dc9b30 4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
2b6662ba 8 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 9 * ----------------------------------------------------------
f1dc9b30 10 *
2b6662ba 11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
f1dc9b30 19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
26ac0430 24 *
f1dc9b30 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
26ac0430 29 *
f1dc9b30 30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
cbdec147 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 33 *
f1dc9b30 34 */
35
582c2af2
FC
36#include "squid.h"
37#include "globals.h"
38#include "mem_node.h"
528b2c61 39#include "MemObject.h"
9487bae9 40#include "MemStore.h"
582c2af2
FC
41#include "profiler/Profiler.h"
42#include "protos.h"
a98bcbee 43#include "SquidMath.h"
985c86bc 44#include "SquidTime.h"
582c2af2 45#include "Store.h"
21d845b1 46#include "StoreHashIndex.h"
d3b3ab85 47#include "SwapDir.h"
4b981814 48#include "swap_log_op.h"
85407535 49
c0db87f2 50#if HAVE_STATVFS
51#if HAVE_SYS_STATVFS_H
52#include <sys/statvfs.h>
53#endif
ec15e022 54#endif /* HAVE_STATVFS */
55/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
56#if HAVE_SYS_PARAM_H
57#include <sys/param.h>
203526a1 58#endif
ec15e022 59#if HAVE_SYS_MOUNT_H
60#include <sys/mount.h>
61#endif
62/* Windows and Linux use sys/vfs.h */
6c86a065 63#if HAVE_SYS_VFS_H
64#include <sys/vfs.h>
65#endif
582c2af2
FC
66#if HAVE_SYS_WAIT_H
67#include <sys/wait.h>
68#endif
21d845b1
FC
69#if HAVE_ERRNO_H
70#include <errno.h>
71#endif
c0db87f2 72
65a53c8e 73static STDIRSELECT storeDirSelectSwapDirRoundRobin;
74static STDIRSELECT storeDirSelectSwapDirLeastLoad;
c8f4eac4 75
b07b21cc 76/*
77 * store_dirs_rebuilding is initialized to _1_ as a hack so that
78 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
79 * cache_dirs have been read. For example, without this hack, Squid
80 * will try to write clean log files if -kparse fails (becasue it
81 * calls fatal()).
82 */
83int StoreController::store_dirs_rebuilding = 1;
bef81ea5 84
c8f4eac4 85StoreController::StoreController() : swapDir (new StoreHashIndex())
9199139f 86 , memStore(NULL)
c8f4eac4 87{}
88
89StoreController::~StoreController()
9487bae9
AR
90{
91 delete memStore;
92}
65a53c8e 93
94/*
95 * This function pointer is set according to 'store_dir_select_algorithm'
96 * in squid.conf.
97 */
98STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 99
9838d6c8 100void
c8f4eac4 101StoreController::init()
596dddc1 102{
57af1e3f 103 if (Config.memShared && IamWorkerProcess()) {
60be8b2d
AR
104 memStore = new MemStore;
105 memStore->init();
106 }
9487bae9 107
c8f4eac4 108 swapDir->init();
62e76326 109
65a53c8e 110 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 111 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
e0236918 112 debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
65a53c8e 113 } else {
62e76326 114 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
e0236918 115 debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
65a53c8e 116 }
85407535 117}
118
119void
c8f4eac4 120StoreController::createOneStore(Store &aStore)
596dddc1 121{
62e76326 122 /*
154c7949 123 * On Windows, fork() is not available.
124 * The following is a workaround for create store directories sequentially
125 * when running on native Windows port.
126 */
1191b93b 127#if !_SQUID_MSWIN_
62e76326 128
154c7949 129 if (fork())
62e76326 130 return;
131
099a1791 132#endif
62e76326 133
c8f4eac4 134 aStore.create();
62e76326 135
1191b93b 136#if !_SQUID_MSWIN_
62e76326 137
154c7949 138 exit(0);
62e76326 139
099a1791 140#endif
154c7949 141}
142
143void
c8f4eac4 144StoreController::create()
154c7949 145{
c8f4eac4 146 swapDir->create();
62e76326 147
1191b93b 148#if !_SQUID_MSWIN_
62e76326 149
8a1c8f2c 150 pid_t pid;
62e76326 151
b2c141d4 152 do {
62e76326 153 int status;
1191b93b 154#if _SQUID_NEXT_
62e76326 155
156 pid = wait3(&status, WNOHANG, NULL);
b2c141d4 157#else
62e76326 158
159 pid = waitpid(-1, &status, 0);
b2c141d4 160#endif
62e76326 161
b2c141d4 162 } while (pid > 0 || (pid < 0 && errno == EINTR));
62e76326 163
099a1791 164#endif
596dddc1 165}
166
a8a33c46 167/**
cd748f27 168 * Determine whether the given directory can handle this object
169 * size
170 *
171 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 172 * will return true here are ones that have min and max unset,
cd748f27 173 * ie any-sized-object swapdirs. This is a good thing.
174 */
c8f4eac4 175bool
3e62bd58 176SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 177{
a8a33c46 178 // If the swapdir has no range limits, then it definitely can
b6662ffd 179 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 180 return true;
d68f43a0 181
182 /*
a8a33c46
A
183 * If the object size is -1 and the storedir has limits we
184 * can't store it there.
d68f43a0 185 */
a8a33c46 186 if (objsize == -1)
c8f4eac4 187 return false;
d68f43a0 188
a8a33c46 189 // Else, make sure that the object size will fit.
b475997c
AJ
190 if (max_objsize == -1 && min_objsize <= objsize)
191 return true;
192 else
193 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 194}
195
d141c677 196/*
197 * This new selection scheme simply does round-robin on all SwapDirs.
8e8d4f30 198 * A SwapDir is skipped if it is over the max_size (100%) limit, or
199 * overloaded.
d141c677 200 */
201static int
8e8d4f30 202storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
d141c677 203{
204 static int dirn = 0;
205 int i;
8e8d4f30 206 int load;
c8f4eac4 207 RefCount<SwapDir> sd;
62e76326 208
aa1a691e
AR
209 // e->objectLen() is negative at this point when we are still STORE_PENDING
210 ssize_t objsize = e->mem_obj->expectedReplySize();
246e6cc1
AJ
211 if (objsize != -1)
212 objsize += e->mem_obj->swap_hdr_sz;
213
5db6bf73 214 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
62e76326 215 if (++dirn >= Config.cacheSwap.n_configured)
216 dirn = 0;
217
c8f4eac4 218 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 219
aa1a691e 220 if (!sd->canStore(*e, objsize, load))
62e76326 221 continue;
222
62e76326 223 if (load < 0 || load > 1000) {
224 continue;
225 }
226
227 return dirn;
d141c677 228 }
62e76326 229
8e8d4f30 230 return -1;
d141c677 231}
960a01e3 232
a2899918 233/*
cd748f27 234 * Spread load across all of the store directories
235 *
236 * Note: We should modify this later on to prefer sticking objects
237 * in the *tightest fit* swapdir to conserve space, along with the
26ac0430 238 * actual swapdir usage. But for now, this hack will do while
cd748f27 239 * testing, so you should order your swapdirs in the config file
240 * from smallest maxobjsize to unlimited (-1) maxobjsize.
241 *
242 * We also have to choose nleast == nconf since we need to consider
243 * ALL swapdirs, regardless of state. Again, this is a hack while
244 * we sort out the real usefulness of this algorithm.
a2899918 245 */
65a53c8e 246static int
247storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
a2899918 248{
cc34568d 249 int64_t most_free = 0;
8e8d4f30 250 ssize_t least_objsize = -1;
251 int least_load = INT_MAX;
cd748f27 252 int load;
253 int dirn = -1;
254 int i;
c8f4eac4 255 RefCount<SwapDir> SD;
cd748f27 256
aa1a691e
AR
257 // e->objectLen() is negative at this point when we are still STORE_PENDING
258 ssize_t objsize = e->mem_obj->expectedReplySize();
62e76326 259
cd748f27 260 if (objsize != -1)
62e76326 261 objsize += e->mem_obj->swap_hdr_sz;
262
5db6bf73 263 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 264 SD = dynamic_cast<SwapDir *>(INDEXSD(i));
62e76326 265 SD->flags.selected = 0;
62e76326 266
aa1a691e 267 if (!SD->canStore(*e, objsize, load))
62e76326 268 continue;
269
aa1a691e 270 if (load < 0 || load > 1000)
62e76326 271 continue;
272
273 if (load > least_load)
274 continue;
275
cc34568d 276 const int64_t cur_free = SD->maxSize() - SD->currentSize();
62e76326 277
278 /* If the load is equal, then look in more details */
279 if (load == least_load) {
280 /* closest max_objsize fit */
281
282 if (least_objsize != -1)
283 if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
284 continue;
285
286 /* most free */
287 if (cur_free < most_free)
288 continue;
289 }
290
291 least_load = load;
292 least_objsize = SD->max_objsize;
293 most_free = cur_free;
294 dirn = i;
a2899918 295 }
62e76326 296
ade906c8 297 if (dirn >= 0)
c8f4eac4 298 dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;
62e76326 299
cd748f27 300 return dirn;
596dddc1 301}
302
b109de6b 303/*
304 * An entry written to the swap log MUST have the following
305 * properties.
306 * 1. It MUST be a public key. It does no good to log
307 * a public ADD, change the key, then log a private
308 * DEL. So we need to log a DEL before we change a
309 * key from public to private.
cd748f27 310 * 2. It MUST have a valid (> -1) swap_filen.
b109de6b 311 */
4683e377 312void
5830cdb3 313storeDirSwapLog(const StoreEntry * e, int op)
4683e377 314{
d3b3ab85 315 assert (e);
d46a87a8 316 assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
cd748f27 317 assert(e->swap_filen >= 0);
6c57e268 318 /*
319 * icons and such; don't write them to the swap log
320 */
62e76326 321
d46a87a8 322 if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
62e76326 323 return;
324
b109de6b 325 assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
62e76326 326
26ac0430
AJ
327 debugs(20, 3, "storeDirSwapLog: " <<
328 swap_log_op_str[op] << " " <<
329 e->getMD5Text() << " " <<
330 e->swap_dirn << " " <<
bf8fe701 331 std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);
62e76326 332
c8f4eac4 333 dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
334}
335
93bc1434
AR
336void
337StoreController::getStats(StoreInfoStats &stats) const
338{
339 if (memStore)
340 memStore->getStats(stats);
341 else {
342 // move this code to a non-shared memory cache class when we have it
343 stats.mem.shared = false;
344 stats.mem.capacity = Config.memMaxSize;
345 stats.mem.size = mem_node::StoreMemSize();
346 stats.mem.count = hot_obj_count;
347 }
348
349 swapDir->getStats(stats);
350
351 // low-level info not specific to memory or disk cache
352 stats.store_entry_count = StoreEntry::inUseCount();
353 stats.mem_object_count = MemObject::inUseCount();
354}
355
c932b107 356void
c8f4eac4 357StoreController::stat(StoreEntry &output) const
c932b107 358{
c8f4eac4 359 storeAppendPrintf(&output, "Store Directory Statistics:\n");
360 storeAppendPrintf(&output, "Store Entries : %lu\n",
62e76326 361 (unsigned long int)StoreEntry::inUseCount());
c91ca3ce 362 storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
cc34568d 363 maxSize() >> 10);
57f583f1 364 storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
cc34568d 365 currentSize() / 1024.0);
57f583f1 366 storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
cc34568d
DK
367 Math::doublePercent(currentSize(), maxSize()),
368 Math::doublePercent((maxSize() - currentSize()), maxSize()));
ada9124c
AR
369
370 if (memStore)
371 memStore->stat(output);
e3ef2b09 372
c8f4eac4 373 /* now the swapDir */
374 swapDir->stat(output);
5d406e78 375}
376
c8f4eac4 377/* if needed, this could be taught to cache the result */
12e11a5c 378uint64_t
c8f4eac4 379StoreController::maxSize() const
f4e3fa54 380{
c8f4eac4 381 /* TODO: include memory cache ? */
382 return swapDir->maxSize();
383}
62e76326 384
12e11a5c 385uint64_t
c8f4eac4 386StoreController::minSize() const
387{
388 /* TODO: include memory cache ? */
389 return swapDir->minSize();
f4e3fa54 390}
391
39c1e1d9
DK
392uint64_t
393StoreController::currentSize() const
394{
395 return swapDir->currentSize();
396}
397
398uint64_t
399StoreController::currentCount() const
400{
401 return swapDir->currentCount();
402}
403
af2fda07
DK
404int64_t
405StoreController::maxObjectSize() const
406{
407 return swapDir->maxObjectSize();
408}
409
f4e3fa54 410void
c8f4eac4 411SwapDir::diskFull()
f4e3fa54 412{
cc34568d 413 if (currentSize() >= maxSize())
62e76326 414 return;
415
cc34568d 416 max_size = currentSize();
62e76326 417
e0236918 418 debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
f4e3fa54 419}
95dcd2b8 420
421void
422storeDirOpenSwapLogs(void)
423{
d3b3ab85 424 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 425 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 426}
427
428void
429storeDirCloseSwapLogs(void)
430{
d3b3ab85 431 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 432 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 433}
434
b2c141d4 435/*
436 * storeDirWriteCleanLogs
26ac0430 437 *
b2c141d4 438 * Writes a "clean" swap log file from in-memory metadata.
cd748f27 439 * This is a rewrite of the original function to troll each
440 * StoreDir and write the logs, and flush at the end of
441 * the run. Thanks goes to Eric Stern, since this solution
442 * came out of his COSS code.
b2c141d4 443 */
b2c141d4 444int
445storeDirWriteCleanLogs(int reopen)
95dcd2b8 446{
6a566b9c 447 const StoreEntry *e = NULL;
b2c141d4 448 int n = 0;
62e76326 449
e812ecfc 450 struct timeval start;
451 double dt;
c8f4eac4 452 RefCount<SwapDir> sd;
b2c141d4 453 int dirn;
6a566b9c 454 int notdone = 1;
62e76326 455
bef81ea5 456 if (StoreController::store_dirs_rebuilding) {
e0236918
FC
457 debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
458 debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
62e76326 459 return 0;
b2c141d4 460 }
62e76326 461
e0236918 462 debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
e812ecfc 463 getCurrentTime();
464 start = current_time;
62e76326 465
5db6bf73 466 for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
c8f4eac4 467 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 468
469 if (sd->writeCleanStart() < 0) {
e0236918 470 debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
62e76326 471 continue;
472 }
6a566b9c 473 }
62e76326 474
e78ef51b 475 /*
476 * This may look inefficient as CPU wise it is more efficient to do this
477 * sequentially, but I/O wise the parallellism helps as it allows more
478 * hdd spindles to be active.
d3b3ab85 479 */
c1dd71ae 480 while (notdone) {
62e76326 481 notdone = 0;
482
5db6bf73 483 for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
c8f4eac4 484 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 485
486 if (NULL == sd->cleanLog)
487 continue;
488
489 e = sd->cleanLog->nextEntry();
490
491 if (!e)
492 continue;
493
494 notdone = 1;
495
496 if (!sd->canLog(*e))
497 continue;
498
499 sd->cleanLog->write(*e);
500
501 if ((++n & 0xFFFF) == 0) {
502 getCurrentTime();
e0236918 503 debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
bf8fe701 504 " entries written so far.");
62e76326 505 }
506 }
6a566b9c 507 }
62e76326 508
6a566b9c 509 /* Flush */
5db6bf73 510 for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 511 dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();
62e76326 512
b2c141d4 513 if (reopen)
62e76326 514 storeDirOpenSwapLogs();
515
e812ecfc 516 getCurrentTime();
62e76326 517
e812ecfc 518 dt = tvSubDsec(start, current_time);
62e76326 519
e0236918
FC
520 debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
521 debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
bf8fe701 522 " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
62e76326 523
b2c141d4 524 return n;
95dcd2b8 525}
d141c677 526
c8f4eac4 527StoreSearch *
30abd221 528StoreController::search(String const url, HttpRequest *request)
c8f4eac4 529{
530 /* cheat, for now you can't search the memory hot cache */
531 return swapDir->search(url, request);
532}
533
534StorePointer
535StoreHashIndex::store(int const x) const
536{
537 return INDEXSD(x);
538}
539
14911a4e
AR
540SwapDir &
541StoreHashIndex::dir(const int i) const
542{
543 SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
544 assert(sd);
545 return *sd;
546}
547
cd748f27 548void
c8f4eac4 549StoreController::sync(void)
cd748f27 550{
9487bae9
AR
551 if (memStore)
552 memStore->sync();
c8f4eac4 553 swapDir->sync();
cd748f27 554}
555
556/*
26ac0430 557 * handle callbacks all avaliable fs'es
cd748f27 558 */
c8f4eac4 559int
560StoreController::callback()
cd748f27 561{
1d5161bd 562 /* This will likely double count. Thats ok. */
563 PROF_start(storeDirCallback);
564
c8f4eac4 565 /* mem cache callbacks ? */
566 int result = swapDir->callback();
1d5161bd 567
568 PROF_stop(storeDirCallback);
c8f4eac4 569
570 return result;
d141c677 571}
90d42c28 572
573int
574storeDirGetBlkSize(const char *path, int *blksize)
575{
576#if HAVE_STATVFS
62e76326 577
90d42c28 578 struct statvfs sfs;
62e76326 579
90d42c28 580 if (statvfs(path, &sfs)) {
e0236918 581 debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
62e76326 582 *blksize = 2048;
583 return 1;
90d42c28 584 }
62e76326 585
6759a7aa 586 *blksize = (int) sfs.f_frsize;
90d42c28 587#else
62e76326 588
90d42c28 589 struct statfs sfs;
62e76326 590
90d42c28 591 if (statfs(path, &sfs)) {
e0236918 592 debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
62e76326 593 *blksize = 2048;
594 return 1;
90d42c28 595 }
62e76326 596
90d42c28 597 *blksize = (int) sfs.f_bsize;
6759a7aa 598#endif
4b3af09f 599 /*
600 * Sanity check; make sure we have a meaningful value.
601 */
62e76326 602
d5b72fe7 603 if (*blksize < 512)
62e76326 604 *blksize = 2048;
605
90d42c28 606 return 0;
607}
781d6656 608
609#define fsbtoblk(num, fsbs, bs) \
610 (((fsbs) != 0 && (fsbs) < (bs)) ? \
611 (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
612int
613storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
614{
615#if HAVE_STATVFS
62e76326 616
781d6656 617 struct statvfs sfs;
62e76326 618
781d6656 619 if (statvfs(path, &sfs)) {
e0236918 620 debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
62e76326 621 return 1;
781d6656 622 }
62e76326 623
781d6656 624 *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
625 *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
626 *totl_in = (int) sfs.f_files;
627 *free_in = (int) sfs.f_ffree;
628#else
62e76326 629
781d6656 630 struct statfs sfs;
62e76326 631
781d6656 632 if (statfs(path, &sfs)) {
e0236918 633 debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
62e76326 634 return 1;
781d6656 635 }
62e76326 636
781d6656 637 *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
638 *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
639 *totl_in = (int) sfs.f_files;
640 *free_in = (int) sfs.f_ffree;
641#endif
62e76326 642
781d6656 643 return 0;
644}
c8f4eac4 645
646void
e1f7507e 647allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 648{
649 if (swap->swapDirs == NULL) {
650 swap->n_allocated = 4;
7d3c4ca1 651 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 652 }
653
654 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 655 swap->n_allocated <<= 1;
7d3c4ca1 656 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 657 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 658 xfree(swap->swapDirs);
659 swap->swapDirs = tmp;
660 }
661}
662
663void
e1f7507e 664free_cachedir(SquidConfig::_cacheSwap * swap)
c8f4eac4 665{
666 int i;
667 /* DON'T FREE THESE FOR RECONFIGURE */
668
669 if (reconfiguring)
670 return;
671
5db6bf73 672 for (i = 0; i < swap->n_configured; ++i) {
c8f4eac4 673 /* TODO XXX this lets the swapdir free resources asynchronously
674 * swap->swapDirs[i]->deactivate();
26ac0430 675 * but there may be such a means already.
c8f4eac4 676 * RBC 20041225
677 */
678 swap->swapDirs[i] = NULL;
679 }
680
681 safe_free(swap->swapDirs);
682 swap->swapDirs = NULL;
683 swap->n_allocated = 0;
684 swap->n_configured = 0;
685}
686
687/* this should be a virtual method on StoreEntry,
688 * i.e. e->referenced()
689 * so that the entry can notify the creating Store
690 */
691void
692StoreController::reference(StoreEntry &e)
693{
c5426f8f
AR
694 // special entries do not belong to any specific Store, but are IN_MEMORY
695 if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
696 return;
697
c8f4eac4 698 /* Notify the fs that we're referencing this object again */
699
700 if (e.swap_dirn > -1)
4c973beb 701 swapDir->reference(e);
c8f4eac4 702
9487bae9
AR
703 // Notify the memory cache that we're referencing this object again
704 if (memStore && e.mem_status == IN_MEMORY)
705 memStore->reference(e);
706
707 // TODO: move this code to a non-shared memory cache class when we have it
c8f4eac4 708 if (e.mem_obj) {
709 if (mem_policy->Referenced)
710 mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
711 }
712}
713
4c973beb 714bool
c8f4eac4 715StoreController::dereference(StoreEntry & e)
716{
9c02fb44
AJ
717 bool keepInStoreTable = true; // keep if there are no objections
718
c5426f8f
AR
719 // special entries do not belong to any specific Store, but are IN_MEMORY
720 if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
9c02fb44 721 return keepInStoreTable;
c5426f8f 722
c8f4eac4 723 /* Notify the fs that we're not referencing this object any more */
724
725 if (e.swap_filen > -1)
9c02fb44 726 keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;
c8f4eac4 727
9487bae9
AR
728 // Notify the memory cache that we're not referencing this object any more
729 if (memStore && e.mem_status == IN_MEMORY)
9c02fb44 730 keepInStoreTable = memStore->dereference(e) && keepInStoreTable;
9487bae9
AR
731
732 // TODO: move this code to a non-shared memory cache class when we have it
c8f4eac4 733 if (e.mem_obj) {
734 if (mem_policy->Dereferenced)
735 mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
736 }
4c973beb
AR
737
738 return keepInStoreTable;
c8f4eac4 739}
740
741StoreEntry *
6ca34f6f 742StoreController::get(const cache_key *key)
c8f4eac4 743{
44def0f9 744 if (StoreEntry *e = swapDir->get(key)) {
9487bae9
AR
745 // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
746 // because their backing store slot may be gone already.
171d5429 747 debugs(20, 3, HERE << "got in-transit entry: " << *e);
44def0f9
AR
748 return e;
749 }
750
9487bae9
AR
751 if (memStore) {
752 if (StoreEntry *e = memStore->get(key)) {
753 debugs(20, 3, HERE << "got mem-cached entry: " << *e);
754 return e;
755 }
756 }
757
022f96ad
AR
758 // TODO: this disk iteration is misplaced; move to StoreHashIndex when
759 // the global store_table is no longer used for in-transit objects.
44def0f9
AR
760 if (const int cacheDirs = Config.cacheSwap.n_configured) {
761 // ask each cache_dir until the entry is found; use static starting
762 // point to avoid asking the same subset of disks more often
763 // TODO: coordinate with put() to be able to guess the right disk often
14911a4e 764 static int idx = 0;
44def0f9 765 for (int n = 0; n < cacheDirs; ++n) {
14911a4e 766 idx = (idx + 1) % cacheDirs;
44def0f9 767 SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
14911a4e
AR
768 if (!sd->active())
769 continue;
770
44def0f9 771 if (StoreEntry *e = sd->get(key)) {
eccba1d9 772 debugs(20, 3, HERE << "cache_dir " << idx <<
9199139f 773 " got cached entry: " << *e);
44def0f9
AR
774 return e;
775 }
776 }
777 }
c8f4eac4 778
eccba1d9 779 debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
9199139f 780 " cache_dirs have " << storeKeyText(key));
44def0f9 781 return NULL;
c8f4eac4 782}
783
784void
6ca34f6f 785StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
c8f4eac4 786{
787 fatal("not implemented");
788}
789
96a7de88
DK
790// move this into [non-shared] memory cache class when we have one
791/// whether e should be kept in local RAM for possible future caching
792bool
793StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
794{
795 if (!e.memoryCachable())
796 return false;
797
798 // does the current and expected size obey memory caching limits?
799 assert(e.mem_obj);
800 const int64_t loadedSize = e.mem_obj->endOffset();
801 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
802 const int64_t ramSize = max(loadedSize, expectedSize);
803 const int64_t ramLimit = min(
817138f8
A
804 static_cast<int64_t>(Config.memMaxSize),
805 static_cast<int64_t>(Config.Store.maxInMemObjSize));
96a7de88
DK
806 return ramSize <= ramLimit;
807}
808
809void
810StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
811{
812 bool keepInLocalMemory = false;
813 if (memStore)
814 keepInLocalMemory = memStore->keepInLocalMemory(e);
815 else
816 keepInLocalMemory = keepForLocalMemoryCache(e);
817
818 debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
819
820 if (!keepInLocalMemory)
821 e.trimMemory(preserveSwappable);
822}
823
9487bae9
AR
824void
825StoreController::handleIdleEntry(StoreEntry &e)
826{
827 bool keepInLocalMemory = false;
c5426f8f
AR
828
829 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
830 // Icons (and cache digests?) should stay in store_table until we
831 // have a dedicated storage for them (that would not purge them).
832 // They are not managed [well] by any specific Store handled below.
833 keepInLocalMemory = true;
d3cd2e81 834 } else if (memStore) {
9487bae9
AR
835 memStore->considerKeeping(e);
836 // leave keepInLocalMemory false; memStore maintains its own cache
837 } else {
96a7de88 838 keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
9199139f
AR
839 // the local memory cache is not overflowing
840 (mem_node::InUseCount() <= store_pages_max);
9487bae9
AR
841 }
842
9c02fb44 843 // An idle, unlocked entry that belongs to a SwapDir which controls
4c973beb
AR
844 // its own index, should not stay in the global store_table.
845 if (!dereference(e)) {
9487bae9
AR
846 debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
847 destroyStoreEntry(static_cast<hash_link*>(&e));
848 return;
849 }
850
c5426f8f
AR
851 debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);
852
9487bae9
AR
853 // TODO: move this into [non-shared] memory cache class when we have one
854 if (keepInLocalMemory) {
855 e.setMemStatus(IN_MEMORY);
856 e.mem_obj->unlinkRequest();
857 } else {
858 e.purgeMem(); // may free e
859 }
860}
861
c8f4eac4 862StoreHashIndex::StoreHashIndex()
863{
47f6e231 864 if (store_table)
26ac0430 865 abort();
c8f4eac4 866 assert (store_table == NULL);
867}
868
869StoreHashIndex::~StoreHashIndex()
870{
871 if (store_table) {
872 hashFreeItems(store_table, destroyStoreEntry);
873 hashFreeMemory(store_table);
874 store_table = NULL;
875 }
876}
877
878int
879StoreHashIndex::callback()
880{
881 int result = 0;
882 int j;
883 static int ndir = 0;
884
885 do {
886 j = 0;
887
5db6bf73 888 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 889 if (ndir >= Config.cacheSwap.n_configured)
890 ndir = ndir % Config.cacheSwap.n_configured;
891
892 int temp_result = store(ndir)->callback();
893
894 ++ndir;
895
896 j += temp_result;
897
898 result += temp_result;
899
900 if (j > 100)
901 fatal ("too much io\n");
902 }
903 } while (j > 0);
904
5db6bf73 905 ++ndir;
c8f4eac4 906
907 return result;
908}
909
910void
911StoreHashIndex::create()
912{
5db6bf73 913 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
14911a4e
AR
914 if (dir(i).active())
915 store(i)->create();
916 }
c8f4eac4 917}
918
919/* Lookup an object in the cache.
920 * return just a reference to object, don't start swapping in yet. */
921StoreEntry *
6ca34f6f 922StoreHashIndex::get(const cache_key *key)
c8f4eac4 923{
924 PROF_start(storeGet);
bf8fe701 925 debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
c8f4eac4 926 StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
927 PROF_stop(storeGet);
928 return p;
929}
930
931void
6ca34f6f 932StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
c8f4eac4 933{
934 fatal("not implemented");
935}
936
937void
938StoreHashIndex::init()
939{
940 /* Calculate size of hash table (maximum currently 64k buckets). */
941 /* this is very bogus, its specific to the any Store maintaining an
942 * in-core index, not global */
58d5c5dd 943 size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
e0236918 944 debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
13a07022 945 " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
c8f4eac4 946 buckets /= Config.Store.objectsPerBucket;
e0236918 947 debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
c8f4eac4 948 /* ideally the full scan period should be configurable, for the
949 * moment it remains at approximately 24 hours. */
950 store_hash_buckets = storeKeyHashBuckets(buckets);
e0236918
FC
951 debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
952 debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
9199139f 953 (Config.memShared ? " [shared]" : ""));
e0236918 954 debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
c8f4eac4 955
956 store_table = hash_create(storeKeyHashCmp,
957 store_hash_buckets, storeKeyHashHash);
958
5db6bf73 959 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 960 /* this starts a search of the store dirs, loading their
961 * index. under the new Store api this should be
962 * driven by the StoreHashIndex, not by each store.
bef81ea5 963 *
964 * That is, the HashIndex should perform a search of each dir it is
26ac0430 965 * indexing to do the hash insertions. The search is then able to
bef81ea5 966 * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
967 * 'from-no-log'.
968 *
c8f4eac4 969 * Step 1: make the store rebuilds use a search internally
bef81ea5 970 * Step 2: change the search logic to use the four modes described
971 * above
972 * Step 3: have the hash index walk the searches itself.
c8f4eac4 973 */
14911a4e
AR
974 if (dir(i).active())
975 store(i)->init();
13a07022 976 }
c8f4eac4 977}
978
12e11a5c 979uint64_t
c8f4eac4 980StoreHashIndex::maxSize() const
981{
12e11a5c 982 uint64_t result = 0;
c8f4eac4 983
5db6bf73 984 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
985 if (dir(i).doReportStat())
986 result += store(i)->maxSize();
987 }
c8f4eac4 988
989 return result;
990}
991
12e11a5c 992uint64_t
c8f4eac4 993StoreHashIndex::minSize() const
994{
12e11a5c 995 uint64_t result = 0;
c8f4eac4 996
5db6bf73 997 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
998 if (dir(i).doReportStat())
999 result += store(i)->minSize();
1000 }
1001
1002 return result;
1003}
1004
1005uint64_t
1006StoreHashIndex::currentSize() const
1007{
1008 uint64_t result = 0;
1009
5db6bf73 1010 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1011 if (dir(i).doReportStat())
1012 result += store(i)->currentSize();
1013 }
1014
1015 return result;
1016}
1017
1018uint64_t
1019StoreHashIndex::currentCount() const
1020{
1021 uint64_t result = 0;
1022
5db6bf73 1023 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1024 if (dir(i).doReportStat())
1025 result += store(i)->currentCount();
1026 }
c8f4eac4 1027
1028 return result;
1029}
1030
af2fda07
DK
1031int64_t
1032StoreHashIndex::maxObjectSize() const
1033{
1034 int64_t result = -1;
1035
5db6bf73 1036 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1037 if (dir(i).active() && store(i)->maxObjectSize() > result)
1038 result = store(i)->maxObjectSize();
1039 }
1040
1041 return result;
1042}
1043
93bc1434
AR
1044void
1045StoreHashIndex::getStats(StoreInfoStats &stats) const
1046{
1047 // accumulate per-disk cache stats
5db6bf73 1048 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1049 StoreInfoStats dirStats;
1050 store(i)->getStats(dirStats);
1051 stats += dirStats;
1052 }
1053
1054 // common to all disks
1055 stats.swap.open_disk_fd = store_open_disk_fd;
1056
1057 // memory cache stats are collected in StoreController::getStats(), for now
1058}
1059
c8f4eac4 1060void
1061StoreHashIndex::stat(StoreEntry & output) const
1062{
1063 int i;
1064
1065 /* Now go through each store, calling its stat routine */
1066
5db6bf73 1067 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1068 storeAppendPrintf(&output, "\n");
1069 store(i)->stat(output);
1070 }
1071}
1072
/// Replacement-policy bookkeeping hook: forwards the "entry was used"
/// notification to whichever store currently holds the entry.
void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}
c8f4eac4 1078
/// Forwards the "reference released" notification to the entry's store.
/// NOTE(review): return value semantics come from the Store::dereference()
/// contract declared elsewhere — presumably whether the entry should be
/// kept; verify against the base-class declaration.
bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}
c8f4eac4 1084
1085void
1086StoreHashIndex::maintain()
1087{
1088 int i;
1089 /* walk each fs */
1090
5db6bf73 1091 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1092 /* XXX FixMe: This should be done "in parallell" on the different
1093 * cache_dirs, not one at a time.
1094 */
1095 /* call the maintain function .. */
1096 store(i)->maintain();
1097 }
1098}
1099
c8f4eac4 1100void
1101StoreHashIndex::sync()
1102{
1103 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1104 store(i)->sync();
1105}
1106
1107StoreSearch *
30abd221 1108StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1109{
1110 if (url.size())
1111 fatal ("Cannot search by url yet\n");
1112
1113 return new StoreSearchHashIndex (this);
1114}
1115
// register StoreSearchHashIndex with Squid's cbdata allocation/validity system
CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 1117
/// Begin a full-index walk over the given StoreHashIndex: start at hash
/// bucket 0 with the search not yet finished.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
1120
/* deliberately declared but not defined, so any accidental copy fails to link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/
1124
/// Nothing to release: entries holds non-owning StoreEntry pointers.
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
1127
1128void
70efcae0 1129StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
c8f4eac4 1130{
1131 next();
70efcae0 1132 aCallback (aCallbackData);
c8f4eac4 1133}
1134
1135bool
1136StoreSearchHashIndex::next()
1137{
1138 if (entries.size())
1139 entries.pop_back();
1140
1141 while (!isDone() && !entries.size())
1142 copyBucket();
1143
1144 return currentItem() != NULL;
1145}
1146
/// An in-memory hash walk cannot fail, so this search never reports an error.
bool
StoreSearchHashIndex::error() const
{
    return false;
}
1152
1153bool
1154StoreSearchHashIndex::isDone() const
1155{
1156 return bucket >= store_hash_buckets || _done;
1157}
1158
1159StoreEntry *
1160StoreSearchHashIndex::currentItem()
1161{
1162 if (!entries.size())
1163 return NULL;
1164
1165 return entries.back();
1166}
1167
1168void
1169StoreSearchHashIndex::copyBucket()
1170{
1171 /* probably need to lock the store entries...
1172 * we copy them all to prevent races on the links. */
1173 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1174 assert (!entries.size());
1175 hash_link *link_ptr = NULL;
1176 hash_link *link_next = NULL;
1177 link_next = hash_get_bucket(store_table, bucket);
1178
1179 while (NULL != (link_ptr = link_next)) {
1180 link_next = link_ptr->next;
1181 StoreEntry *e = (StoreEntry *) link_ptr;
1182
1183 entries.push_back(e);
1184 }
1185
5db6bf73 1186 ++bucket;
c8f4eac4 1187 debugs(47,3, "got entries: " << entries.size());
1188}