]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
Polish debug messages on Path MTU discovery on client connections
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
f1dc9b30 3 * DEBUG: section 47 Store Directory Routines
4 * AUTHOR: Duane Wessels
5 *
2b6662ba 6 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 7 * ----------------------------------------------------------
f1dc9b30 8 *
2b6662ba 9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
f1dc9b30 17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
26ac0430 22 *
f1dc9b30 23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
26ac0430 27 *
f1dc9b30 28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
cbdec147 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 31 *
f1dc9b30 32 */
33
582c2af2
FC
34#include "squid.h"
35#include "globals.h"
36#include "mem_node.h"
528b2c61 37#include "MemObject.h"
9487bae9 38#include "MemStore.h"
582c2af2 39#include "profiler/Profiler.h"
4d5904f7 40#include "SquidConfig.h"
a98bcbee 41#include "SquidMath.h"
985c86bc 42#include "SquidTime.h"
582c2af2 43#include "Store.h"
fb548aaf 44#include "store_key_md5.h"
21d845b1 45#include "StoreHashIndex.h"
d3b3ab85 46#include "SwapDir.h"
4b981814 47#include "swap_log_op.h"
5bed43d6 48#include "tools.h"
85407535 49
c0db87f2 50#if HAVE_STATVFS
51#if HAVE_SYS_STATVFS_H
52#include <sys/statvfs.h>
53#endif
ec15e022 54#endif /* HAVE_STATVFS */
55/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
56#if HAVE_SYS_PARAM_H
57#include <sys/param.h>
203526a1 58#endif
ec15e022 59#if HAVE_SYS_MOUNT_H
60#include <sys/mount.h>
61#endif
62/* Windows and Linux use sys/vfs.h */
6c86a065 63#if HAVE_SYS_VFS_H
64#include <sys/vfs.h>
65#endif
582c2af2
FC
66#if HAVE_SYS_WAIT_H
67#include <sys/wait.h>
68#endif
21d845b1
FC
69#if HAVE_ERRNO_H
70#include <errno.h>
71#endif
c0db87f2 72
65a53c8e 73static STDIRSELECT storeDirSelectSwapDirRoundRobin;
74static STDIRSELECT storeDirSelectSwapDirLeastLoad;
c8f4eac4 75
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
bef81ea5 84
// Builds the aggregate disk-store index now; the (optional) shared memory
// cache is created later, in init(), because it depends on configuration.
StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL)
{}
88
StoreController::~StoreController()
{
    // swapDir is reference-counted and releases itself; memStore is owned
    // directly by this controller and must be deleted here.
    delete memStore;
}
65a53c8e 93
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. Least Load is the default; init() switches it to
 * round-robin when so configured.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 99
9838d6c8 100void
c8f4eac4 101StoreController::init()
596dddc1 102{
57af1e3f 103 if (Config.memShared && IamWorkerProcess()) {
60be8b2d
AR
104 memStore = new MemStore;
105 memStore->init();
106 }
9487bae9 107
c8f4eac4 108 swapDir->init();
62e76326 109
65a53c8e 110 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 111 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
e0236918 112 debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
65a53c8e 113 } else {
62e76326 114 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
e0236918 115 debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
65a53c8e 116 }
85407535 117}
118
/// Creates the on-disk structures for one store. On platforms with fork(),
/// the creation runs in a child process so several stores can be created in
/// parallel; the parent returns immediately and the child exits when done.
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_WINDOWS_

    // Parent: non-zero pid means "a child is doing the work" -- return.
    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    // Child: work finished; terminate without returning to the caller.
    exit(0);

#endif
}
142
/// Creates all configured stores and (on fork-capable platforms) reaps the
/// child processes spawned by createOneStore().
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    // Wait for every creation child; retry when interrupted by a signal.
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
166
a8a33c46 167/**
cd748f27 168 * Determine whether the given directory can handle this object
169 * size
170 *
171 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 172 * will return true here are ones that have min and max unset,
cd748f27 173 * ie any-sized-object swapdirs. This is a good thing.
174 */
c8f4eac4 175bool
3e62bd58 176SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 177{
a8a33c46 178 // If the swapdir has no range limits, then it definitely can
b6662ffd 179 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 180 return true;
d68f43a0 181
182 /*
a8a33c46
A
183 * If the object size is -1 and the storedir has limits we
184 * can't store it there.
d68f43a0 185 */
a8a33c46 186 if (objsize == -1)
c8f4eac4 187 return false;
d68f43a0 188
a8a33c46 189 // Else, make sure that the object size will fit.
b475997c
AJ
190 if (max_objsize == -1 && min_objsize <= objsize)
191 return true;
192 else
193 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 194}
195
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // persists across calls so successive selections rotate through dirs
    static int dirn = 0;
    int i;
    int load;   // out-parameter filled by SwapDir::canStore()
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    // account for the swap metadata header when the size is known
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // try each configured dir at most once, starting after the last winner
    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        // reject dirs reporting an out-of-range load value
        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    // no configured dir can store this object
    return -1;
}
960a01e3 232
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;          // free space of the best candidate so far
    ssize_t least_objsize = -1;     // max_objsize of the best candidate so far
    int least_load = INT_MAX;       // load of the best candidate so far
    int load;                       // out-parameter filled by canStore()
    int dirn = -1;                  // index of the winner; -1 = none
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    // account for the swap metadata header when the size is known
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;     // cleared for all; set for the winner below

        if (!SD->canStore(*e, objsize, load))
            continue;

        // reject dirs reporting an out-of-range load value
        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the best candidate so far; remember its stats
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
302
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1. It MUST be a public key. It does no good to log
 *      a public ADD, change the key, then log a private
 *      DEL. So we need to log a DEL before we change a
 *      key from public to private.
 *   2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    // op must be a real operation, not the NOP placeholder
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual logging to the entry's own cache_dir
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
335
93bc1434
AR
336void
337StoreController::getStats(StoreInfoStats &stats) const
338{
339 if (memStore)
340 memStore->getStats(stats);
341 else {
342 // move this code to a non-shared memory cache class when we have it
343 stats.mem.shared = false;
344 stats.mem.capacity = Config.memMaxSize;
345 stats.mem.size = mem_node::StoreMemSize();
346 stats.mem.count = hot_obj_count;
347 }
348
349 swapDir->getStats(stats);
350
351 // low-level info not specific to memory or disk cache
352 stats.store_entry_count = StoreEntry::inUseCount();
353 stats.mem_object_count = MemObject::inUseCount();
354}
355
/// Appends a human-readable storage summary (sizes, capacity, per-store
/// details) to the given cache-manager output entry.
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
376
/* if needed, this could be taught to cache the result */
/// total capacity of all cache_dirs, in bytes
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

/// low-water capacity of all cache_dirs, in bytes
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

/// bytes currently stored across all cache_dirs
uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

/// number of objects currently stored across all cache_dirs
uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

/// the largest object size any cache_dir accepts; -1 if unlimited/none
int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
409
f4e3fa54 410void
c8f4eac4 411SwapDir::diskFull()
f4e3fa54 412{
cc34568d 413 if (currentSize() >= maxSize())
62e76326 414 return;
415
cc34568d 416 max_size = currentSize();
62e76326 417
e0236918 418 debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
f4e3fa54 419}
95dcd2b8 420
421void
422storeDirOpenSwapLogs(void)
423{
d3b3ab85 424 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 425 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 426}
427
428void
429storeDirCloseSwapLogs(void)
430{
d3b3ab85 431 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 432 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 433}
434
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written; 0 when aborted because
 * some cache_dirs are still rebuilding their indexes.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;              // total entries written, across all dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to write logs from a partially rebuilt index (see the
    // store_dirs_rebuilding comment near the top of this file)
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // phase 1: ask every dir to start its clean-log rewrite
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    // phase 2: round-robin one entry at a time from each dir until all
    // dirs report no more entries (notdone stays 0 for a full pass)
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            // dirs whose writeCleanStart() failed have no cleanLog
            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 526
/// Creates an iterator over cached entries matching the given URL/request.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

/// the cache_dir at index x, as a generic Store pointer
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

/// the cache_dir at index i, downcast to SwapDir (must succeed)
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
547
/// Flushes pending state to stable storage: the shared memory cache
/// first (if any), then every disk store.
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
555
556/*
26ac0430 557 * handle callbacks all avaliable fs'es
cd748f27 558 */
c8f4eac4 559int
560StoreController::callback()
cd748f27 561{
1d5161bd 562 /* This will likely double count. Thats ok. */
563 PROF_start(storeDirCallback);
564
c8f4eac4 565 /* mem cache callbacks ? */
566 int result = swapDir->callback();
1d5161bd 567
568 PROF_stop(storeDirCallback);
c8f4eac4 569
570 return result;
d141c677 571}
90d42c28 572
/// Queries the filesystem block size for the given path.
/// Writes the size into *blksize (falling back to 2048 on error or
/// nonsensical values). Returns 0 on success, 1 on stat failure.
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    // f_frsize is the fundamental (fragment) block size
    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 608
/* convert num filesystem blocks of size fsbs into blocks of size bs */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Fills in total/free space (in KB) and total/free inode counts for the
/// filesystem containing path. Returns 0 on success, 1 on stat failure.
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 645
646void
e1f7507e 647allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 648{
649 if (swap->swapDirs == NULL) {
650 swap->n_allocated = 4;
7d3c4ca1 651 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 652 }
653
654 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 655 swap->n_allocated <<= 1;
7d3c4ca1 656 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 657 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 658 xfree(swap->swapDirs);
659 swap->swapDirs = tmp;
660 }
661}
662
663void
e1f7507e 664free_cachedir(SquidConfig::_cacheSwap * swap)
c8f4eac4 665{
666 int i;
667 /* DON'T FREE THESE FOR RECONFIGURE */
668
669 if (reconfiguring)
670 return;
671
5db6bf73 672 for (i = 0; i < swap->n_configured; ++i) {
c8f4eac4 673 /* TODO XXX this lets the swapdir free resources asynchronously
674 * swap->swapDirs[i]->deactivate();
26ac0430 675 * but there may be such a means already.
c8f4eac4 676 * RBC 20041225
677 */
678 swap->swapDirs[i] = NULL;
679 }
680
681 safe_free(swap->swapDirs);
682 swap->swapDirs = NULL;
683 swap->n_allocated = 0;
684 swap->n_configured = 0;
685}
686
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
/// Notifies the stores (disk, memory cache, replacement policy) that the
/// entry has been referenced again, so they can update usage metadata.
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
713
/// Notifies the stores that the entry is no longer referenced.
/// Returns whether the entry should remain in the global store_table
/// (true when any store still needs it there).
bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);

        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}
743
/// Looks up an entry by key: first the global store_table (in-transit),
/// then the shared memory cache, then each active cache_dir.
/// Returns NULL when no store has the entry.
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));

            // skip dirs not participating in object storage (e.g. other kids')
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
786
/// Asynchronous lookup variant; not supported by this controller.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
792
96a7de88
DK
793// move this into [non-shared] memory cache class when we have one
794/// whether e should be kept in local RAM for possible future caching
795bool
796StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
797{
798 if (!e.memoryCachable())
799 return false;
800
801 // does the current and expected size obey memory caching limits?
802 assert(e.mem_obj);
803 const int64_t loadedSize = e.mem_obj->endOffset();
804 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
805 const int64_t ramSize = max(loadedSize, expectedSize);
806 const int64_t ramLimit = min(
817138f8
A
807 static_cast<int64_t>(Config.memMaxSize),
808 static_cast<int64_t>(Config.Store.maxInMemObjSize));
96a7de88
DK
809 return ramSize <= ramLimit;
810}
811
812void
813StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
814{
815 bool keepInLocalMemory = false;
816 if (memStore)
817 keepInLocalMemory = memStore->keepInLocalMemory(e);
818 else
819 keepInLocalMemory = keepForLocalMemoryCache(e);
820
821 debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
822
823 if (!keepInLocalMemory)
824 e.trimMemory(preserveSwappable);
825}
826
/// Handles an entry that has just become idle (unlocked): decides whether
/// it stays in the local memory cache and/or the global store_table, and
/// destroys or purges it otherwise. May free e.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
864
StoreHashIndex::StoreHashIndex()
{
    // only one StoreHashIndex may own the global store_table
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    // tear down the global hash and every entry still indexed by it
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
880
/// Polls every cache_dir for pending I/O callbacks, round-robin starting
/// from a static position, until a full pass produces no work.
/// Returns the total number of callbacks serviced.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;                  // callbacks serviced in the current pass
    static int ndir = 0;    // round-robin position, persists across calls

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            // wrap ndir back into range before using it as an index
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // safety valve: a dir that never runs dry would loop forever
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    // advance the starting dir for the next invocation
    ++ndir;

    return result;
}
912
913void
914StoreHashIndex::create()
915{
608622b8 916 if (Config.cacheSwap.n_configured == 0) {
a8163539
TX
917 debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
918 }
919
5db6bf73 920 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
14911a4e
AR
921 if (dir(i).active())
922 store(i)->create();
923 }
c8f4eac4 924}
925
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    // NULL when the key is not in the global store_table
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

/// Asynchronous lookup variant; not supported by this index.
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
943
/// Sizes and creates the global store_table hash, then initializes every
/// active cache_dir (which loads its index into that table).
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    // estimated object count = total (disk + memory) capacity / average size
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
985
12e11a5c 986uint64_t
c8f4eac4 987StoreHashIndex::maxSize() const
988{
12e11a5c 989 uint64_t result = 0;
c8f4eac4 990
5db6bf73 991 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
992 if (dir(i).doReportStat())
993 result += store(i)->maxSize();
994 }
c8f4eac4 995
996 return result;
997}
998
12e11a5c 999uint64_t
c8f4eac4 1000StoreHashIndex::minSize() const
1001{
12e11a5c 1002 uint64_t result = 0;
c8f4eac4 1003
5db6bf73 1004 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1005 if (dir(i).doReportStat())
1006 result += store(i)->minSize();
1007 }
1008
1009 return result;
1010}
1011
1012uint64_t
1013StoreHashIndex::currentSize() const
1014{
1015 uint64_t result = 0;
1016
5db6bf73 1017 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1018 if (dir(i).doReportStat())
1019 result += store(i)->currentSize();
1020 }
1021
1022 return result;
1023}
1024
1025uint64_t
1026StoreHashIndex::currentCount() const
1027{
1028 uint64_t result = 0;
1029
5db6bf73 1030 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1031 if (dir(i).doReportStat())
1032 result += store(i)->currentCount();
1033 }
c8f4eac4 1034
1035 return result;
1036}
1037
af2fda07
DK
1038int64_t
1039StoreHashIndex::maxObjectSize() const
1040{
1041 int64_t result = -1;
1042
5db6bf73 1043 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1044 if (dir(i).active() && store(i)->maxObjectSize() > result)
1045 result = store(i)->maxObjectSize();
1046 }
1047
1048 return result;
1049}
1050
93bc1434
AR
1051void
1052StoreHashIndex::getStats(StoreInfoStats &stats) const
1053{
1054 // accumulate per-disk cache stats
5db6bf73 1055 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1056 StoreInfoStats dirStats;
1057 store(i)->getStats(dirStats);
1058 stats += dirStats;
1059 }
1060
1061 // common to all disks
1062 stats.swap.open_disk_fd = store_open_disk_fd;
1063
1064 // memory cache stats are collected in StoreController::getStats(), for now
1065}
1066
c8f4eac4 1067void
1068StoreHashIndex::stat(StoreEntry & output) const
1069{
1070 int i;
1071
1072 /* Now go through each store, calling its stat routine */
1073
5db6bf73 1074 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1075 storeAppendPrintf(&output, "\n");
1076 store(i)->stat(output);
1077 }
1078}
1079
1080void
4c973beb
AR
1081StoreHashIndex::reference(StoreEntry &e)
1082{
1083 e.store()->reference(e);
1084}
c8f4eac4 1085
4c973beb 1086bool
54347cbd 1087StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
4c973beb 1088{
54347cbd 1089 return e.store()->dereference(e, wantsLocalMemory);
4c973beb 1090}
c8f4eac4 1091
1092void
1093StoreHashIndex::maintain()
1094{
1095 int i;
1096 /* walk each fs */
1097
5db6bf73 1098 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1099 /* XXX FixMe: This should be done "in parallell" on the different
1100 * cache_dirs, not one at a time.
1101 */
1102 /* call the maintain function .. */
1103 store(i)->maintain();
1104 }
1105}
1106
c8f4eac4 1107void
1108StoreHashIndex::sync()
1109{
1110 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1111 store(i)->sync();
1112}
1113
1114StoreSearch *
30abd221 1115StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1116{
1117 if (url.size())
1118 fatal ("Cannot search by url yet\n");
1119
1120 return new StoreSearchHashIndex (this);
1121}
1122
1123CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 1124
c8f4eac4 1125StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
1126{}
1127
1128/* do not link
1129StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
1130*/
1131
1132StoreSearchHashIndex::~StoreSearchHashIndex()
1133{}
1134
1135void
70efcae0 1136StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
c8f4eac4 1137{
1138 next();
70efcae0 1139 aCallback (aCallbackData);
c8f4eac4 1140}
1141
1142bool
1143StoreSearchHashIndex::next()
1144{
1145 if (entries.size())
1146 entries.pop_back();
1147
1148 while (!isDone() && !entries.size())
1149 copyBucket();
1150
1151 return currentItem() != NULL;
1152}
1153
1154bool
1155StoreSearchHashIndex::error() const
1156{
1157 return false;
1158}
1159
1160bool
1161StoreSearchHashIndex::isDone() const
1162{
1163 return bucket >= store_hash_buckets || _done;
1164}
1165
1166StoreEntry *
1167StoreSearchHashIndex::currentItem()
1168{
1169 if (!entries.size())
1170 return NULL;
1171
1172 return entries.back();
1173}
1174
1175void
1176StoreSearchHashIndex::copyBucket()
1177{
1178 /* probably need to lock the store entries...
1179 * we copy them all to prevent races on the links. */
1180 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1181 assert (!entries.size());
1182 hash_link *link_ptr = NULL;
1183 hash_link *link_next = NULL;
1184 link_next = hash_get_bucket(store_table, bucket);
1185
1186 while (NULL != (link_ptr = link_next)) {
1187 link_next = link_ptr->next;
1188 StoreEntry *e = (StoreEntry *) link_ptr;
1189
1190 entries.push_back(e);
1191 }
1192
5db6bf73 1193 ++bucket;
c8f4eac4 1194 debugs(47,3, "got entries: " << entries.size());
1195}