]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
Polish: allow debugs printing a StringArea
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
f1dc9b30 3 * DEBUG: section 47 Store Directory Routines
4 * AUTHOR: Duane Wessels
5 *
2b6662ba 6 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 7 * ----------------------------------------------------------
f1dc9b30 8 *
2b6662ba 9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
f1dc9b30 17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
26ac0430 22 *
f1dc9b30 23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
26ac0430 27 *
f1dc9b30 28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
cbdec147 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 31 *
f1dc9b30 32 */
33
582c2af2
FC
34#include "squid.h"
35#include "globals.h"
36#include "mem_node.h"
528b2c61 37#include "MemObject.h"
9487bae9 38#include "MemStore.h"
582c2af2 39#include "profiler/Profiler.h"
4d5904f7 40#include "SquidConfig.h"
a98bcbee 41#include "SquidMath.h"
985c86bc 42#include "SquidTime.h"
582c2af2 43#include "Store.h"
fb548aaf 44#include "store_key_md5.h"
21d845b1 45#include "StoreHashIndex.h"
d3b3ab85 46#include "SwapDir.h"
4b981814 47#include "swap_log_op.h"
5bed43d6 48#include "tools.h"
85407535 49
c0db87f2 50#if HAVE_STATVFS
51#if HAVE_SYS_STATVFS_H
52#include <sys/statvfs.h>
53#endif
ec15e022 54#endif /* HAVE_STATVFS */
55/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
56#if HAVE_SYS_PARAM_H
57#include <sys/param.h>
203526a1 58#endif
411abf44
A
59#if HAVE_LIMITS_H
60#include <limits.h>
61#endif
ec15e022 62#if HAVE_SYS_MOUNT_H
63#include <sys/mount.h>
64#endif
65/* Windows and Linux use sys/vfs.h */
6c86a065 66#if HAVE_SYS_VFS_H
67#include <sys/vfs.h>
68#endif
582c2af2
FC
69#if HAVE_SYS_WAIT_H
70#include <sys/wait.h>
71#endif
21d845b1
FC
72#if HAVE_ERRNO_H
73#include <errno.h>
74#endif
c0db87f2 75
/// cache_dir selection policies; one is picked via store_dir_select_algorithm
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
bef81ea5 87
/// Builds the controller with a disk-index backend; the memory cache
/// (memStore) is created later, in init(), and only when enabled.
StoreController::StoreController() : swapDir (new StoreHashIndex())
    , memStore(NULL)
{}
91
StoreController::~StoreController()
{
    // swapDir is reference-counted and releases itself; only the
    // plain-pointer memory cache needs an explicit delete here
    delete memStore;
}
65a53c8e 96
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. Least Load is the default policy.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 102
/// Initializes the memory cache (workers only, when shared memory caching
/// is enabled), the disk stores, and the cache_dir selection policy.
void
StoreController::init()
{
    // a shared memory cache is only usable from worker processes
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    // honor store_dir_select_algorithm from squid.conf
    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }
}
121
/// Creates the on-disk structures for one store, in a child process where
/// fork() is available so several stores can be created in parallel.
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_WINDOWS_

    if (fork())
        return; // parent: the forked child performs the (slow) creation

#endif

    aStore.create();

#if !_SQUID_WINDOWS_

    exit(0); // child: done; parent reaps us in StoreController::create()

#endif
}
145
/// Creates all configured stores, then (where fork() exists) waits for all
/// the child processes spawned by createOneStore() to finish.
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_

    pid_t pid;

    // reap every child; retry wait on EINTR
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
169
a8a33c46 170/**
cd748f27 171 * Determine whether the given directory can handle this object
172 * size
173 *
174 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 175 * will return true here are ones that have min and max unset,
cd748f27 176 * ie any-sized-object swapdirs. This is a good thing.
177 */
c8f4eac4 178bool
3e62bd58 179SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 180{
a8a33c46 181 // If the swapdir has no range limits, then it definitely can
b6662ffd 182 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 183 return true;
d68f43a0 184
185 /*
a8a33c46
A
186 * If the object size is -1 and the storedir has limits we
187 * can't store it there.
d68f43a0 188 */
a8a33c46 189 if (objsize == -1)
c8f4eac4 190 return false;
d68f43a0 191
a8a33c46 192 // Else, make sure that the object size will fit.
b475997c
AJ
193 if (max_objsize == -1 && min_objsize <= objsize)
194 return true;
195 else
196 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 197}
198
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0; // persists across calls: where the last scan stopped
    int i;
    int load; // filled in by SwapDir::canStore()
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    // account for the swap metadata header stored with the object
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        // skip dirs reporting a nonsensical or overloaded (>1000) load
        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1; // no suitable cache_dir found
}
960a01e3 235
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1; // max-object-size of the best candidate so far
    int least_load = INT_MAX;
    int load; // filled in by SwapDir::canStore()
    int dirn = -1; // index of the best candidate; -1 when none qualifies
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz; // include swap metadata header

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false; // cleared for all; set for the winner below

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max-size fit */

            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}
305
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1. It MUST be a public key. It does no good to log
 *      a public ADD, change the key, then log a private
 *      DEL. So we need to log a DEL before we change a
 *      key from public to private.
 *   2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // the owning cache_dir performs the actual log write
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
338
/// Collects aggregated store statistics: memory-cache stats (from the
/// shared MemStore when present, otherwise computed locally), disk stats
/// from all cache_dirs, and global entry/object counts.
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
358
/// Appends a human-readable statistics report (totals for the whole store,
/// then per-component details) to the given cache manager entry.
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
379
/* if needed, this could be taught to cache the result */
/// Upper limit (bytes) on the cumulative disk cache, summed over cache_dirs.
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
62e76326 387
/// Lower disk-usage watermark (bytes), delegated to the cache_dirs index.
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
394
/// Current cumulative disk usage (bytes) across all cache_dirs.
uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}
400
/// Current number of stored objects across all cache_dirs.
uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}
406
/// The largest object size (bytes) any cache_dir is willing to store.
int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
412
/// Reacts to the underlying disk reporting "full": shrinks this dir's
/// configured maximum down to its current size so we stop trying to grow.
void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return; // already at/over the configured limit; nothing to shrink

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
95dcd2b8 423
424void
425storeDirOpenSwapLogs(void)
426{
d3b3ab85 427 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 428 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 429}
430
431void
432storeDirCloseSwapLogs(void)
433{
d3b3ab85 434 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 435 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 436}
437
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written (0 if aborted because a
 * cache_dir rebuild is still in progress).
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0; // total entries written across all dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to write logs while any cache_dir is still rebuilding its index
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue; // skip this dir but still process the others
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue; // writeCleanStart() failed, or no clean log support

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue; // this dir is finished

            notdone = 1; // at least one dir still has entries to write

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 529
/// Creates an iterator over cached entries; only disk stores are searched.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
536
/// The store at position x in the configured cache_dirs array.
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
542
/// Like store(), but returns a SwapDir reference, asserting that the
/// indexed store really is a SwapDir.
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
550
/// Flushes pending state in every cache: shared memory first, then disks.
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
558
/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 575
/// Determines the filesystem block size for the given path, storing it in
/// *blksize. Returns 0 on success; on stat failure returns 1 after setting
/// a 2048-byte fallback. Values below 512 are also coerced to 2048.
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    // f_frsize is the fundamental (fragment) block size
    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 611
/// converts a count of filesystem blocks of size fsbs into blocks of size bs
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Reports total/free space (KB) and total/free inodes for the filesystem
/// containing path. Returns 0 on success, 1 on stat failure.
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 648
649void
e1f7507e 650allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 651{
652 if (swap->swapDirs == NULL) {
653 swap->n_allocated = 4;
7d3c4ca1 654 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 655 }
656
657 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 658 swap->n_allocated <<= 1;
7d3c4ca1 659 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 660 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 661 xfree(swap->swapDirs);
662 swap->swapDirs = tmp;
663 }
664}
665
/// Releases the cache_dir configuration array. During reconfiguration this
/// is a no-op: the existing dirs must survive to be matched against the
/// freshly parsed configuration.
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */

        // resetting the smart pointer releases our reference to the dir
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
689
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
/// Notifies every interested cache (disk, shared memory, local memory
/// replacement policy) that the entry is being used again.
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
716
/// Notifies every interested cache that the entry is no longer in use.
/// \param wantsLocalMemory whether the caller wants the entry kept in the
///        local (non-shared) memory cache
/// \return true if at least one Store needs the entry kept in store_table
bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);

        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}
746
/// Finds the entry with the given key, checking caches in order: the
/// global store_table (in-transit entries), the shared memory cache, and
/// finally each active cache_dir. Returns NULL when not found anywhere.
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
789
/// Asynchronous lookup by string key; not supported by this controller.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
795
// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    // the entry must satisfy both the total-cache and per-object limits
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
814
815void
816StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
817{
818 bool keepInLocalMemory = false;
819 if (memStore)
820 keepInLocalMemory = memStore->keepInLocalMemory(e);
821 else
822 keepInLocalMemory = keepForLocalMemoryCache(e);
823
824 debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
825
826 if (!keepInLocalMemory)
827 e.trimMemory(preserveSwappable);
828}
829
/// Called when the last client stops using the entry: decides whether it
/// stays in the local memory cache and/or the global store_table, and
/// destroys or purges it otherwise.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
867
/// At most one StoreHashIndex may exist: it owns the global store_table.
StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}
874
StoreHashIndex::~StoreHashIndex()
{
    // destroy all indexed entries, then the global hash table itself
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
883
/// Runs I/O callbacks for every cache_dir, round-robin, resuming from
/// where the previous call stopped; repeats until a full pass does no
/// work. Returns the total number of callbacks performed.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j; // work done in the current pass
    static int ndir = 0; // resume point, preserved across calls

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // safety valve against a dir that keeps reporting endless work
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}
915
/// Creates the on-disk structures for every active cache_dir; warns when
/// none are configured.
void
StoreHashIndex::create()
{
    if (Config.cacheSwap.n_configured == 0) {
        debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
    }

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}
928
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p; // NULL when the key is not in store_table
}
940
/// Asynchronous lookup by string key; not supported by the hash index.
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
946
/// Validates store-related configuration, sizes and creates the global
/// store_table hash, then initializes each active cache_dir (which loads
/// its index into that table).
void
StoreHashIndex::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
994
12e11a5c 995uint64_t
c8f4eac4 996StoreHashIndex::maxSize() const
997{
12e11a5c 998 uint64_t result = 0;
c8f4eac4 999
5db6bf73 1000 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1001 if (dir(i).doReportStat())
1002 result += store(i)->maxSize();
1003 }
c8f4eac4 1004
1005 return result;
1006}
1007
12e11a5c 1008uint64_t
c8f4eac4 1009StoreHashIndex::minSize() const
1010{
12e11a5c 1011 uint64_t result = 0;
c8f4eac4 1012
5db6bf73 1013 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1014 if (dir(i).doReportStat())
1015 result += store(i)->minSize();
1016 }
1017
1018 return result;
1019}
1020
1021uint64_t
1022StoreHashIndex::currentSize() const
1023{
1024 uint64_t result = 0;
1025
5db6bf73 1026 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1027 if (dir(i).doReportStat())
1028 result += store(i)->currentSize();
1029 }
1030
1031 return result;
1032}
1033
1034uint64_t
1035StoreHashIndex::currentCount() const
1036{
1037 uint64_t result = 0;
1038
5db6bf73 1039 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1040 if (dir(i).doReportStat())
1041 result += store(i)->currentCount();
1042 }
c8f4eac4 1043
1044 return result;
1045}
1046
af2fda07
DK
1047int64_t
1048StoreHashIndex::maxObjectSize() const
1049{
1050 int64_t result = -1;
1051
5db6bf73 1052 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1053 if (dir(i).active() && store(i)->maxObjectSize() > result)
1054 result = store(i)->maxObjectSize();
1055 }
1056
1057 return result;
1058}
1059
93bc1434
AR
1060void
1061StoreHashIndex::getStats(StoreInfoStats &stats) const
1062{
1063 // accumulate per-disk cache stats
5db6bf73 1064 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1065 StoreInfoStats dirStats;
1066 store(i)->getStats(dirStats);
1067 stats += dirStats;
1068 }
1069
1070 // common to all disks
1071 stats.swap.open_disk_fd = store_open_disk_fd;
1072
1073 // memory cache stats are collected in StoreController::getStats(), for now
1074}
1075
c8f4eac4 1076void
1077StoreHashIndex::stat(StoreEntry & output) const
1078{
1079 int i;
1080
1081 /* Now go through each store, calling its stat routine */
1082
5db6bf73 1083 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1084 storeAppendPrintf(&output, "\n");
1085 store(i)->stat(output);
1086 }
1087}
1088
1089void
4c973beb
AR
/// Forward the "entry was referenced" notification to the store that
/// owns this entry, so its replacement policy can take note.
1090StoreHashIndex::reference(StoreEntry &e)
1091{
1092 e.store()->reference(e);
1093}
c8f4eac4 1094
4c973beb 1095bool
54347cbd 1096StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
4c973beb 1097{
54347cbd 1098 return e.store()->dereference(e, wantsLocalMemory);
4c973beb 1099}
c8f4eac4 1100
1101void
1102StoreHashIndex::maintain()
1103{
1104 int i;
1105 /* walk each fs */
1106
5db6bf73 1107 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1108 /* XXX FixMe: This should be done "in parallell" on the different
1109 * cache_dirs, not one at a time.
1110 */
1111 /* call the maintain function .. */
1112 store(i)->maintain();
1113 }
1114}
1115
c8f4eac4 1116void
1117StoreHashIndex::sync()
1118{
1119 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1120 store(i)->sync();
1121}
1122
1123StoreSearch *
30abd221 1124StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1125{
1126 if (url.size())
1127 fatal ("Cannot search by url yet\n");
1128
1129 return new StoreSearchHashIndex (this);
1130}
1131
1132CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 1133
6fd5ccc3
AJ
/// Construct a search over the given hash-based store index.
/// The search starts positioned before the first hash bucket, with no
/// pending callback registered.
1134StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
1135 sd(aSwapDir),
1136 callback(NULL),
1137 cbdata(NULL),
1138 _done(false),
1139 bucket(0)
1140{}
1141
1142/* Copying is deliberately unsupported: the copy constructor below is
1143 * declared but never defined, so any accidental copy fails at link time.
 * StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
1144*/
1145
/// Trivial destructor: 'entries' holds raw StoreEntry pointers copied from
/// the global hash table, and nothing here releases them.
1146StoreSearchHashIndex::~StoreSearchHashIndex()
1147{}
1148
1149void
70efcae0 1150StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
c8f4eac4 1151{
1152 next();
70efcae0 1153 aCallback (aCallbackData);
c8f4eac4 1154}
1155
1156bool
1157StoreSearchHashIndex::next()
1158{
1159 if (entries.size())
1160 entries.pop_back();
1161
1162 while (!isDone() && !entries.size())
1163 copyBucket();
1164
1165 return currentItem() != NULL;
1166}
1167
/// A hash-index walk cannot fail, so this search never reports an error.
1168bool
1169StoreSearchHashIndex::error() const
1170{
1171 return false;
1172}
1173
1174bool
1175StoreSearchHashIndex::isDone() const
1176{
1177 return bucket >= store_hash_buckets || _done;
1178}
1179
1180StoreEntry *
1181StoreSearchHashIndex::currentItem()
1182{
1183 if (!entries.size())
1184 return NULL;
1185
1186 return entries.back();
1187}
1188
1189void
1190StoreSearchHashIndex::copyBucket()
1191{
1192 /* probably need to lock the store entries...
1193 * we copy them all to prevent races on the links. */
1194 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1195 assert (!entries.size());
1196 hash_link *link_ptr = NULL;
1197 hash_link *link_next = NULL;
1198 link_next = hash_get_bucket(store_table, bucket);
1199
1200 while (NULL != (link_ptr = link_next)) {
1201 link_next = link_ptr->next;
1202 StoreEntry *e = (StoreEntry *) link_ptr;
1203
1204 entries.push_back(e);
1205 }
1206
5db6bf73 1207 ++bucket;
c8f4eac4 1208 debugs(47,3, "got entries: " << entries.size());
1209}