/*
 * $Id$
 *
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.   Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "globals.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "protos.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "Store.h"
#include "StoreHashIndex.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif
#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#if HAVE_ERRNO_H
#include <errno.h>
#endif

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
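/*
 * For example, this squid.conf line switches to round-robin selection:
 *     store_dir_select_algorithm round-robin
 * Any other value falls through to least-load; see
 * StoreController::init() below.
 */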

void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running on the native Windows port.
     */
#if !_SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_MSWIN_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_MSWIN_

    pid_t pid;

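    /*
     * Reap the child processes forked while creating the store
     * directories (see createOneStore() above), retrying on EINTR.
     */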
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

/**
 * Determine whether the given directory can handle this object
 * size.
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * i.e. any-sized-object swapdirs.  This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}
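
/*
 * Worked example: with min_objsize == 1000 and max_objsize == 100000,
 * a 50000-byte object is accepted, a 100000-byte object is rejected
 * (max_objsize is an exclusive upper bound above), and an object of
 * unknown (-1) size is rejected.
 */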

/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}
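
/*
 * A return of -1 above means no configured SwapDir will accept the
 * object; load values outside the 0..1000 range are treated as
 * overloaded and skipped.
 */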

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage.  But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state.  Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more detail */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
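
/*
 * Tie-breaking example: if two dirs both report load 500, the one with
 * the smaller (tighter-fitting) max_objsize is preferred; if those
 * match too, the dir with more free space wins.
 */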

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
93bc1434
AR
338void
339StoreController::getStats(StoreInfoStats &stats) const
340{
341 if (memStore)
342 memStore->getStats(stats);
343 else {
344 // move this code to a non-shared memory cache class when we have it
345 stats.mem.shared = false;
346 stats.mem.capacity = Config.memMaxSize;
347 stats.mem.size = mem_node::StoreMemSize();
348 stats.mem.count = hot_obj_count;
349 }
350
351 swapDir->getStats(stats);
352
353 // low-level info not specific to memory or disk cache
354 stats.store_entry_count = StoreEntry::inUseCount();
355 stats.mem_object_count = MemObject::inUseCount();
356}

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks go to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient: CPU-wise it would be cheaper to do this
     * sequentially, but I/O-wise the parallelism helps, as it allows more
     * HDD spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

503 if ((++n & 0xFFFF) == 0) {
504 getCurrentTime();
e0236918 505 debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
bf8fe701 506 " entries written so far.");
62e76326 507 }
508 }
6a566b9c 509 }
62e76326 510
6a566b9c 511 /* Flush */
5db6bf73 512 for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 513 dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();
62e76326 514
b2c141d4 515 if (reopen)
62e76326 516 storeDirOpenSwapLogs();
517
e812ecfc 518 getCurrentTime();
62e76326 519
e812ecfc 520 dt = tvSubDsec(start, current_time);
62e76326 521
e0236918
FC
522 debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
523 debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
bf8fe701 524 " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
62e76326 525
62e76326 526
b2c141d4 527 return n;
95dcd2b8 528}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

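/*
 * fsbtoblk() rescales a block count from filesystem block size fsbs
 * to blocks of size bs: e.g. fsbtoblk(n, 512, 1024) == n / 2, while
 * fsbtoblk(n, 4096, 1024) == n * 4.
 */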
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

bool
StoreController::dereference(StoreEntry & e)
{
    bool keepInStoreTable = true; // keep if there are no objections

    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return keepInStoreTable;

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) && keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }

    return keepInStoreTable;
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
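
/*
 * For example, with cache_mem 256 MB (Config.memMaxSize) and
 * maximum_object_size_in_memory 512 KB (Config.Store.maxInMemObjSize),
 * ramLimit is 512 KB, so only entries whose loaded or expected size
 * fits within 512 KB are kept for the local memory cache.
 */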

void
StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        keepInLocalMemory = memStore->keepInLocalMemory(e);
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereference(e)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}

void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus; it's specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
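    /*
     * Worked example (assuming the default 13 KB store_avg_object_size
     * and 20 store_objects_per_bucket): a 100 GB disk cache plus 256 MB
     * cache_mem estimates roughly 8 million objects, targeting ~400k
     * buckets before storeKeyHashBuckets() applies the 64k cap noted
     * above.
     */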
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
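
/*
 * Note: each next() call above discards the previously returned entry
 * and, once the working set is empty, copies whole hash buckets via
 * copyBucket() until an entry is found or the table is exhausted.
 */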

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47,3, "got entries: " << entries.size());
}