]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
Removed squid-old.h
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
262a0e14 3 * $Id$
f1dc9b30 4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
2b6662ba 8 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 9 * ----------------------------------------------------------
f1dc9b30 10 *
2b6662ba 11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
f1dc9b30 19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
26ac0430 24 *
f1dc9b30 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
26ac0430 29 *
f1dc9b30 30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
cbdec147 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 33 *
f1dc9b30 34 */
35
582c2af2
FC
36#include "squid.h"
37#include "globals.h"
38#include "mem_node.h"
528b2c61 39#include "MemObject.h"
9487bae9 40#include "MemStore.h"
582c2af2
FC
41#include "profiler/Profiler.h"
42#include "protos.h"
a98bcbee 43#include "SquidMath.h"
985c86bc 44#include "SquidTime.h"
582c2af2 45#include "Store.h"
d3b3ab85 46#include "SwapDir.h"
4b981814 47#include "swap_log_op.h"
85407535 48
c0db87f2 49#if HAVE_STATVFS
50#if HAVE_SYS_STATVFS_H
51#include <sys/statvfs.h>
52#endif
ec15e022 53#endif /* HAVE_STATVFS */
54/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
55#if HAVE_SYS_PARAM_H
56#include <sys/param.h>
203526a1 57#endif
ec15e022 58#if HAVE_SYS_MOUNT_H
59#include <sys/mount.h>
60#endif
61/* Windows and Linux use sys/vfs.h */
6c86a065 62#if HAVE_SYS_VFS_H
63#include <sys/vfs.h>
64#endif
582c2af2
FC
65#if HAVE_SYS_WAIT_H
66#include <sys/wait.h>
67#endif
c0db87f2 68
c8f4eac4 69#include "StoreHashIndex.h"
70
65a53c8e 71static STDIRSELECT storeDirSelectSwapDirRoundRobin;
72static STDIRSELECT storeDirSelectSwapDirLeastLoad;
c8f4eac4 73
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
bef81ea5 82
// The on-disk index is always created; the shared memory cache (memStore)
// stays NULL until init() decides whether this process should have one.
StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}
86
StoreController::~StoreController()
{
    // swapDir is reference-counted and cleans itself up; memStore is owned raw
    delete memStore;
}
65a53c8e 91
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. It defaults to least-load; StoreController::init() may
 * switch it to round-robin.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 97
9838d6c8 98void
c8f4eac4 99StoreController::init()
596dddc1 100{
57af1e3f 101 if (Config.memShared && IamWorkerProcess()) {
60be8b2d
AR
102 memStore = new MemStore;
103 memStore->init();
104 }
9487bae9 105
c8f4eac4 106 swapDir->init();
62e76326 107
65a53c8e 108 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 109 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
e0236918 110 debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
65a53c8e 111 } else {
62e76326 112 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
e0236918 113 debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
65a53c8e 114 }
85407535 115}
116
/// Create the on-disk structures for one store, forking a child per store
/// where fork() exists so multiple stores can be created in parallel.
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_MSWIN_

    if (fork())
        return; // parent: the child does the actual disk work

#endif

    aStore.create();

#if !_SQUID_MSWIN_

    exit(0); // child: creation done, terminate

#endif
}
140
/// Create all configured stores, then reap every child process forked by
/// createOneStore() before returning.
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_MSWIN_

    pid_t pid;

    // wait for all creation children; retry on EINTR
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
164
a8a33c46 165/**
cd748f27 166 * Determine whether the given directory can handle this object
167 * size
168 *
169 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 170 * will return true here are ones that have min and max unset,
cd748f27 171 * ie any-sized-object swapdirs. This is a good thing.
172 */
c8f4eac4 173bool
3e62bd58 174SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 175{
a8a33c46 176 // If the swapdir has no range limits, then it definitely can
b6662ffd 177 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 178 return true;
d68f43a0 179
180 /*
a8a33c46
A
181 * If the object size is -1 and the storedir has limits we
182 * can't store it there.
d68f43a0 183 */
a8a33c46 184 if (objsize == -1)
c8f4eac4 185 return false;
d68f43a0 186
a8a33c46 187 // Else, make sure that the object size will fit.
b475997c
AJ
188 if (max_objsize == -1 && min_objsize <= objsize)
189 return true;
190 else
191 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 192}
193
194
d141c677 195/*
196 * This new selection scheme simply does round-robin on all SwapDirs.
8e8d4f30 197 * A SwapDir is skipped if it is over the max_size (100%) limit, or
198 * overloaded.
d141c677 199 */
200static int
8e8d4f30 201storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
d141c677 202{
203 static int dirn = 0;
204 int i;
8e8d4f30 205 int load;
c8f4eac4 206 RefCount<SwapDir> sd;
62e76326 207
aa1a691e
AR
208 // e->objectLen() is negative at this point when we are still STORE_PENDING
209 ssize_t objsize = e->mem_obj->expectedReplySize();
246e6cc1
AJ
210 if (objsize != -1)
211 objsize += e->mem_obj->swap_hdr_sz;
212
5db6bf73 213 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
62e76326 214 if (++dirn >= Config.cacheSwap.n_configured)
215 dirn = 0;
216
c8f4eac4 217 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 218
aa1a691e 219 if (!sd->canStore(*e, objsize, load))
62e76326 220 continue;
221
62e76326 222 if (load < 0 || load > 1000) {
223 continue;
224 }
225
226 return dirn;
d141c677 227 }
62e76326 228
8e8d4f30 229 return -1;
d141c677 230}
960a01e3 231
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1; // -1: no acceptable dir found yet
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz; // include swap metadata overhead

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0; // cleared for all; set below for the winner only

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue; // implausible load value; skip

        if (load > least_load)
            continue; // a better-loaded dir was already found

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the best candidate so far
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
301
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 *    a public ADD, change the key, then log a private
 *    DEL. So we need to log a DEL before we change a
 *    key from public to private.
 * 2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate to the owning cache_dir's log implementation
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
334
93bc1434
AR
335void
336StoreController::getStats(StoreInfoStats &stats) const
337{
338 if (memStore)
339 memStore->getStats(stats);
340 else {
341 // move this code to a non-shared memory cache class when we have it
342 stats.mem.shared = false;
343 stats.mem.capacity = Config.memMaxSize;
344 stats.mem.size = mem_node::StoreMemSize();
345 stats.mem.count = hot_obj_count;
346 }
347
348 swapDir->getStats(stats);
349
350 // low-level info not specific to memory or disk cache
351 stats.store_entry_count = StoreEntry::inUseCount();
352 stats.mem_object_count = MemObject::inUseCount();
353}
354
/// Append a human-readable summary of store usage to the given entry,
/// followed by per-store details from the memory cache and each cache_dir.
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
375
/* if needed, this could be taught to cache the result */
/// total configured disk capacity, in bytes
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
62e76326 383
/// minimum disk space the stores will try to keep occupied, in bytes
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
390
39c1e1d9
DK
/// current disk space used by all cache_dirs, in bytes
uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}
396
/// current number of objects stored across all cache_dirs
uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}
402
af2fda07
DK
/// the largest object size any cache_dir accepts; -1 means unlimited
int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
408
/// Called when the underlying disk reports out-of-space: shrink this dir's
/// configured limit down to what the disk actually holds right now.
void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return; // already at or past the configured limit; nothing to shrink

    // the OS ran out of space before we reached our configured maximum
    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
95dcd2b8 419
420void
421storeDirOpenSwapLogs(void)
422{
d3b3ab85 423 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 424 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 425}
426
427void
428storeDirCloseSwapLogs(void)
429{
d3b3ab85 430 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 431 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 432}
433
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0; // entries written so far

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse while any cache_dir index is incomplete (see store_dirs_rebuilding)
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // phase 1: ask each dir to start its clean-log rewrite
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    // phase 2: round-robin one entry per dir until every dir is exhausted
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue; // writeCleanStart() failed for this dir

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue; // this dir is finished

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 526
/// Create an iterator over cached entries matching url/request.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
533
/// map a cache_dir index to its Store object
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
539
14911a4e
AR
/// like store() but returns a SwapDir reference, asserting the cast succeeds
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
547
/// flush the memory cache (if any) and all cache_dirs to stable storage
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
555
/*
 * handle callbacks for all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 572
/// Determine the filesystem block size for path.
/// On failure *blksize is defaulted to 2048 and 1 is returned; 0 on success.
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 608
/* convert filesystem blocks to blocks of size bs, guarding against
 * filesystem block sizes smaller than bs */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Report total/free space (KB) and total/free inode counts for the
/// filesystem holding path. Returns 0 on success, 1 on failure.
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 645
646void
e1f7507e 647allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 648{
649 if (swap->swapDirs == NULL) {
650 swap->n_allocated = 4;
7d3c4ca1 651 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 652 }
653
654 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 655 swap->n_allocated <<= 1;
7d3c4ca1 656 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 657 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 658 xfree(swap->swapDirs);
659 swap->swapDirs = tmp;
660 }
661}
662
663void
e1f7507e 664free_cachedir(SquidConfig::_cacheSwap * swap)
c8f4eac4 665{
666 int i;
667 /* DON'T FREE THESE FOR RECONFIGURE */
668
669 if (reconfiguring)
670 return;
671
5db6bf73 672 for (i = 0; i < swap->n_configured; ++i) {
c8f4eac4 673 /* TODO XXX this lets the swapdir free resources asynchronously
674 * swap->swapDirs[i]->deactivate();
26ac0430 675 * but there may be such a means already.
c8f4eac4 676 * RBC 20041225
677 */
678 swap->swapDirs[i] = NULL;
679 }
680
681 safe_free(swap->swapDirs);
682 swap->swapDirs = NULL;
683 swap->n_allocated = 0;
684 swap->n_configured = 0;
685}
686
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
713
/// Notify stores and replacement policies that e lost a reference.
/// \returns false if the caller should remove e from the global store_table
bool
StoreController::dereference(StoreEntry & e)
{
    bool keepInStoreTable = true; // keep if there are no objections

    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return keepInStoreTable;

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) && keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }

    return keepInStoreTable;
}
740
/// Look up an entry by cache key, trying (in order) the in-transit/local
/// index, the shared memory cache, and then each active cache_dir.
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue; // skip dirs not owned by this process/strand

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
783
/// asynchronous lookup API; not supported by this controller
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
789
96a7de88
DK
790// move this into [non-shared] memory cache class when we have one
791/// whether e should be kept in local RAM for possible future caching
792bool
793StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
794{
795 if (!e.memoryCachable())
796 return false;
797
798 // does the current and expected size obey memory caching limits?
799 assert(e.mem_obj);
800 const int64_t loadedSize = e.mem_obj->endOffset();
801 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
802 const int64_t ramSize = max(loadedSize, expectedSize);
803 const int64_t ramLimit = min(
817138f8
A
804 static_cast<int64_t>(Config.memMaxSize),
805 static_cast<int64_t>(Config.Store.maxInMemObjSize));
96a7de88
DK
806 return ramSize <= ramLimit;
807}
808
809void
810StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
811{
812 bool keepInLocalMemory = false;
813 if (memStore)
814 keepInLocalMemory = memStore->keepInLocalMemory(e);
815 else
816 keepInLocalMemory = keepForLocalMemoryCache(e);
817
818 debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
819
820 if (!keepInLocalMemory)
821 e.trimMemory(preserveSwappable);
822}
823
9487bae9
AR
/// Called when an entry loses its last lock: decide whether it stays in the
/// local memory cache and/or the global store_table, or is destroyed.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return; // e is gone; do not touch it below
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
861
StoreHashIndex::StoreHashIndex()
{
    // the global store_table must not exist yet; init() creates it
    if (store_table)
        abort();
    assert (store_table == NULL);
}
868
StoreHashIndex::~StoreHashIndex()
{
    // destroy all remaining entries and then the hash table itself
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
877
/// Poll cache_dirs for I/O callbacks, round-robin, until a full pass
/// produces no work. Returns the total amount of work done.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0; // static: resume round-robin where we left off

    do {
        j = 0; // work done during this pass

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // guard against a dir that never stops reporting work
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0); // repeat until a whole pass yields no I/O

    ++ndir; // NOTE(review): extra increment skews the next start dir by one

    return result;
}
909
910void
911StoreHashIndex::create()
912{
5db6bf73 913 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
14911a4e
AR
914 if (dir(i).active())
915 store(i)->create();
916 }
c8f4eac4 917}
918
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
930
/// asynchronous lookup API; not supported by the hash index
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
936
/// Size and create the global store_table, then initialize (and start
/// index-rebuilding for) every active cache_dir.
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
978
12e11a5c 979uint64_t
c8f4eac4 980StoreHashIndex::maxSize() const
981{
12e11a5c 982 uint64_t result = 0;
c8f4eac4 983
5db6bf73 984 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
985 if (dir(i).doReportStat())
986 result += store(i)->maxSize();
987 }
c8f4eac4 988
989 return result;
990}
991
12e11a5c 992uint64_t
c8f4eac4 993StoreHashIndex::minSize() const
994{
12e11a5c 995 uint64_t result = 0;
c8f4eac4 996
5db6bf73 997 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
998 if (dir(i).doReportStat())
999 result += store(i)->minSize();
1000 }
1001
1002 return result;
1003}
1004
1005uint64_t
1006StoreHashIndex::currentSize() const
1007{
1008 uint64_t result = 0;
1009
5db6bf73 1010 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1011 if (dir(i).doReportStat())
1012 result += store(i)->currentSize();
1013 }
1014
1015 return result;
1016}
1017
1018uint64_t
1019StoreHashIndex::currentCount() const
1020{
1021 uint64_t result = 0;
1022
5db6bf73 1023 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
39c1e1d9
DK
1024 if (dir(i).doReportStat())
1025 result += store(i)->currentCount();
1026 }
c8f4eac4 1027
1028 return result;
1029}
1030
af2fda07
DK
1031int64_t
1032StoreHashIndex::maxObjectSize() const
1033{
1034 int64_t result = -1;
1035
5db6bf73 1036 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
af2fda07
DK
1037 if (dir(i).active() && store(i)->maxObjectSize() > result)
1038 result = store(i)->maxObjectSize();
1039 }
1040
1041 return result;
1042}
1043
93bc1434
AR
1044void
1045StoreHashIndex::getStats(StoreInfoStats &stats) const
1046{
1047 // accumulate per-disk cache stats
5db6bf73 1048 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
93bc1434
AR
1049 StoreInfoStats dirStats;
1050 store(i)->getStats(dirStats);
1051 stats += dirStats;
1052 }
1053
1054 // common to all disks
1055 stats.swap.open_disk_fd = store_open_disk_fd;
1056
1057 // memory cache stats are collected in StoreController::getStats(), for now
1058}
1059
c8f4eac4 1060void
1061StoreHashIndex::stat(StoreEntry & output) const
1062{
1063 int i;
1064
1065 /* Now go through each store, calling its stat routine */
1066
5db6bf73 1067 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1068 storeAppendPrintf(&output, "\n");
1069 store(i)->stat(output);
1070 }
1071}
1072
/// Tells the entry's owning store that the entry was referenced,
/// delegating replacement-policy bookkeeping to that store.
void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}
c8f4eac4 1078
/// Tells the entry's owning store that the entry was dereferenced;
/// returns that store's verdict (see the store's dereference() contract).
bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}
c8f4eac4 1084
1085void
1086StoreHashIndex::maintain()
1087{
1088 int i;
1089 /* walk each fs */
1090
5db6bf73 1091 for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
c8f4eac4 1092 /* XXX FixMe: This should be done "in parallell" on the different
1093 * cache_dirs, not one at a time.
1094 */
1095 /* call the maintain function .. */
1096 store(i)->maintain();
1097 }
1098}
1099
c8f4eac4 1100void
1101StoreHashIndex::sync()
1102{
1103 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1104 store(i)->sync();
1105}
1106
1107StoreSearch *
30abd221 1108StoreHashIndex::search(String const url, HttpRequest *)
c8f4eac4 1109{
1110 if (url.size())
1111 fatal ("Cannot search by url yet\n");
1112
1113 return new StoreSearchHashIndex (this);
1114}
1115
1116CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 1117
/// Constructs a search positioned before the first hash bucket of the
/// given swap dir index; no entries are loaded until next() is called.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
1120
/* deliberately declared but never defined: copying a StoreSearchHashIndex
 * is unsupported, so any accidental use of the copy constructor fails at
 * link time.
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/
1124
/// No explicit cleanup: entries holds non-owning StoreEntry pointers and
/// members release themselves.
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
1127
/// Callback-style step: advances the search synchronously via next(),
/// then unconditionally invokes aCallback with aCallbackData. The caller
/// must check isDone()/currentItem() inside the callback.
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}
1134
1135bool
1136StoreSearchHashIndex::next()
1137{
1138 if (entries.size())
1139 entries.pop_back();
1140
1141 while (!isDone() && !entries.size())
1142 copyBucket();
1143
1144 return currentItem() != NULL;
1145}
1146
/// This search implementation cannot fail, so it never reports an error.
bool
StoreSearchHashIndex::error() const
{
    return false;
}
1152
1153bool
1154StoreSearchHashIndex::isDone() const
1155{
1156 return bucket >= store_hash_buckets || _done;
1157}
1158
1159StoreEntry *
1160StoreSearchHashIndex::currentItem()
1161{
1162 if (!entries.size())
1163 return NULL;
1164
1165 return entries.back();
1166}
1167
1168void
1169StoreSearchHashIndex::copyBucket()
1170{
1171 /* probably need to lock the store entries...
1172 * we copy them all to prevent races on the links. */
1173 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
1174 assert (!entries.size());
1175 hash_link *link_ptr = NULL;
1176 hash_link *link_next = NULL;
1177 link_next = hash_get_bucket(store_table, bucket);
1178
1179 while (NULL != (link_ptr = link_next)) {
1180 link_next = link_ptr->next;
1181 StoreEntry *e = (StoreEntry *) link_ptr;
1182
1183 entries.push_back(e);
1184 }
1185
5db6bf73 1186 ++bucket;
c8f4eac4 1187 debugs(47,3, "got entries: " << entries.size());
1188}