/*
 * $Id$
 *
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.   Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "Store.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mem_node.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif

#include "StoreHashIndex.h"

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
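
/*
 * Illustrative squid.conf sketch (hypothetical values) that selects
 * the round-robin policy instead of the least-load default:
 *
 *   store_dir_select_algorithm round-robin
 *   cache_dir ufs /var/spool/squid 10240 16 256
 *
 * StoreController::init() below reads the directive and repoints
 * storeDirSelectSwapDir accordingly.
 */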

void
StoreController::init()
{
    if (UsingSmp() && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running on the native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    return min_objsize <= objsize && max_objsize > objsize;
}
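
/*
 * Worked example (illustrative limits only): a swapdir with
 * min_objsize = 0 and max_objsize = -1 accepts everything, including
 * objects of still-unknown (-1) size; one with min_objsize = 0 and
 * max_objsize = 65536 rejects objsize == -1 and accepts any
 * 0 <= objsize < 65536.
 */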

/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
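
/*
 * Tie-break sketch (made-up numbers): if two cache_dirs both report
 * load 500, the one with the smaller non-negative max_objsize wins
 * (the tightest fit); if max_objsize ties as well, the dir with the
 * most free space (max_size - cur_size) is selected.
 */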

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}

void
StoreController::updateSize(int64_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}

void
SwapDir::updateSize(int64_t size, int sign)
{
    int64_t blks = (size + fs.blksize - 1) / fs.blksize;
    int64_t k = ((blks * fs.blksize) >> 10) * sign;
    cur_size += k;

    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
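
/*
 * Worked example (assuming fs.blksize = 1024 bytes): a 1500-byte
 * object rounds up to blks = 2, so cur_size grows by
 * k = (2 * 1024) >> 10 = 2 KB; the same call with sign = -1
 * subtracts those 2 KB when the object is removed.
 */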

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %"PRIu64" KB\n",
                      maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %"PRIu64" KB\n",
                      currentSize());
    storeAppendPrintf(&output, "Current Capacity       : %"PRId64"%% used, %"PRId64"%% free\n",
                      Math::int64Percent(currentSize(), maxSize()),
                      Math::int64Percent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (cur_size >= max_size)
        return;

    max_size = cur_size;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU-wise it is more efficient to do this
     * sequentially, but I/O-wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, "  Finished.  Wrote " << n << " entries.");
    debugs(20, 1, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. That's ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
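
/*
 * Worked example (illustrative numbers): converting 100 filesystem
 * blocks of 512 bytes into 1024-byte blocks gives
 * fsbtoblk(100, 512, 1024) = 100 / (1024 / 512) = 50, while 4096-byte
 * filesystem blocks give 100 * (4096 / 1024) = 400.
 */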
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        StorePointer *tmp;
        swap->n_allocated <<= 1;
        tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
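
/*
 * Growth pattern (illustrative): swapDirs starts with 4 slots and
 * doubles (4, 8, 16, ...) whenever n_configured catches up with
 * n_allocated, copying the existing pointers into the new array.
 */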

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->dereference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
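
/*
 * Lookup order above: the local/in-transit store_table first, then
 * the shared memory cache (when one exists), then each active
 * cache_dir in rotating order; a miss at every level returns NULL.
 */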

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;
    if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    dereference(e);

    // XXX: Rock store specific: Since each SwapDir controls its index,
    // unlocked entries should not stay in the global store_table.
    if (e.swap_filen >= 0) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
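
/*
 * Decision summary (as implemented above): with a shared memory
 * cache, MemStore decides whether to keep the idle entry; otherwise
 * the entry may stay in local memory while mem_node::InUseCount()
 * remains within store_pages_max. Disk-backed entries (swap_filen
 * >= 0) are instead dropped from the global store_table, since
 * their SwapDir owns the authoritative index.
 */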

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ndir++;

    return result;
}

void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* This is very bogus; it's specific to any Store maintaining an
     * in-core index, not global. */
    size_t buckets = (Store::Root().maxSize() + (Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << (Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours.  */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << (Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");
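
    /*
     * Worked example (hypothetical configuration): a 10 GB
     * (10485760 KB) swap plus 256 MB (262144 KB) of cache_mem with a
     * 13 KB avgObjectSize estimates 10747904 / 13 = 826761 objects;
     * at 20 objects per bucket that targets roughly 41338 buckets
     * before storeKeyHashBuckets() rounds the final count.
     */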

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry&)
{}

void
StoreHashIndex::dereference(StoreEntry&)
{}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::updateSize(int64_t, int)
{}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

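/*
 * Iteration sketch (hypothetical caller; synchronous use only):
 *
 *   StoreSearch *search = Store::Root().search(String(), NULL);
 *   while (!search->isDone() && search->next()) {
 *       StoreEntry *e = search->currentItem();
 *       // inspect e here
 *   }
 *
 * StoreSearchHashIndex::next() below drains one copied hash bucket
 * at a time and returns false once every bucket has been visited.
 */
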
CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}