/*
 * (Removed gitweb blame-view banner: git.ipfire.org, thirdparty/squid.git,
 *  src/store_dir.cc)
 */
f1dc9b30 1
2/*
30abd221 3 * $Id: store_dir.cc,v 1.161 2007/05/29 13:31:41 amosjeffries Exp $
f1dc9b30 4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
2b6662ba 8 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 9 * ----------------------------------------------------------
f1dc9b30 10 *
2b6662ba 11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
f1dc9b30 19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
cbdec147 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 33 *
f1dc9b30 34 */
35
596dddc1 36#include "squid.h"
e6ccf245 37#include "Store.h"
528b2c61 38#include "MemObject.h"
985c86bc 39#include "SquidTime.h"
d3b3ab85 40#include "SwapDir.h"
85407535 41
c0db87f2 42#if HAVE_STATVFS
43#if HAVE_SYS_STATVFS_H
44#include <sys/statvfs.h>
45#endif
ec15e022 46#endif /* HAVE_STATVFS */
47/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
48#if HAVE_SYS_PARAM_H
49#include <sys/param.h>
203526a1 50#endif
ec15e022 51#if HAVE_SYS_MOUNT_H
52#include <sys/mount.h>
53#endif
54/* Windows and Linux use sys/vfs.h */
6c86a065 55#if HAVE_SYS_VFS_H
56#include <sys/vfs.h>
57#endif
c0db87f2 58
c8f4eac4 59#include "StoreHashIndex.h"
60
/* Forward declarations of the two selectable swap-dir selection policies. */
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

/* The root store: all storage work is delegated to a StoreHashIndex. */
StoreController::StoreController() : swapDir (new StoreHashIndex())
{}

StoreController::~StoreController()
{}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf; least-load is the default (see StoreController::init()).
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 84
9838d6c8 85void
c8f4eac4 86StoreController::init()
596dddc1 87{
c8f4eac4 88 swapDir->init();
62e76326 89
65a53c8e 90 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 91 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
bf8fe701 92 debugs(47, 1, "Using Round Robin store dir selection");
65a53c8e 93 } else {
62e76326 94 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
bf8fe701 95 debugs(47, 1, "Using Least Load store dir selection");
65a53c8e 96 }
85407535 97}
98
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return;                 /* parent returns; the child builds the dirs */

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0);                    /* child exits once creation is done */

#endif
}
122
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    /* Reap every child forked by createOneStore(), retrying on EINTR. */
    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
146
cd748f27 147/*
148 * Determine whether the given directory can handle this object
149 * size
150 *
151 * Note: if the object size is -1, then the only swapdirs that
152 * will return true here are ones that have max_obj_size = -1,
153 * ie any-sized-object swapdirs. This is a good thing.
154 */
c8f4eac4 155bool
156SwapDir::objectSizeIsAcceptable(ssize_t objsize) const
cd748f27 157{
158 /*
159 * If the swapdir's max_obj_size is -1, then it definitely can
160 */
62e76326 161
c8f4eac4 162 if (max_objsize == -1)
163 return true;
d68f43a0 164
165 /*
166 * If the object size is -1, then if the storedir isn't -1 we
167 * can't store it
168 */
c8f4eac4 169 if ((objsize == -1) && (max_objsize != -1))
170 return false;
d68f43a0 171
cd748f27 172 /*
173 * Else, make sure that the max object size is larger than objsize
174 */
c8f4eac4 175 return max_objsize > objsize;
cd748f27 176}
177
178
d141c677 179/*
180 * This new selection scheme simply does round-robin on all SwapDirs.
8e8d4f30 181 * A SwapDir is skipped if it is over the max_size (100%) limit, or
182 * overloaded.
d141c677 183 */
184static int
8e8d4f30 185storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
d141c677 186{
187 static int dirn = 0;
188 int i;
8e8d4f30 189 int load;
c8f4eac4 190 RefCount<SwapDir> sd;
62e76326 191
d141c677 192 for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
62e76326 193 if (++dirn >= Config.cacheSwap.n_configured)
194 dirn = 0;
195
c8f4eac4 196 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 197
198 if (sd->flags.read_only)
199 continue;
200
201 if (sd->cur_size > sd->max_size)
202 continue;
203
707fdc47 204 if (!sd->objectSizeIsAcceptable(e->objectLen()))
62e76326 205 continue;
206
207 /* check for error or overload condition */
208 load = sd->canStore(*e);
209
210 if (load < 0 || load > 1000) {
211 continue;
212 }
213
214 return dirn;
d141c677 215 }
62e76326 216
8e8d4f30 217 return -1;
d141c677 218}
960a01e3 219
a2899918 220/*
cd748f27 221 * Spread load across all of the store directories
222 *
223 * Note: We should modify this later on to prefer sticking objects
224 * in the *tightest fit* swapdir to conserve space, along with the
225 * actual swapdir usage. But for now, this hack will do while
226 * testing, so you should order your swapdirs in the config file
227 * from smallest maxobjsize to unlimited (-1) maxobjsize.
228 *
229 * We also have to choose nleast == nconf since we need to consider
230 * ALL swapdirs, regardless of state. Again, this is a hack while
231 * we sort out the real usefulness of this algorithm.
a2899918 232 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;              /* -1 until a qualifying dir is found */
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size */
    objsize = e->objectLen();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;     /* on-disk size includes the swap header */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;
        load = SD->canStore(*e);

        /* skip dirs reporting an error (<0) or overload (>1000) */
        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit: prefer the tightest size cap */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        /* this dir is the best candidate so far */
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
298
b109de6b 299/*
300 * An entry written to the swap log MUST have the following
301 * properties.
302 * 1. It MUST be a public key. It does no good to log
303 * a public ADD, change the key, then log a private
304 * DEL. So we need to log a DEL before we change a
305 * key from public to private.
cd748f27 306 * 2. It MUST have a valid (> -1) swap_filen.
b109de6b 307 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));  /* only public keys may be logged */
    assert(e->swap_filen >= 0);                 /* entry must own a swap slot */
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    /* delegate to the cache_dir that owns the entry */
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
331
void
StoreController::updateSize(size_t size, int sign)
{
    /* Sizes are accounted per SwapDir; the controller keeps no total of its own. */
    fatal("StoreController has no independent size\n");
}
76fefb77 337
void
SwapDir::updateSize(size_t size, int sign)
{
    /* Round the object size up to whole filesystem blocks, then to KB. */
    int blks = (size + fs.blksize - 1) / fs.blksize;
    int k = (blks * fs.blksize >> 10) * sign;
    cur_size += k;              /* this dir's usage (KB) */
    store_swap_size += k;       /* global swap usage (KB) */

    /* sign > 0 adds an object, sign < 0 removes one */
    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
c932b107 351
/* Append store-wide swap statistics to 'output', then each dir's own stats. */
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %8ld KB\n",
                      (long int) maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity : %d%% used, %d%% free\n",
                      percent((int) store_swap_size, (int) maxSize()),
                      percent((int) (maxSize() - store_swap_size), (int) maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}
370
/* Total configured swap capacity, in KB.
 * if needed, this could be taught to cache the result */
size_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

/* Low-water total across all cache_dirs, in KB. */
size_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
385
/*
 * Called when this cache_dir's filesystem reports it is full: clamp the
 * configured max_size down to current usage so we stop trying to grow it.
 */
void
SwapDir::diskFull()
{
    if (cur_size >= max_size)
        return;                 /* already at or below current usage */

    max_size = cur_size;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
}
95dcd2b8 396
397void
398storeDirOpenSwapLogs(void)
399{
d3b3ab85 400 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 401 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 402}
403
404void
405storeDirCloseSwapLogs(void)
406{
d3b3ab85 407 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 408 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 409}
410
b2c141d4 411/*
412 * storeDirWriteCleanLogs
413 *
414 * Writes a "clean" swap log file from in-memory metadata.
cd748f27 415 * This is a rewrite of the original function to troll each
416 * StoreDir and write the logs, and flush at the end of
417 * the run. Thanks goes to Eric Stern, since this solution
418 * came out of his COSS code.
b2c141d4 419 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;                  /* total entries written */

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    /* refuse to rewrite while any cache_dir index is still being rebuilt */
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    /* phase 1: start a clean-log rewrite on every dir that supports it */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;       /* this dir has no rewrite in progress */

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;       /* this dir is exhausted */

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            /* progress report every 65536 entries */
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, " Finished. Wrote " << n << " entries.");
    debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");


    return n;
}
d141c677 503
/* Create an iterator over stored entries; delegates to the disk index. */
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
510
/* Map a cache_dir index to its Store object. */
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
516
/* Flush pending store state to disk via the disk index. */
void
StoreController::sync(void)
{
    /* sync mem cache? */
    swapDir->sync();
}
523
524/*
1d5161bd 525 * handle callbacks all avaliable fs'es
cd748f27 526 */
/*
 * Run pending store callbacks; returns the aggregate count reported
 * by the underlying stores.
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 540
/*
 * Determine the filesystem block size for 'path' and store it in *blksize.
 * Returns 0 on success; on stat failure logs the error, falls back to
 * 2048 and returns 1.
 */
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;      /* fundamental (fragment) block size */
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 576
577#define fsbtoblk(num, fsbs, bs) \
578 (((fsbs) != 0 && (fsbs) < (bs)) ? \
579 (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/*
 * Fill in total/free space (KB) and total/free inode counts for the
 * filesystem containing 'path'.  Returns 0 on success, 1 on stat failure.
 */
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 613
614void
615allocate_new_swapdir(_SquidConfig::_cacheSwap * swap)
616{
617 if (swap->swapDirs == NULL) {
618 swap->n_allocated = 4;
619 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
620 }
621
622 if (swap->n_allocated == swap->n_configured) {
623 StorePointer *tmp;
624 swap->n_allocated <<= 1;
625 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
626 xmemcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
627 xfree(swap->swapDirs);
628 swap->swapDirs = tmp;
629 }
630}
631
/* Tear down the cache_dir array at shutdown (a no-op on reconfigure). */
void
free_cachedir(_SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;       /* drop our reference to each dir */
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
655
656/* this should be a virtual method on StoreEntry,
657 * i.e. e->referenced()
658 * so that the entry can notify the creating Store
659 */
/*
 * Tell the owning cache_dir (if any) and the memory replacement policy
 * that this entry was just referenced.
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    /* Notify the memory cache that we're referencing this object again */
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
674
675void
676StoreController::dereference(StoreEntry & e)
677{
678 /* Notify the fs that we're not referencing this object any more */
679
680 if (e.swap_filen > -1)
681 e.store()->dereference(e);
682
683 /* Notify the memory cache that we're not referencing this object any more */
684 if (e.mem_obj) {
685 if (mem_policy->Dereferenced)
686 mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
687 }
688}
689
690StoreEntry *
691
692StoreController::get
693 (const cache_key *key)
694{
695
696 return swapDir->get
697 (key);
698}
699
/* Asynchronous lookup by string key: not supported. */
void

StoreController::get
    (String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
707
/* The global store_table must not exist yet; init() creates it. */
StoreHashIndex::StoreHashIndex()
{
    assert (store_table == NULL);
}
712
StoreHashIndex::~StoreHashIndex()
{
    /* Destroy every indexed entry, then release the hash table itself. */
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
721
/*
 * Poll every cache_dir for completed I/O, starting from a rotating
 * cursor so no dir is systematically favoured.  Loops until one full
 * pass produces no events; returns the total event count.
 */
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;                      /* events seen in the current pass */
    static int ndir = 0;        /* rotating start dir, persists across calls */

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            /* safety valve against a dir that never stops reporting I/O */
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);            /* repeat until an idle pass */

    ndir++;

    return result;
}
753
754void
755StoreHashIndex::create()
756{
757 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
758 store(i)->create();
759}
760
761/* Lookup an object in the cache.
762 * return just a reference to object, don't start swapping in yet. */
/* Plain hash-table lookup by cache key; returns NULL on miss. */
StoreEntry *

StoreHashIndex::get
    (const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
774
/* Asynchronous lookup by string key: not supported by the hash index. */
void

StoreHashIndex::get
    (String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
782
/* Size and create the global store_table hash, then start each dir's rebuild. */
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = Store::Root().maxSize() / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++)
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        store(i)->init();

}
822
823size_t
824StoreHashIndex::maxSize() const
825{
826 int i;
827 size_t result = 0;
828
829 for (i = 0; i < Config.cacheSwap.n_configured; i++)
830 result += store(i)->maxSize();
831
832 return result;
833}
834
835size_t
836StoreHashIndex::minSize() const
837{
838 size_t result = 0;
839
840 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
841 result += store(i)->minSize();
842
843 return result;
844}
845
846void
847StoreHashIndex::stat(StoreEntry & output) const
848{
849 int i;
850
851 /* Now go through each store, calling its stat routine */
852
853 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
854 storeAppendPrintf(&output, "\n");
855 store(i)->stat(output);
856 }
857}
858
/* Reference accounting is handled by the individual stores; no-op here. */
void
StoreHashIndex::reference(StoreEntry&)
{}
862
/* Dereference accounting is handled by the individual stores; no-op here. */
void
StoreHashIndex::dereference(StoreEntry&)
{}
866
/* Run replacement/maintenance on every configured cache_dir. */
void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallell" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
881
/* Size accounting happens in each SwapDir::updateSize(); nothing to do here. */
void
StoreHashIndex::updateSize(size_t, int)
{}
885
886void
887StoreHashIndex::sync()
888{
889 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
890 store(i)->sync();
891}
892
/* Start a full walk of the index; URL-filtered search is unimplemented. */
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
901
902CBDATA_CLASS_INIT(StoreSearchHashIndex);
aa839030 903
/* Begin a walk of the hash index at bucket 0, holding a ref to the index. */
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
906
907/* do not link
908StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
909*/
910
/* entries holds bare StoreEntry pointers copied from the hash; nothing owned. */
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
913
/*
 * Callback-style step: the walk is purely in-memory, so advance
 * synchronously and invoke the caller's callback immediately.
 */
void
StoreSearchHashIndex::next(void (callback)(void *cbdata), void *cbdata)
{
    next();
    callback (cbdata);
}
920
/*
 * Advance to the next entry: drop the current one and, when the local
 * batch is exhausted, refill from successive hash buckets.
 * Returns true while an entry remains available.
 */
bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
932
/* This walk has no failure mode; always reports success. */
bool
StoreSearchHashIndex::error() const
{
    return false;
}
938
/* Finished once every hash bucket has been consumed, or when aborted. */
bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
944
945StoreEntry *
946StoreSearchHashIndex::currentItem()
947{
948 if (!entries.size())
949 return NULL;
950
951 return entries.back();
952}
953
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    /* snapshot every entry chained in this bucket */
    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;           /* the next call reads the following bucket */
    debugs(47,3, "got entries: " << entries.size());
}