]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store_dir.cc
Bug 3397: do not mark connection as opened until after SYN-ACK
[thirdparty/squid.git] / src / store_dir.cc
CommitLineData
f1dc9b30 1
2/*
262a0e14 3 * $Id$
f1dc9b30 4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
2b6662ba 8 * SQUID Web Proxy Cache http://www.squid-cache.org/
e25c139f 9 * ----------------------------------------------------------
f1dc9b30 10 *
2b6662ba 11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
f1dc9b30 19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
26ac0430 24 *
f1dc9b30 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
26ac0430 29 *
f1dc9b30 30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
cbdec147 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
e25c139f 33 *
f1dc9b30 34 */
35
f7f3304a 36#include "squid-old.h"
e6ccf245 37#include "Store.h"
528b2c61 38#include "MemObject.h"
9487bae9
AR
39#include "MemStore.h"
40#include "mem_node.h"
a98bcbee 41#include "SquidMath.h"
985c86bc 42#include "SquidTime.h"
d3b3ab85 43#include "SwapDir.h"
4b981814 44#include "swap_log_op.h"
85407535 45
c0db87f2 46#if HAVE_STATVFS
47#if HAVE_SYS_STATVFS_H
48#include <sys/statvfs.h>
49#endif
ec15e022 50#endif /* HAVE_STATVFS */
51/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
52#if HAVE_SYS_PARAM_H
53#include <sys/param.h>
203526a1 54#endif
ec15e022 55#if HAVE_SYS_MOUNT_H
56#include <sys/mount.h>
57#endif
58/* Windows and Linux use sys/vfs.h */
6c86a065 59#if HAVE_SYS_VFS_H
60#include <sys/vfs.h>
61#endif
c0db87f2 62
c8f4eac4 63#include "StoreHashIndex.h"
64
/* Forward declarations for the two configurable cache_dir selection
 * algorithms; squid.conf's 'store_dir_select_algorithm' picks one. */
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;
c8f4eac4 67
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
bef81ea5 76
// Construct the controller over a disk-store index. The shared memory
// cache (memStore) is created later, in init(), and only when enabled.
StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}
80
StoreController::~StoreController()
{
    delete memStore; // NULL when no shared memory cache was created
}
65a53c8e 85
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. It defaults to least-load and may be switched to
 * round-robin by StoreController::init().
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
d141c677 91
9838d6c8 92void
c8f4eac4 93StoreController::init()
596dddc1 94{
57af1e3f 95 if (Config.memShared && IamWorkerProcess()) {
60be8b2d
AR
96 memStore = new MemStore;
97 memStore->init();
98 }
9487bae9 99
c8f4eac4 100 swapDir->init();
62e76326 101
65a53c8e 102 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
62e76326 103 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
bf8fe701 104 debugs(47, 1, "Using Round Robin store dir selection");
65a53c8e 105 } else {
62e76326 106 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
bf8fe701 107 debugs(47, 1, "Using Least Load store dir selection");
65a53c8e 108 }
85407535 109}
110
/// Create the on-disk structures for one store, forking a child per
/// store where fork() is available so creation runs in parallel.
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_MSWIN_

    // parent returns immediately; the child does the work below
    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_MSWIN_

    // the child process exits once its store has been created
    exit(0);

#endif
}
134
/// Create all configured stores, then reap the children forked by
/// createOneStore() (no-op reaping on Windows where fork() is absent).
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_MSWIN_

    pid_t pid;

    // wait for every forked creator; retry when interrupted by a signal
    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
158
a8a33c46 159/**
cd748f27 160 * Determine whether the given directory can handle this object
161 * size
162 *
163 * Note: if the object size is -1, then the only swapdirs that
a8a33c46 164 * will return true here are ones that have min and max unset,
cd748f27 165 * ie any-sized-object swapdirs. This is a good thing.
166 */
c8f4eac4 167bool
3e62bd58 168SwapDir::objectSizeIsAcceptable(int64_t objsize) const
cd748f27 169{
a8a33c46 170 // If the swapdir has no range limits, then it definitely can
b6662ffd 171 if (min_objsize <= 0 && max_objsize == -1)
c8f4eac4 172 return true;
d68f43a0 173
174 /*
a8a33c46
A
175 * If the object size is -1 and the storedir has limits we
176 * can't store it there.
d68f43a0 177 */
a8a33c46 178 if (objsize == -1)
c8f4eac4 179 return false;
d68f43a0 180
a8a33c46 181 // Else, make sure that the object size will fit.
b475997c
AJ
182 if (max_objsize == -1 && min_objsize <= objsize)
183 return true;
184 else
185 return min_objsize <= objsize && max_objsize > objsize;
cd748f27 186}
187
188
d141c677 189/*
190 * This new selection scheme simply does round-robin on all SwapDirs.
8e8d4f30 191 * A SwapDir is skipped if it is over the max_size (100%) limit, or
192 * overloaded.
d141c677 193 */
194static int
8e8d4f30 195storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
d141c677 196{
197 static int dirn = 0;
198 int i;
8e8d4f30 199 int load;
c8f4eac4 200 RefCount<SwapDir> sd;
62e76326 201
aa1a691e
AR
202 // e->objectLen() is negative at this point when we are still STORE_PENDING
203 ssize_t objsize = e->mem_obj->expectedReplySize();
246e6cc1
AJ
204 if (objsize != -1)
205 objsize += e->mem_obj->swap_hdr_sz;
206
ada9124c 207 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
62e76326 208 if (++dirn >= Config.cacheSwap.n_configured)
209 dirn = 0;
210
c8f4eac4 211 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
62e76326 212
aa1a691e 213 if (!sd->canStore(*e, objsize, load))
62e76326 214 continue;
215
62e76326 216 if (load < 0 || load > 1000) {
217 continue;
218 }
219
220 return dirn;
d141c677 221 }
62e76326 222
8e8d4f30 223 return -1;
d141c677 224}
960a01e3 225
a2899918 226/*
cd748f27 227 * Spread load across all of the store directories
228 *
229 * Note: We should modify this later on to prefer sticking objects
230 * in the *tightest fit* swapdir to conserve space, along with the
26ac0430 231 * actual swapdir usage. But for now, this hack will do while
cd748f27 232 * testing, so you should order your swapdirs in the config file
233 * from smallest maxobjsize to unlimited (-1) maxobjsize.
234 *
235 * We also have to choose nleast == nconf since we need to consider
236 * ALL swapdirs, regardless of state. Again, this is a hack while
237 * we sort out the real usefulness of this algorithm.
a2899918 238 */
65a53c8e 239static int
240storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
a2899918 241{
cc34568d 242 int64_t most_free = 0;
8e8d4f30 243 ssize_t least_objsize = -1;
244 int least_load = INT_MAX;
cd748f27 245 int load;
246 int dirn = -1;
247 int i;
c8f4eac4 248 RefCount<SwapDir> SD;
cd748f27 249
aa1a691e
AR
250 // e->objectLen() is negative at this point when we are still STORE_PENDING
251 ssize_t objsize = e->mem_obj->expectedReplySize();
62e76326 252
cd748f27 253 if (objsize != -1)
62e76326 254 objsize += e->mem_obj->swap_hdr_sz;
255
cd748f27 256 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
c8f4eac4 257 SD = dynamic_cast<SwapDir *>(INDEXSD(i));
62e76326 258 SD->flags.selected = 0;
62e76326 259
aa1a691e 260 if (!SD->canStore(*e, objsize, load))
62e76326 261 continue;
262
aa1a691e 263 if (load < 0 || load > 1000)
62e76326 264 continue;
265
266 if (load > least_load)
267 continue;
268
cc34568d 269 const int64_t cur_free = SD->maxSize() - SD->currentSize();
62e76326 270
271 /* If the load is equal, then look in more details */
272 if (load == least_load) {
273 /* closest max_objsize fit */
274
275 if (least_objsize != -1)
276 if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
277 continue;
278
279 /* most free */
280 if (cur_free < most_free)
281 continue;
282 }
283
284 least_load = load;
285 least_objsize = SD->max_objsize;
286 most_free = cur_free;
287 dirn = i;
a2899918 288 }
62e76326 289
ade906c8 290 if (dirn >= 0)
c8f4eac4 291 dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;
62e76326 292
cd748f27 293 return dirn;
596dddc1 294}
295
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 * a public ADD, change the key, then log a private
 * DEL. So we need to log a DEL before we change a
 * key from public to private.
 * 2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual write to the SwapDir that owns the entry
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
328
/// Collect cache statistics: memory cache (shared or local), disk
/// caches, and store-wide object counters.
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
348
c932b107 349void
c8f4eac4 350StoreController::stat(StoreEntry &output) const
c932b107 351{
c8f4eac4 352 storeAppendPrintf(&output, "Store Directory Statistics:\n");
353 storeAppendPrintf(&output, "Store Entries : %lu\n",
62e76326 354 (unsigned long int)StoreEntry::inUseCount());
12e11a5c 355 storeAppendPrintf(&output, "Maximum Swap Size : %"PRIu64" KB\n",
cc34568d 356 maxSize() >> 10);
57f583f1 357 storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
cc34568d 358 currentSize() / 1024.0);
57f583f1 359 storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
cc34568d
DK
360 Math::doublePercent(currentSize(), maxSize()),
361 Math::doublePercent((maxSize() - currentSize()), maxSize()));
ada9124c
AR
362
363 if (memStore)
364 memStore->stat(output);
e3ef2b09 365
c8f4eac4 366 /* now the swapDir */
367 swapDir->stat(output);
5d406e78 368}
369
/* if needed, this could be taught to cache the result */
/// Total configured capacity of all disk caches, in bytes.
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
62e76326 377
/// Minimum (reserved) size across all disk caches, in bytes.
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
384
/// Current number of bytes stored on disk, summed over all cache_dirs.
uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}
390
/// Current number of objects stored on disk, summed over all cache_dirs.
uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}
396
/// Largest object size any disk cache can accept (-1 if unlimited).
int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
402
/// React to the disk running out of space: clamp the configured
/// cache_dir limit down to the currently used size.
void
SwapDir::diskFull()
{
    // already at (or beyond) the limit; nothing to shrink
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
95dcd2b8 413
414void
415storeDirOpenSwapLogs(void)
416{
d3b3ab85 417 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 418 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
95dcd2b8 419}
420
421void
422storeDirCloseSwapLogs(void)
423{
d3b3ab85 424 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
c8f4eac4 425 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
95dcd2b8 426}
427
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written; 0 when aborted because the
 * cache_dirs are still being rebuilt.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0; // entries written across all cache_dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // the in-memory index is incomplete during rebuild; refuse to write
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // start a clean-log writer on each dir; dirs that fail are skipped
    // below because their cleanLog pointer stays NULL
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        // round-robin: write one entry per dir per pass
        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1; // this dir still has entries; loop again

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 written entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, " Finished. Wrote " << n << " entries.");
    debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
d141c677 520
/// Create an iterator over cached entries (disk index only for now).
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
527
/// The x-th configured cache_dir, as a generic Store pointer.
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
533
/// The i-th configured cache_dir as a SwapDir reference; asserts that
/// the configured store really is a SwapDir.
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
541
/// Flush pending state: shared memory cache first (if any), then disks.
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
549
/*
 * handle callbacks all available fs'es
 * Returns the number of callbacks performed.
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
90d42c28 566
/*
 * Determine the filesystem block size for the given path.
 * On failure, stores the 2048-byte fallback and returns 1; on success
 * stores the real block size (floored at 512 -> 2048) and returns 0.
 */
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
781d6656 602
/* Convert a filesystem block count between block sizes without overflow */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/*
 * Report total/free disk space (in KB) and total/free inode counts for
 * the filesystem holding path. Returns 0 on success, 1 on failure.
 */
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
c8f4eac4 639
640void
e1f7507e 641allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
c8f4eac4 642{
643 if (swap->swapDirs == NULL) {
644 swap->n_allocated = 4;
7d3c4ca1 645 swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
c8f4eac4 646 }
647
648 if (swap->n_allocated == swap->n_configured) {
c8f4eac4 649 swap->n_allocated <<= 1;
7d3c4ca1 650 SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
41d00cd3 651 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
c8f4eac4 652 xfree(swap->swapDirs);
653 swap->swapDirs = tmp;
654 }
655}
656
/// Release the cacheSwap array and its SwapDirs on shutdown; a no-op
/// during reconfiguration, when the dirs must survive.
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        // clearing the smart pointer releases the SwapDir reference
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
680
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
707
/// Notify all stores that the entry is no longer referenced.
/// Returns false when some store wants the entry dropped from the
/// global store_table (see handleIdleEntry()).
bool
StoreController::dereference(StoreEntry & e)
{
    bool keepInStoreTable = true; // keep if there are no objections

    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return keepInStoreTable;

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) && keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }

    return keepInStoreTable;
}
734
/// Look up an entry by key: first the global (in-transit) table, then
/// the shared memory cache, then each active cache_dir in round-robin
/// order. Returns NULL when no store has the entry.
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));

            // skip dirs not active in this process
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
777
/// Asynchronous lookup by URL string: not supported by the controller.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
783
/// Decide the fate of an entry that just became idle (unlocked): keep
/// it in the local memory cache, hand it to the shared memory cache,
/// purge its memory, or destroy it outright.
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
822
StoreHashIndex::StoreHashIndex()
{
    // only one index may own the global store_table
    if (store_table)
        abort();
    assert (store_table == NULL);
}
829
StoreHashIndex::~StoreHashIndex()
{
    // destroy every indexed entry, then the hash table itself
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
838
/// Run I/O completion callbacks for each store, round-robin, until one
/// full pass performs no work. Returns the total callback count.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0; // round-robin position, persists across calls

    do {
        j = 0; // callbacks performed during this pass

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // sanity guard against a store that never stops reporting I/O
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ndir++;

    return result;
}
870
871void
872StoreHashIndex::create()
873{
14911a4e
AR
874 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
875 if (dir(i).active())
876 store(i)->create();
877 }
c8f4eac4 878}
879
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p; // NULL when the key is not indexed
}
891
/// Asynchronous lookup by URL string: not supported by the hash index.
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
897
/// Size and create the global store_table, then initialize (and start
/// index rebuild for) every active cache_dir.
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, 1, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
939
12e11a5c 940uint64_t
c8f4eac4 941StoreHashIndex::maxSize() const
942{
12e11a5c 943 uint64_t result = 0;
c8f4eac4 944
39c1e1d9
DK
945 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
946 if (dir(i).doReportStat())
947 result += store(i)->maxSize();
948 }
c8f4eac4 949
950 return result;
951}
952
12e11a5c 953uint64_t
c8f4eac4 954StoreHashIndex::minSize() const
955{
12e11a5c 956 uint64_t result = 0;
c8f4eac4 957
39c1e1d9
DK
958 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
959 if (dir(i).doReportStat())
960 result += store(i)->minSize();
961 }
962
963 return result;
964}
965
966uint64_t
967StoreHashIndex::currentSize() const
968{
969 uint64_t result = 0;
970
971 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
972 if (dir(i).doReportStat())
973 result += store(i)->currentSize();
974 }
975
976 return result;
977}
978
979uint64_t
980StoreHashIndex::currentCount() const
981{
982 uint64_t result = 0;
983
984 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
985 if (dir(i).doReportStat())
986 result += store(i)->currentCount();
987 }
c8f4eac4 988
989 return result;
990}
991
af2fda07
DK
992int64_t
993StoreHashIndex::maxObjectSize() const
994{
995 int64_t result = -1;
996
997 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
998 if (dir(i).active() && store(i)->maxObjectSize() > result)
999 result = store(i)->maxObjectSize();
1000 }
1001
1002 return result;
1003}
1004
/// Accumulate per-disk statistics and the shared open-disk-FD counter.
void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}
1020
c8f4eac4 1021void
1022StoreHashIndex::stat(StoreEntry & output) const
1023{
1024 int i;
1025
1026 /* Now go through each store, calling its stat routine */
1027
1028 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
1029 storeAppendPrintf(&output, "\n");
1030 store(i)->stat(output);
1031 }
1032}
1033
/// Forward the reference notification to the SwapDir owning the entry.
void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}
c8f4eac4 1039
/// Let the owning SwapDir decide whether the entry stays in store_table.
bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}
c8f4eac4 1045
1046void
1047StoreHashIndex::maintain()
1048{
1049 int i;
1050 /* walk each fs */
1051
1052 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
1053 /* XXX FixMe: This should be done "in parallell" on the different
1054 * cache_dirs, not one at a time.
1055 */
1056 /* call the maintain function .. */
1057 store(i)->maintain();
1058 }
1059}
1060
c8f4eac4 1061void
1062StoreHashIndex::sync()
1063{
1064 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1065 store(i)->sync();
1066}
1067
/// Create an iterator over all indexed entries; URL filtering is not
/// implemented yet.
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
1076
CBDATA_CLASS_INIT(StoreSearchHashIndex);

// Start a search positioned before the first hash bucket.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
1081
1082/* do not link
1083StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
1084*/
1085
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
1088
/// Callback-style advance: this implementation is synchronous, so the
/// callback is invoked immediately after stepping the iterator.
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}
1095
/// Advance to the next entry; returns false when the search is exhausted.
bool
StoreSearchHashIndex::next()
{
    // drop the entry returned by the previous call
    if (entries.size())
        entries.pop_back();

    // refill from successive hash buckets until entries appear or we run out
    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
1107
bool
StoreSearchHashIndex::error() const
{
    return false; // hash-index searches cannot fail
}
1113
bool
StoreSearchHashIndex::isDone() const
{
    // done when every bucket was scanned or the search was aborted
    return bucket >= store_hash_buckets || _done;
}
1119
1120StoreEntry *
1121StoreSearchHashIndex::currentItem()
1122{
1123 if (!entries.size())
1124 return NULL;
1125
1126 return entries.back();
1127}
1128
/// Copy all entries from the current hash bucket into the local list
/// and advance the bucket cursor.
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++; // next refill will scan the following bucket
    debugs(47,3, "got entries: " << entries.size());
}