/*
 * $Id$
 *
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid-old.h"
#include "Store.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mem_node.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif

#include "StoreHashIndex.h"

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
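/*
 * For example, this squid.conf line selects the round-robin policy:
 *     store_dir_select_algorithm round-robin
 * Anything else leaves the default least-load policy in place; see
 * StoreController::init() below.
 */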
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;

void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available, so the native Windows
     * port creates store directories sequentially instead of in
     * parallel child processes.
     */
#if !_SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_MSWIN_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * i.e. any-sized-object swapdirs. This is a good thing.
 */
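/*
 * Example: with min_objsize = 0 and max_objsize = 4096, a 4095-byte
 * object is accepted but a 4096-byte one is rejected, because the
 * upper bound is exclusive (max_objsize > objsize below).
 */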
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}


/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
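/*
 * canStore() reports a load estimate through its load parameter; the
 * 0..1000 range is taken as the valid scale here, and a directory
 * reporting anything outside that range is skipped as overloaded.
 */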
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
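/*
 * When two directories report the same load, ties are broken first by
 * the tightest max_objsize fit, then by the most free space, as the
 * loop body below shows.
 */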
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1. It MUST be a public key. It does no good to log
 *      a public ADD, change the key, then log a private
 *      DEL. So we need to log a DEL before we change a
 *      key from public to private.
 *   2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}

void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient: CPU-wise it would be cheaper to write
     * each directory's log sequentially, but interleaving the writes
     * helps I/O-wise because it keeps more HDD spindles active at once.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

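            /* Report progress every 65,536 entries: the test below is
             * true whenever the low 16 bits of the counter wrap to 0. */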
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, "  Finished.  Wrote " << n << " entries.");
    debugs(20, 1, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");


    return n;
}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

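/*
 * fsbtoblk converts a count of filesystem blocks of size fsbs into a
 * count of blocks of size bs. For example, with fsbs = 512 and
 * bs = 1024, num is halved; with fsbs = 4096 and bs = 1024, num is
 * multiplied by 4. Dividing rather than multiplying when fsbs < bs
 * keeps the intermediate value from overflowing on large filesystems.
 */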
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
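    /* Grow the SwapDir::Pointer array geometrically: start with 4 slots
     * and double the allocation whenever it fills up. */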
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         *  swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

bool
StoreController::dereference(StoreEntry & e)
{
    bool keepInStoreTable = true; // keep if there are no objections

    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return keepInStoreTable;

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) && keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }

    return keepInStoreTable;
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

void
StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        keepInLocalMemory = memStore->keepInLocalMemory(e);
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;
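    /* ndir is static, so each call resumes servicing I/O callbacks from
     * where the previous call stopped, spreading attention across dirs. */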

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}

void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * Returns just a reference to the object; does not start swapping it in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* This is very bogus: it is specific to a Store maintaining an
     * in-core index, not global. */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
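    /* Worked example with hypothetical values: a 10 GB disk cache plus a
     * 256 MB memory cache with a 13 KB average object size gives roughly
     * 800,000 estimated objects; at 20 objects per bucket that is a
     * target of about 40,000 buckets, before storeKeyHashBuckets()
     * rounds and caps the value. */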
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, 1, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
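    /* The copied entries are later consumed LIFO: next() pops from the
     * back of the vector and currentItem() returns the back element. */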
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47, 3, "got entries: " << entries.size());
}