/*
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from
 *  the Internet community; see the CONTRIBUTORS file for full
 *  details.  Many organizations have provided support for Squid's
 *  development; see the SPONSORS file for full details.  Squid is
 *  Copyrighted (C) 2001 by the Regents of the University of
 *  California; see the COPYRIGHT file for full details.  Squid
 *  incorporates software developed and/or copyrighted by other
 *  sources; see the CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 */
#include "squid.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "Store.h"
#include "StoreHashIndex.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif /* HAVE_SYS_STATVFS_H */

/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif

/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
StoreController::StoreController() : swapDir(new StoreHashIndex()), memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }
}
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories sequentially
     * when running on the native Windows port.
     */
#if !_SQUID_WINDOWS_
    if (fork())
        return;
#endif

    aStore.create();

#if !_SQUID_WINDOWS_
    exit(0);
#endif
}
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_
        pid = wait3(&status, WNOHANG, NULL);
#else
        pid = waitpid(-1, &status, 0);
#endif
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}
/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}
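/*
 * Worked example (added for illustration; not in the original source):
 * a cache_dir with min_objsize=0 and max_objsize=-1 accepts any size,
 * including the unknown size -1.  A cache_dir with min_objsize=0 and
 * max_objsize=32768 rejects objsize == -1 and any objsize >= 32768,
 * because the upper bound is exclusive (max_objsize > objsize).
 */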
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}
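/*
 * Note added for clarity (not in the original source): 'dirn' is static, so
 * the scan position persists across calls; consecutive objects are offered
 * to consecutive cache_dirs, skipping any dir that cannot store the object
 * or that reports a load outside the 0..1000 range.
 */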
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
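/*
 * Worked example (illustrative; not in the original source): suppose two
 * cache_dirs both report load 500 for a small object.  The tie is broken
 * first by the tightest max_objsize fit (a dir capped at 64 KB beats an
 * unlimited one), and only then by the largest free space
 * (maxSize() - currentSize()).
 */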
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
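/*
 * Note added for clarity (not in the original source): the debugs() trace
 * above prints the operation name (swap_log_op_str), the entry's MD5 key,
 * the cache_dir index, and the swap file number as eight uppercase hex
 * digits; the actual swap.state record is written by the owning SwapDir's
 * logEntry().
 */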
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, "  " << std::setw(7) << n  <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, "  Finished.  Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, "  Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
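/*
 * Usage note (added; not in the original source): the function returns the
 * number of entries written; progress is reported every 65536 entries
 * ((++n & 0xFFFF) == 0), and the swap logs are re-opened afterwards only
 * when 'reopen' is non-zero.
 */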
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(i));
    assert(sd);
    return *sd;
}
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
/*
 * handle callbacks all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. That's ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
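/*
 * Worked example (added for illustration; not in the original source):
 * fsbtoblk() rescales a block count from the filesystem block size 'fsbs'
 * to a desired block size 'bs'.  With fsbs = 512 and bs = 1024,
 * fsbtoblk(n, 512, 1024) yields n / 2; with fsbs = 4096 it yields n * 4.
 * The (fsbs) != 0 test keeps (bs) / (fsbs) from dividing by zero.
 */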
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
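/*
 * Hypothetical usage sketch (added; not from the original source). A
 * UFS-style cache_dir could report its filesystem capacity like this;
 * the path "/var/cache/squid" is only an example:
 *
 *     int totl_kb = 0, free_kb = 0, totl_in = 0, free_in = 0;
 *     if (storeDirGetUFSStats("/var/cache/squid", &totl_kb, &free_kb,
 *                             &totl_in, &free_in) == 0)
 *         debugs(47, 2, totl_kb << " KB total, " << free_kb << " KB free");
 */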
void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
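/*
 * Note added for clarity (not in the original source): the swapDirs array
 * starts with room for 4 entries and doubles whenever it fills up, so its
 * capacity grows 4, 8, 16, ... as cache_dir lines are parsed; existing
 * pointers are copied into the new array before the old one is freed.
 */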
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
bool
StoreController::dereference(StoreEntry & e)
{
    bool keepInStoreTable = true; // keep if there are no objections

    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return keepInStoreTable;

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) && keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) && keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }

    return keepInStoreTable;
}
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;

        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(idx));

            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
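/*
 * Note added for clarity (not in the original source): lookups proceed in
 * three stages: the global store_table (via swapDir->get()), then the
 * shared memory cache (memStore) if one is enabled, and finally each
 * configured cache_dir in turn, starting from a rotating index so the
 * same disks are not always asked first.
 */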
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(const StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
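/*
 * Worked example (illustrative; not in the original source), assuming the
 * usual squid.conf mapping of 'cache_mem' to Config.memMaxSize and
 * 'maximum_object_size_in_memory' to Config.Store.maxInMemObjSize: with
 * cache_mem 256 MB and maximum_object_size_in_memory 512 KB the effective
 * ramLimit is 512 KB, so an entry whose loaded or expected size exceeds
 * 512 KB is not kept in local memory.
 */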
void
StoreController::maybeTrimMemory(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;

    if (memStore)
        keepInLocalMemory = memStore->keepInLocalMemory(e);
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link *>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
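/*
 * Summary added for clarity (not in the original source): an idle entry
 * ends up in one of three places: destroyed outright when no Store wants
 * it in the shared store_table, kept as IN_MEMORY in the local memory
 * cache, or stripped of its in-memory data via purgeMem() (which may free
 * the entry itself).
 */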
StoreHashIndex::StoreHashIndex()
{
    assert(store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}
void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, it's specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
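/*
 * Worked example (illustrative; not in the original source), assuming the
 * usual squid.conf mapping of 'store_avg_object_size' to
 * Config.Store.avgObjectSize and 'store_objects_per_bucket' to
 * Config.Store.objectsPerBucket: with 100 GB of total swap, 256 MB of
 * cache_mem, a 13 KB average object size and 20 objects per bucket, the
 * estimate is roughly 8 million objects and about 400,000 target buckets,
 * before storeKeyHashBuckets() rounds the target to one of its supported
 * sizes (the comment above notes a 64k-bucket maximum).
 */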
uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}
void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}
void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}
void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e)
{
    return e.store()->dereference(e);
}
void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
    sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}
bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47,3, "got entries: " << entries.size());
}