/*
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.  Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 */
#include "MemObject.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "swap_log_op.h"

#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif /* HAVE_SYS_STATVFS_H */

/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#include <sys/param.h>
#include <sys/mount.h>

/* Windows and Linux use sys/vfs.h */

#include "StoreHashIndex.h"
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
void
StoreController::init()
{
    if (UsingSmp() && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}
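/*
 * The selection policy is driven by the store_dir_select_algorithm
 * directive; an illustrative squid.conf fragment (any value other than
 * "round-robin" falls through to the least-load selector, as the
 * strcasecmp() above shows):
 *
 *     store_dir_select_algorithm round-robin
 */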
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories sequentially
     * when running on the native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0);

#endif
}
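/*
 * create() below runs after each cache_dir has been handed to
 * createOneStore(): on non-Windows builds every directory is created in a
 * forked child, so the waitpid()/wait3() loop here simply reaps those
 * children before returning.
 */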
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#ifdef _SQUID_NEXT_
        pid = wait3(&status, WNOHANG, NULL);
#else
        pid = waitpid(-1, &status, 0);
#endif
    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
/*
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    return min_objsize <= objsize && max_objsize > objsize;
}
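/*
 * For example (illustrative values): a cache_dir with min_objsize 0 and
 * max_objsize -1 accepts any size, including the unknown size -1; a dir
 * limited to 1000..4096 bytes rejects a 4096-byte object because the test
 * above uses a strict '<' on max_objsize, and rejects objsize == -1 outright.
 */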
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}
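/*
 * Both selectors get a per-dir load figure back from canStore() on a
 * 0..1000 scale; anything outside that range means "not usable right now".
 * The rotation state persists across calls, so successive objects land on
 * successive eligible cache_dirs.
 */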
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    uint64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const uint64_t cur_free = (SD->max_size << 10) - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit */
            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
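/*
 * In short, the least-load selector keeps the dir with the lowest reported
 * load; on a load tie it prefers the tightest max_objsize fit, and on a
 * further tie the dir with the most free space. Only the winning dir gets
 * flags.selected set.
 */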
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
void
StoreController::updateSize(int64_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}
void
SwapDir::updateSize(int64_t size, int sign)
{
    const int64_t blks = (size + fs.blksize - 1) / fs.blksize;
    const int64_t k = blks * fs.blksize * sign;
    cur_size += k;
}
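/*
 * The accounting above rounds every object up to whole filesystem blocks:
 * with fs.blksize = 1024, for instance, a 1500-byte object is charged as
 * 2 blocks (2048 bytes); sign is +1 when adding and -1 when removing, so
 * the same rounded amount is subtracted again on removal.
 */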
void
StoreController::stat(StoreEntry &output) const
{
    const double currentSizeInKB = currentSize() / 1024.0;
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %"PRIu64" KB\n",
                      maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSizeInKB);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSizeInKB, maxSize()),
                      Math::doublePercent((maxSize() - currentSizeInKB), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
void
SwapDir::diskFull()
{
    if (currentSize() >= max_size << 10)
        return;

    max_size = currentSize() >> 10;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}
void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, " Finished. Wrote " << n << " entries.");
    debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(i));
    assert (sd);
    return *sd;
}
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. That's ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;

#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */
    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
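/*
 * fsbtoblk() rescales a block count from the filesystem's block size to
 * 'bs'-sized units. For example, converting to 1 KB units: with 512-byte
 * filesystem blocks, fsbtoblk(100, 512, 1024) = 100 / 2 = 50 KB; with
 * 4096-byte blocks, fsbtoblk(100, 4096, 1024) = 100 * 4 = 400 KB.
 */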
void
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;

#endif
}
void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->dereference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
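/*
 * The lookup below proceeds in three stages: the global store_table of
 * in-transit entries first, then the shared memory cache (when one is
 * running), and finally each configured cache_dir, probed in a rotating
 * order so repeated misses do not always hammer the same subset of disks.
 */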
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(idx));

            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    dereference(e);

    // XXX: Rock store specific: Since each SwapDir controls its index,
    // unlocked entries should not stay in the global store_table.
    if (!keepInLocalMemory) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link *>(&e));
        return;
    }

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
int
StoreHashIndex::callback()
{
    int result = 0;
    int j = 0;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    return result;
}
void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active())
            store(i)->create();
    }
}
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, it's specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + ( Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
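/*
 * A rough illustration of the bucket estimate above (assuming the stock
 * defaults of 13 KB store_avg_object_size and 20 store_objects_per_bucket;
 * illustrative numbers only): with a 10 GB swap maxSize (10485760 KB) and
 * 256 MB cache_mem (262144 KB), buckets = (10485760 + 262144) / 13 / 20,
 * i.e. roughly 41,000 before storeKeyHashBuckets() picks the final count.
 */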
uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}
void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry &)
{}

void
StoreHashIndex::dereference(StoreEntry &)
{}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
void
StoreHashIndex::updateSize(int64_t, int)
{}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}

CBDATA_CLASS_INIT(StoreSearchHashIndex);
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}