/*
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.  Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 */
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "store_key_md5.h"
#include "StoreHashIndex.h"
#include "swap_log_op.h"
#include "Transients.h"
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif /* HAVE_SYS_STATVFS_H */

/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif

/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
StoreController::StoreController() : swapDir(new StoreHashIndex())
    , memStore(NULL), transients(NULL)
{}

StoreController::~StoreController()
{}
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
void
StoreController::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }

    if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding) {
        transients = new Transients;
        transients->init();
    }
}
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available, so the following
     * workaround creates store directories sequentially when
     * running on the native Windows port.
     */
#if !_SQUID_WINDOWS_
    if (fork())
        return;
#endif

    aStore.create();

#if !_SQUID_WINDOWS_
    exit(0);
#endif
}
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_
        pid = wait3(&status, WNOHANG, NULL);
#else
        pid = waitpid(-1, &status, 0);
#endif
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif /* !_SQUID_WINDOWS_ */
}
/*
 * Determine whether the given directory can handle this object
 * size.
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    if (max_objsize == -1 && min_objsize <= objsize)
        return true;
    else
        return min_objsize <= objsize && max_objsize > objsize;
}
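
/*
 * A worked illustration of the rules above, using hypothetical limits
 * (not squid.conf defaults). Note that max_objsize acts as an
 * exclusive upper bound:
 *
 *   objsize    min=0,max=-1   min=0,max=65536   min=4096,max=-1
 *   -1         true           false             false
 *   1024       true           true              false
 *   65536      true           false             true
 */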
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // Increment the first candidate once per selection (not once per
    // iteration) to reduce bias when some disk(s) attract more entries.
    static int firstCandidate = 0;
    if (++firstCandidate >= Config.cacheSwap.n_configured)
        firstCandidate = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
        const SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        int load = 0;
        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        return dirn;
    }

    return -1;
}
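
/*
 * Illustrative walk-through (hypothetical 3-dir setup): with
 * firstCandidate at 1, a selection probes dirs 1, 2, 0 in that order
 * and returns the first one whose canStore() accepts the object with a
 * sane load (0..1000). The next selection starts probing at dir 2,
 * no matter how many dirs the previous selection had to skip.
 */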
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max-size fit */
            if (least_objsize != -1)
                if (SD->maxObjectSize() > least_objsize)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}
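
/*
 * Tie-breaking sketch: if two dirs both report load 500, the one with
 * the smaller maxObjectSize() (the "tightest fit") wins; if those
 * match as well, the dir with more free space (maxSize() minus
 * currentSize()) is preferred.
 */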
/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);

    /*
     * icons and such; don't write them to the swap log
     */
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') <<
           std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
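
/*
 * For illustration, the level-3 trace produced above looks roughly
 * like this (values hypothetical):
 *
 *   storeDirSwapLog: ADD 0123456789ABCDEF0123456789ABCDEF 0 0000AF3B
 *
 * i.e. op name, MD5 key text, swap_dirn, then swap_filen as eight
 * uppercase hex digits.
 */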
void
StoreController::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}
void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, DBG_IMPORTANT, "WARNING: Shrinking cache_dir #" << index <<
           " to " << currentSize() / 1024.0 << " KB");
}
void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks go to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient, as CPU-wise it would be more efficient
     * to do this sequentially, but I/O-wise the parallelism helps by
     * keeping more HDD spindles active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, "  Finished.  Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}
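
/*
 * Example of the interleaving above (hypothetical two-dir cache):
 * entries are written A0, B0, A1, B1, ... rather than A0..An, B0..Bn,
 * so both disks stay busy; the 0xFFFF mask prints progress once per
 * 65536 entries written.
 */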
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(i));
    assert(sd);
    return *sd;
}
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. That's ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS
    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else
    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */
    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
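
/*
 * Worked example: with a filesystem fragment size fsbs = 512 and a
 * target block size bs = 1024, fsbtoblk(100, 512, 1024) yields
 * 100 / (1024 / 512) = 50 blocks; with fsbs = 4096 it yields
 * 100 * (4096 / 1024) = 400. The integer division is exact only when
 * one block size divides the other.
 */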
void
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS
    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else
    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, DBG_IMPORTANT, "" << path << ": " << xstrerror());
        return;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif
}
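
/*
 * Usage sketch (hypothetical cache_dir path); callers receive KB and
 * inode counts:
 *
 *   int totl_kb, free_kb, totl_in, free_in;
 *   storeDirGetUFSStats("/var/cache/squid", &totl_kb, &free_kb,
 *                       &totl_in, &free_in);
 *   // free_kb / (double) totl_kb is the fraction of space still free
 */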
static void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
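
/*
 * Illustration: the array starts at 4 slots and doubles whenever it
 * fills, so configuring a 5th cache_dir triggers 4 -> 8 slots and the
 * 9th triggers 8 -> 16; existing SwapDir::Pointer slots are copied
 * over verbatim by the memcpy() above.
 */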
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; ++i) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */
    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
bool
StoreController::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */
    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e, wantsLocalMemory) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = find(key)) {
        // this is not very precise: some get()s are not initiated by clients
        e->touch();
        return e;
    }

    return NULL;
}
/// Internal method to implement the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
StoreController::find(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex when
    // the global store_table is no longer used for in-transit objects.
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
/// updates the collapsed entry with the corresponding on-disk entry, if any
/// In other words, the SwapDir::anchorCollapsed() API applied to all disks.
bool
StoreController::anchorCollapsedOnDisk(StoreEntry &collapsed, bool &inSync)
{
    // TODO: move this loop to StoreHashIndex, just like the one in get().
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir *>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (sd->anchorCollapsed(collapsed, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << collapsed);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << collapsed);
    return false;
}
void StoreController::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);

    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);

    if (e.swap_filen >= 0)
        e.store()->markForUnlink(e);
}
// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
StoreController::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
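
/*
 * Worked example (hypothetical limits): with cache_mem at 256 MB and
 * maximum_object_size_in_memory at 512 KB, ramLimit is 512 KB, so an
 * entry whose loaded or expected size exceeds 512 KB is not kept in
 * local RAM even though the memory cache as a whole is much larger.
 */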
void
StoreController::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;

    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}
void
StoreController::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}

void
StoreController::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}
void
StoreController::transientsAbandon(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->abandon(e);
    }
}

void
StoreController::transientsCompleteWriting(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->completeWriting(e);
    }
}
int
StoreController::transientReaders(const StoreEntry &e) const
{
    return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
           transients->readers(e) : 0;
}

void
StoreController::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}
void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereference(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link *>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}
void
StoreController::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                 const HttpRequestMethod &reqMethod)
{
    e->makePublic(); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}
void
StoreController::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (collapsed->swap_filen >= 0) {
        found = true;
        inSync = collapsed->store()->updateCollapsed(*collapsed);
    } else {
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}
/// Called for in-transit entries that are not yet anchored to a cache.
/// For cached entries, return true after synchronizing them with their cache
/// (making inSync true on success). For not-yet-cached entries, return false.
bool
StoreController::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    // this method is designed to work with collapsed transients only
    assert(collapsed.mem_obj);
    assert(collapsed.mem_obj->xitTable.index >= 0);
    assert(collapsed.mem_obj->smpCollapsed);

    debugs(20, 7, "anchoring " << collapsed);

    bool found = false;
    if (memStore)
        found = memStore->anchorCollapsed(collapsed, inSync);
    if (!found && Config.cacheSwap.n_configured)
        found = anchorCollapsedOnDisk(collapsed, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << collapsed);
        else
            debugs(20, 5, "failed to anchor " << collapsed);
    } else {
        debugs(20, 7, "skipping not yet cached " << collapsed);
    }

    return found;
}
StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert(store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal("too much io\n");
        }
    } while (j > 0);

    return result;
}
void
StoreHashIndex::create()
{
    if (Config.cacheSwap.n_configured == 0) {
        debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
    }

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
void
StoreHashIndex::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus; it's specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << (Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable; for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << (Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
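
/*
 * Worked sizing example (hypothetical config): a 10 GB swap plus
 * 256 MB cache_mem with store_avg_object_size at 13 KB estimates
 * (10485760 + 262144) / 13 ~= 826762 objects; divided by a
 * store_objects_per_bucket of 20 that asks storeKeyHashBuckets() for
 * roughly 41338 buckets before rounding to a supported size.
 */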
uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}
void
StoreHashIndex::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}
void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}
void
StoreHashIndex::reference(StoreEntry &e)
{
    e.store()->reference(e);
}

bool
StoreHashIndex::dereference(StoreEntry &e, bool wantsLocalMemory)
{
    return e.store()->dereference(e, wantsLocalMemory);
}
void
StoreHashIndex::maintain()
{
    int i;

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal("Cannot search by url yet\n");

    return new StoreSearchHashIndex(this);
}
CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) :
    sd(aSwapDir),
    _done(false),
    bucket(0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback(aCallbackData);
}
bool
StoreSearchHashIndex::next()
{
    if (!entries.empty())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert(!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    ++bucket;
    debugs(47, 3, "got entries: " << entries.size());
}