5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
38 #include "MemObject.h"
39 #include "SquidTime.h"
41 #include "swap_log_op.h"
44 #if HAVE_SYS_STATVFS_H
45 #include <sys/statvfs.h>
47 #endif /* HAVE_STATVFS */
48 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
50 #include <sys/param.h>
53 #include <sys/mount.h>
55 /* Windows and Linux use sys/vfs.h */
60 #include "StoreHashIndex.h"
/* NOTE(review): this chunk appears to be a decimated extract of Squid's
 * store_dir.cc -- many intermediate lines are missing and statements are
 * split across physical lines, so fragments are documented in place. */
/* Forward declarations for the two cache_dir selection strategies. */
62 static STDIRSELECT storeDirSelectSwapDirRoundRobin
;
63 static STDIRSELECT storeDirSelectSwapDirLeastLoad
;
66 * store_dirs_rebuilding is initialized to _1_ as a hack so that
67 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
68 * cache_dirs have been read. For example, without this hack, Squid
69 * will try to write clean log files if -kparse fails (because it
/* Count of cache_dirs still rebuilding their indexes; starts at 1 (see above). */
72 int StoreController::store_dirs_rebuilding
 = 1;
/* Constructor/destructor fragments; bodies not visible in this extract. */
74 StoreController::StoreController() : swapDir (new StoreHashIndex())
77 StoreController::~StoreController()
81 * This function pointer is set according to 'store_dir_select_algorithm'
/* Default strategy is least-load; init() may switch it to round-robin. */
84 STDIRSELECT
*storeDirSelectSwapDir
 = storeDirSelectSwapDirLeastLoad
;
/* Chooses the cache_dir selection algorithm from squid.conf
 * (store_dir_select_algorithm). The "else" arm pairing with the
 * round-robin branch is partly missing from this extract. */
87 StoreController::init()
91 if (0 == strcasecmp(Config
.store_dir_select_algorithm
, "round-robin")) {
92 storeDirSelectSwapDir
= storeDirSelectSwapDirRoundRobin
;
93 debugs(47, 1, "Using Round Robin store dir selection");
/* presumably inside an else branch -- the "} else {" line is not visible here */
95 storeDirSelectSwapDir
= storeDirSelectSwapDirLeastLoad
;
96 debugs(47, 1, "Using Least Load store dir selection");
/* createOneStore(): creates one store's on-disk structures; on non-Windows
 * builds this apparently forks so dirs are created in parallel (fork/child
 * logic lines are missing from this extract). */
101 StoreController::createOneStore(Store
&aStore
)
104 * On Windows, fork() is not available.
105 * The following is a workaround to create store directories sequentially
106 * when running on native Windows port.
108 #ifndef _SQUID_MSWIN_
117 #ifndef _SQUID_MSWIN_
/* create(): creates all configured stores, then (non-Windows) reaps the
 * child processes spawned by createOneStore(). */
125 StoreController::create()
129 #ifndef _SQUID_MSWIN_
/* reap children: wait3(WNOHANG) variant ... */
137 pid
= wait3(&status
, WNOHANG
, NULL
);
/* ... or blocking waitpid variant -- the #else separating these is not visible */
140 pid
= waitpid(-1, &status
, 0);
/* loop until no children remain, retrying on EINTR */
143 } while (pid
> 0 || (pid
< 0 && errno
== EINTR
));
149 * Determine whether the given directory can handle this object
152 * Note: if the object size is -1, then the only swapdirs that
153 * will return true here are ones that have max_obj_size = -1,
154 * ie any-sized-object swapdirs. This is a good thing.
/* Returns true iff this SwapDir's max_objsize admits an object of
 * 'objsize' bytes; -1 means "unknown size" / "unlimited dir". */
157 SwapDir::objectSizeIsAcceptable(int64_t objsize
) const
160 * If the swapdir's max_obj_size is -1, then it definitely can
163 if (max_objsize
== -1)
167 * If the object size is -1, then if the storedir isn't -1 we
170 if ((objsize
== -1) && (max_objsize
!= -1))
174 * Else, make sure that the max object size is larger than objsize
176 return max_objsize
> objsize
;
181 * This new selection scheme simply does round-robin on all SwapDirs.
182 * A SwapDir is skipped if it is over the max_size (100%) limit, or
/* Round-robin cache_dir selector: returns the index of the next dir that
 * is writable, under its size limit, accepts objects of this size, and
 * reports an acceptable load. Return statements and the 'dirn'/'i'/'load'
 * declarations are not visible in this extract. */
186 storeDirSelectSwapDirRoundRobin(const StoreEntry
* e
)
191 RefCount
<SwapDir
> sd
;
/* object size including swap header, when the entry has a mem_obj */
193 ssize_t objsize
= e
->objectLen();
195 objsize
+= e
->mem_obj
->swap_hdr_sz
;
/* scan at most n_configured dirs, wrapping dirn */
197 for (i
= 0; i
<= Config
.cacheSwap
.n_configured
; i
++) {
198 if (++dirn
>= Config
.cacheSwap
.n_configured
)
201 sd
= dynamic_cast<SwapDir
*>(INDEXSD(dirn
));
/* skip read-only dirs */
203 if (sd
->flags
.read_only
)
/* skip dirs already at/over capacity */
206 if (sd
->cur_size
> sd
->max_size
)
209 if (!sd
->objectSizeIsAcceptable(objsize
))
212 /* check for error or overload condition */
213 load
= sd
->canStore(*e
);
215 if (load
< 0 || load
> 1000) {
226 * Spread load across all of the store directories
228 * Note: We should modify this later on to prefer sticking objects
229 * in the *tightest fit* swapdir to conserve space, along with the
230 * actual swapdir usage. But for now, this hack will do while
231 * testing, so you should order your swapdirs in the config file
232 * from smallest maxobjsize to unlimited (-1) maxobjsize.
234 * We also have to choose nleast == nconf since we need to consider
235 * ALL swapdirs, regardless of state. Again, this is a hack while
236 * we sort out the real usefulness of this algorithm.
/* Least-load cache_dir selector: picks the dir with the lowest canStore()
 * load, breaking ties by tightest max_objsize fit and then by most free
 * space. Declarations of 'i', 'dirn', 'load', 'objsize' and the continue/
 * return statements are not visible in this extract. */
239 storeDirSelectSwapDirLeastLoad(const StoreEntry
* e
)
242 ssize_t most_free
= 0, cur_free
;
243 ssize_t least_objsize
= -1;
244 int least_load
= INT_MAX
;
248 RefCount
<SwapDir
> SD
;
250 /* Calculate the object size */
251 objsize
= e
->objectLen();
254 objsize
+= e
->mem_obj
->swap_hdr_sz
;
/* examine every configured dir, clearing its 'selected' flag */
256 for (i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++) {
257 SD
= dynamic_cast<SwapDir
*>(INDEXSD(i
));
258 SD
->flags
.selected
= 0;
259 load
= SD
->canStore(*e
);
/* skip dirs reporting error/overload */
261 if (load
< 0 || load
> 1000) {
265 if (!SD
->objectSizeIsAcceptable(objsize
))
268 if (SD
->flags
.read_only
)
271 if (SD
->cur_size
> SD
->max_size
)
/* worse load than best so far: skip */
274 if (load
> least_load
)
277 cur_free
= SD
->max_size
- SD
->cur_size
;
279 /* If the load is equal, then look in more details */
280 if (load
== least_load
) {
281 /* closest max_objsize fit */
283 if (least_objsize
!= -1)
284 if (SD
->max_objsize
> least_objsize
|| SD
->max_objsize
== -1)
/* then least-full dir wins */
288 if (cur_free
< most_free
)
/* record new best candidate */
293 least_objsize
= SD
->max_objsize
;
294 most_free
= cur_free
;
/* mark the chosen dir (guard for dirn >= 0 not visible here) */
299 dynamic_cast<SwapDir
*>(INDEXSD(dirn
))->flags
.selected
= 1;
305 * An entry written to the swap log MUST have the following
307 * 1. It MUST be a public key. It does no good to log
308 * a public ADD, change the key, then log a private
309 * DEL. So we need to log a DEL before we change a
310 * key from public to private.
311 * 2. It MUST have a valid (> -1) swap_filen.
/* Appends an ADD/DEL record for entry 'e' to its SwapDir's swap log. */
314 storeDirSwapLog(const StoreEntry
* e
, int op
)
317 assert(!EBIT_TEST(e
->flags
, KEY_PRIVATE
));
318 assert(e
->swap_filen
>= 0);
320 * icons and such; don't write them to the swap log
/* special entries (icons etc.) are never logged */
323 if (EBIT_TEST(e
->flags
, ENTRY_SPECIAL
))
326 assert(op
> SWAP_LOG_NOP
&& op
< SWAP_LOG_MAX
);
328 debugs(20, 3, "storeDirSwapLog: " <<
329 swap_log_op_str
[op
] << " " <<
330 e
->getMD5Text() << " " <<
331 e
->swap_dirn
<< " " <<
332 std::hex
<< std::uppercase
<< std::setfill('0') << std::setw(8) << e
->swap_filen
);
/* delegate actual log writing to the owning SwapDir */
334 dynamic_cast<SwapDir
*>(INDEXSD(e
->swap_dirn
))->logEntry(*e
, op
);
/* StoreController::updateSize(): the controller aggregates its children's
 * sizes and therefore refuses direct size updates. */
338 StoreController::updateSize(int64_t size
, int sign
)
340 fatal("StoreController has no independent size\n");
/* SwapDir::updateSize(): adjusts the global store_swap_size (KB) by the
 * filesystem-block-rounded size of an object; sign is +1/-1 for add/remove.
 * Per-dir cur_size accounting lines are not visible in this extract. */
344 SwapDir::updateSize(int64_t size
, int sign
)
/* round byte size up to whole fs blocks */
346 int blks
= (size
+ fs
.blksize
- 1) / fs
.blksize
;
/* convert to KB and apply sign */
347 int k
= (blks
* fs
.blksize
>> 10) * sign
;
349 store_swap_size
+= k
;
/* Writes human-readable store statistics into 'output', then delegates
 * per-dir statistics to the swapDir (StoreHashIndex). */
358 StoreController::stat(StoreEntry
&output
) const
360 storeAppendPrintf(&output
, "Store Directory Statistics:\n");
361 storeAppendPrintf(&output
, "Store Entries : %lu\n",
362 (unsigned long int)StoreEntry::inUseCount());
363 storeAppendPrintf(&output
, "Maximum Swap Size : %8ld KB\n",
364 (long int) maxSize());
365 storeAppendPrintf(&output
, "Current Store Swap Size: %8lu KB\n",
367 storeAppendPrintf(&output
, "Current Capacity : %d%% used, %d%% free\n",
368 percent((int) store_swap_size
, (int) maxSize()),
369 percent((int) (maxSize() - store_swap_size
), (int) maxSize()));
370 /* FIXME Here we should output memory statistics */
372 /* now the swapDir */
373 swapDir
->stat(output
);
376 /* if needed, this could be taught to cache the result */
/* maxSize/minSize: sum over all cache_dirs, via the swapDir index. */
378 StoreController::maxSize() const
380 /* TODO: include memory cache ? */
381 return swapDir
->maxSize();
385 StoreController::minSize() const
387 /* TODO: include memory cache ? */
388 return swapDir
->minSize();
/* fragment of SwapDir::diskFull() (presumably -- the signature line is
 * missing): clamps max_size down to cur_size when the disk fills up. */
394 if (cur_size
>= max_size
)
399 debugs(20, 1, "WARNING: Shrinking cache_dir #" << index
<< " to " << cur_size
<< " KB");
/* Opens the swap log of every configured cache_dir. */
403 storeDirOpenSwapLogs(void)
405 for (int dirn
= 0; dirn
< Config
.cacheSwap
.n_configured
; ++dirn
)
406 dynamic_cast<SwapDir
*>(INDEXSD(dirn
))->openLog();
/* Closes the swap log of every configured cache_dir. */
410 storeDirCloseSwapLogs(void)
412 for (int dirn
= 0; dirn
< Config
.cacheSwap
.n_configured
; ++dirn
)
413 dynamic_cast<SwapDir
*>(INDEXSD(dirn
))->closeLog();
417 * storeDirWriteCleanLogs
419 * Writes a "clean" swap log file from in-memory metadata.
420 * This is a rewrite of the original function to troll each
421 * StoreDir and write the logs, and flush at the end of
422 * the run. Thanks goes to Eric Stern, since this solution
423 * came out of his COSS code.
/* Aborts (returning 0, presumably) while any dir is still rebuilding;
 * declarations of 'n', 'dirn', 'dt' and several loop/return lines are
 * missing from this extract. 'reopen' controls whether the normal swap
 * logs are reopened afterwards -- TODO confirm against full source. */
426 storeDirWriteCleanLogs(int reopen
)
428 const StoreEntry
*e
= NULL
;
431 struct timeval start
;
433 RefCount
<SwapDir
> sd
;
/* refuse to write while cache_dirs are still rebuilding their indexes */
437 if (StoreController::store_dirs_rebuilding
) {
438 debugs(20, 1, "Not currently OK to rewrite swap log.");
439 debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
443 debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
445 start
= current_time
;
/* phase 1: start a clean-log rewrite on each dir */
447 for (dirn
= 0; dirn
< Config
.cacheSwap
.n_configured
; dirn
++) {
448 sd
= dynamic_cast<SwapDir
*>(INDEXSD(dirn
));
450 if (sd
->writeCleanStart() < 0) {
451 debugs(20, 1, "log.clean.start() failed for dir #" << sd
->index
);
457 * This may look inefficient as CPU wise it is more efficient to do this
458 * sequentially, but I/O wise the parallelism helps as it allows more
459 * hdd spindles to be active.
/* phase 2: interleave writes across dirs, one entry at a time */
464 for (dirn
= 0; dirn
< Config
.cacheSwap
.n_configured
; dirn
++) {
465 sd
= dynamic_cast<SwapDir
*>(INDEXSD(dirn
));
/* dirs whose writeCleanStart() failed have no cleanLog */
467 if (NULL
== sd
->cleanLog
)
470 e
= sd
->cleanLog
->nextEntry();
480 sd
->cleanLog
->write(*e
);
/* progress report every 65536 entries */
482 if ((++n
& 0xFFFF) == 0) {
484 debugs(20, 1, " " << std::setw(7) << n
<<
485 " entries written so far.");
/* phase 3: flush and finalize each dir's clean log */
491 for (dirn
= 0; dirn
< Config
.cacheSwap
.n_configured
; dirn
++)
492 dynamic_cast<SwapDir
*>(INDEXSD(dirn
))->writeCleanDone();
/* reopen normal logging (guarded by 'reopen', presumably -- the if is
 * not visible here) */
495 storeDirOpenSwapLogs();
499 dt
= tvSubDsec(start
, current_time
);
501 debugs(20, 1, " Finished. Wrote " << n
<< " entries.");
502 debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt
<<
503 " seconds ("<< std::setw(6) << ((double) n
/ (dt
> 0.0 ? dt
: 1.0)) << " entries/sec).");
/* search(): delegates URL search to the on-disk index; the in-memory hot
 * cache is deliberately not searched yet. */
510 StoreController::search(String
const url
, HttpRequest
*request
)
512 /* cheat, for now you can't search the memory hot cache */
513 return swapDir
->search(url
, request
);
/* store(x): accessor fragment -- body not visible in this extract. */
517 StoreHashIndex::store(int const x
) const
/* sync(): flush pending state; memory-cache sync is still a TODO. */
523 StoreController::sync(void)
525 /* sync mem cache? */
530 * handle callbacks for all available fs'es
/* callback(): runs I/O completion callbacks for all stores, bracketed by
 * profiling probes; returns the (possibly double-counted) callback count. */
533 StoreController::callback()
535 /* This will likely double count. That's ok. */
536 PROF_start(storeDirCallback
);
538 /* mem cache callbacks ? */
539 int result
= swapDir
->callback();
541 PROF_stop(storeDirCallback
);
/* Queries the filesystem block size for 'path' into *blksize.
 * Uses statvfs() where available, falling back to statfs(); the #if/#else
 * separating the two variants is not visible in this extract. */
547 storeDirGetBlkSize(const char *path
, int *blksize
)
553 if (statvfs(path
, &sfs
)) {
554 debugs(50, 1, "" << path
<< ": " << xstrerror());
/* statvfs variant: fundamental block size */
559 *blksize
= (int) sfs
.f_frsize
;
564 if (statfs(path
, &sfs
)) {
565 debugs(50, 1, "" << path
<< ": " << xstrerror());
/* statfs variant: optimal transfer block size */
570 *blksize
= (int) sfs
.f_bsize
;
573 * Sanity check; make sure we have a meaningful value.
/* fsbtoblk: convert 'num' blocks of size 'fsbs' into blocks of size 'bs',
 * ordering the division to avoid overflow/rounding problems. */
582 #define fsbtoblk(num, fsbs, bs) \
583 (((fsbs) != 0 && (fsbs) < (bs)) ? \
584 (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/* Reports filesystem usage for 'path': total/free space in KB and
 * total/free inodes. As with storeDirGetBlkSize, a statvfs() variant and
 * a statfs() fallback exist; the #if/#else between them is not visible. */
586 storeDirGetUFSStats(const char *path
, int *totl_kb
, int *free_kb
, int *totl_in
, int *free_in
)
592 if (statvfs(path
, &sfs
)) {
593 debugs(50, 1, "" << path
<< ": " << xstrerror());
/* statvfs variant: sizes are in f_frsize units */
597 *totl_kb
= (int) fsbtoblk(sfs
.f_blocks
, sfs
.f_frsize
, 1024);
598 *free_kb
= (int) fsbtoblk(sfs
.f_bfree
, sfs
.f_frsize
, 1024);
599 *totl_in
= (int) sfs
.f_files
;
600 *free_in
= (int) sfs
.f_ffree
;
/* statfs variant: sizes are in f_bsize units */
605 if (statfs(path
, &sfs
)) {
606 debugs(50, 1, "" << path
<< ": " << xstrerror());
610 *totl_kb
= (int) fsbtoblk(sfs
.f_blocks
, sfs
.f_bsize
, 1024);
611 *free_kb
= (int) fsbtoblk(sfs
.f_bfree
, sfs
.f_bsize
, 1024);
612 *totl_in
= (int) sfs
.f_files
;
613 *free_in
= (int) sfs
.f_ffree
;
/* Ensures the cacheSwap array has room for one more SwapDir: first call
 * allocates 4 slots; subsequent growth doubles the capacity. */
620 allocate_new_swapdir(SquidConfig::_cacheSwap
* swap
)
622 if (swap
->swapDirs
== NULL
) {
623 swap
->n_allocated
= 4;
624 swap
->swapDirs
= static_cast<StorePointer
*>(xcalloc(swap
->n_allocated
, sizeof(StorePointer
)));
/* grow by doubling when full */
627 if (swap
->n_allocated
== swap
->n_configured
) {
629 swap
->n_allocated
<<= 1;
630 tmp
= static_cast<StorePointer
*>(xcalloc(swap
->n_allocated
, sizeof(StorePointer
)));
/* NOTE(review): copy size uses sizeof(SwapDir *) while the array holds
 * StorePointer elements -- sizes likely match in practice, but this
 * should be sizeof(StorePointer); confirm against full source. */
631 xmemcpy(tmp
, swap
->swapDirs
, swap
->n_configured
* sizeof(SwapDir
*));
632 xfree(swap
->swapDirs
);
633 swap
->swapDirs
= tmp
;
/* Releases the configured cache_dir array: clears each StorePointer slot
 * (dropping its reference), frees the array, and resets the counters.
 * The reconfigure early-return guarded by the comment below is not fully
 * visible in this extract. */
638 free_cachedir(SquidConfig::_cacheSwap
* swap
)
641 /* DON'T FREE THESE FOR RECONFIGURE */
646 for (i
= 0; i
< swap
->n_configured
; i
++) {
647 /* TODO XXX this lets the swapdir free resources asynchronously
648 * swap->swapDirs[i]->deactivate();
649 * but there may be such a means already.
/* NULL-ing the RefCount slot releases the SwapDir */
652 swap
->swapDirs
[i
] = NULL
;
655 safe_free(swap
->swapDirs
);
656 swap
->swapDirs
= NULL
;
657 swap
->n_allocated
= 0;
658 swap
->n_configured
= 0;
661 /* this should be a virtual method on StoreEntry,
662 * i.e. e->referenced()
663 * so that the entry can notify the creating Store
/* reference(): informs the on-disk store and the memory replacement
 * policy that 'e' was accessed again. */
666 StoreController::reference(StoreEntry
&e
)
668 /* Notify the fs that we're referencing this object again */
670 if (e
.swap_dirn
> -1)
671 e
.store()->reference(e
);
673 /* Notify the memory cache that we're referencing this object again */
675 if (mem_policy
->Referenced
)
676 mem_policy
->Referenced(mem_policy
, &e
, &e
.mem_obj
->repl
);
/* dereference(): mirror of reference() for when the object is released.
 * NOTE(review): this checks swap_filen while reference() checks
 * swap_dirn -- asymmetry present in upstream too; confirm intent. */
681 StoreController::dereference(StoreEntry
& e
)
683 /* Notify the fs that we're not referencing this object any more */
685 if (e
.swap_filen
> -1)
686 e
.store()->dereference(e
);
688 /* Notify the memory cache that we're not referencing this object any more */
690 if (mem_policy
->Dereferenced
)
691 mem_policy
->Dereferenced(mem_policy
, &e
, &e
.mem_obj
->repl
);
/* Parameter-list fragments of two StoreController::get() overloads; the
 * function names/return types are on lines missing from this extract. */
698 (const cache_key
*key
)
/* async get() overload: not implemented -- aborts if called */
708 (String
const key
, STOREGETCLIENT callback
, void *cbdata
)
710 fatal("not implemented");
/* StoreHashIndex ctor: requires the global store_table to not exist yet. */
713 StoreHashIndex::StoreHashIndex()
717 assert (store_table
== NULL
);
/* dtor: destroys every indexed StoreEntry, then the hash table itself. */
720 StoreHashIndex::~StoreHashIndex()
723 hashFreeItems(store_table
, destroyStoreEntry
);
724 hashFreeMemory(store_table
);
/* callback(): polls each configured dir (starting from a rotating 'ndir',
 * declared on lines missing here) for completed I/O, accumulating the
 * count in 'result'; bails out via fatal() if a retry limit is exceeded. */
730 StoreHashIndex::callback()
739 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++) {
/* wrap ndir back into range */
740 if (ndir
>= Config
.cacheSwap
.n_configured
)
741 ndir
= ndir
% Config
.cacheSwap
.n_configured
;
743 int temp_result
= store(ndir
)->callback();
749 result
+= temp_result
;
/* retry-limit safety valve (enclosing condition not visible here) */
752 fatal ("too much io\n");
/* create(): asks every configured dir to create its disk structures. */
762 StoreHashIndex::create()
764 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++)
768 /* Lookup an object in the cache.
769 * return just a reference to object, don't start swapping in yet. */
/* StoreHashIndex::get(key) -- the signature line itself is missing from
 * this extract; looks up the entry in the global store_table. */
773 (const cache_key
*key
)
775 PROF_start(storeGet
);
776 debugs(20, 3, "storeGet: looking up " << storeKeyText(key
));
777 StoreEntry
*p
= static_cast<StoreEntry
*>(hash_lookup(store_table
, key
));
/* async get() overload: not implemented -- aborts if called */
785 (String
const key
, STOREGETCLIENT callback
, void *cbdata
)
787 fatal("not implemented");
/* init(): sizes and creates the global store hash table from configured
 * swap + memory capacity, then kicks off each dir's index rebuild (the
 * per-dir rebuild call inside the final loop is not visible here). */
791 StoreHashIndex::init()
793 /* Calculate size of hash table (maximum currently 64k buckets). */
794 /* this is very bogus, it's specific to any Store maintaining an
795 * in-core index, not global */
796 size_t buckets
= (Store::Root().maxSize() + ( Config
.memMaxSize
>> 10)) / Config
.Store
.avgObjectSize
;
797 debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
798 " + " << ( Config
.memMaxSize
>> 10) << " KB, estimated " << buckets
<< " objects");
799 buckets
/= Config
.Store
.objectsPerBucket
;
800 debugs(20, 1, "Target number of buckets: " << buckets
);
801 /* ideally the full scan period should be configurable, for the
802 * moment it remains at approximately 24 hours. */
803 store_hash_buckets
= storeKeyHashBuckets(buckets
);
804 debugs(20, 1, "Using " << store_hash_buckets
<< " Store buckets");
805 debugs(20, 1, "Max Mem size: " << ( Config
.memMaxSize
>> 10) << " KB");
806 debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");
808 store_table
= hash_create(storeKeyHashCmp
,
809 store_hash_buckets
, storeKeyHashHash
);
811 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++) {
812 /* this starts a search of the store dirs, loading their
813 * index. under the new Store api this should be
814 * driven by the StoreHashIndex, not by each store.
816 * That is, the HashIndex should perform a search of each dir it is
817 * indexing to do the hash insertions. The search is then able to
818 * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
821 * Step 1: make the store rebuilds use a search internally
822 * Step 2: change the search logic to use the four modes described
824 * Step 3: have the hash index walk the searches itself.
/* maxSize(): sum of every configured dir's maximum size. */
831 StoreHashIndex::maxSize() const
836 for (i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++)
837 result
+= store(i
)->maxSize();
/* minSize(): sum of every configured dir's minimum size. */
843 StoreHashIndex::minSize() const
847 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++)
848 result
+= store(i
)->minSize();
/* stat(): appends each dir's statistics to 'output', blank-line separated. */
854 StoreHashIndex::stat(StoreEntry
& output
) const
858 /* Now go through each store, calling its stat routine */
860 for (i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++) {
861 storeAppendPrintf(&output
, "\n");
862 store(i
)->stat(output
);
/* reference/dereference: no-ops at the index level (bodies not visible). */
867 StoreHashIndex::reference(StoreEntry
&)
871 StoreHashIndex::dereference(StoreEntry
&)
/* maintain(): runs replacement/cleanup on each dir in turn. */
875 StoreHashIndex::maintain()
880 for (i
= 0; i
< Config
.cacheSwap
.n_configured
; i
++) {
881 /* XXX FixMe: This should be done "in parallel" on the different
882 * cache_dirs, not one at a time.
884 /* call the maintain function .. */
885 store(i
)->maintain();
/* updateSize(): no-op at the index level (body not visible). */
890 StoreHashIndex::updateSize(int64_t, int)
/* sync(): flushes each configured dir (the per-dir call inside the loop
 * is not visible in this extract). */
894 StoreHashIndex::sync()
896 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; ++i
)
/* search(): URL search is unimplemented; only the full-index iterator
 * (StoreSearchHashIndex) is supported. */
901 StoreHashIndex::search(String
const url
, HttpRequest
*)
904 fatal ("Cannot search by url yet\n");
906 return new StoreSearchHashIndex (this);
909 CBDATA_CLASS_INIT(StoreSearchHashIndex
);
/* Search iterator ctor: holds a reference to the index, starts at bucket 0. */
911 StoreSearchHashIndex::StoreSearchHashIndex(RefCount
<StoreHashIndex
> aSwapDir
) : sd(aSwapDir
), _done (false), bucket (0)
/* Copy ctor declared but never defined: copying the iterator is forbidden. */
915 StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
918 StoreSearchHashIndex::~StoreSearchHashIndex()
/* Callback-style next(): schedules 'callback(cbdata)' (body not visible). */
922 StoreSearchHashIndex::next(void (callback
)(void *cbdata
), void *cbdata
)
/* Synchronous next(): advances through buckets until an entry is available
 * or the table is exhausted; returns whether a current item exists. */
929 StoreSearchHashIndex::next()
934 while (!isDone() && !entries
.size())
937 return currentItem() != NULL
;
/* error(): body not visible here -- presumably always false. */
941 StoreSearchHashIndex::error() const
/* isDone(): finished once all buckets are consumed or explicitly stopped. */
947 StoreSearchHashIndex::isDone() const
949 return bucket
>= store_hash_buckets
|| _done
;
/* currentItem(): last entry of the copied bucket (guard not visible). */
953 StoreSearchHashIndex::currentItem()
958 return entries
.back();
/* Snapshots every StoreEntry in the current hash bucket into 'entries',
 * so iteration is safe against concurrent hash-chain modification. */
962 StoreSearchHashIndex::copyBucket()
964 /* probably need to lock the store entries...
965 * we copy them all to prevent races on the links. */
966 debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket
);
/* must only be called with an empty snapshot */
967 assert (!entries
.size());
968 hash_link
*link_ptr
= NULL
;
969 hash_link
*link_next
= NULL
;
970 link_next
= hash_get_bucket(store_table
, bucket
);
/* walk the chain, saving 'next' before the entry is touched */
972 while (NULL
!= (link_ptr
= link_next
)) {
973 link_next
= link_ptr
->next
;
/* hash_link is the first member of StoreEntry, so this cast is valid */
974 StoreEntry
*e
= (StoreEntry
*) link_ptr
;
976 entries
.push_back(e
);
980 debugs(47,3, "got entries: " << entries
.size());