]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store_dir.cc
Merge in current state of store refactoring work. The search method has been
[thirdparty/squid.git] / src / store_dir.cc
1
2 /*
3 * $Id: store_dir.cc,v 1.150 2005/01/03 16:08:26 robertc Exp $
4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "Store.h"
38 #include "MemObject.h"
39 #include "SwapDir.h"
40
41 #if HAVE_STATVFS
42 #if HAVE_SYS_STATVFS_H
43 #include <sys/statvfs.h>
44 #endif
45 #endif /* HAVE_STATVFS */
46 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
47 #if HAVE_SYS_PARAM_H
48 #include <sys/param.h>
49 #endif
50 #if HAVE_SYS_MOUNT_H
51 #include <sys/mount.h>
52 #endif
53 /* Windows and Linux use sys/vfs.h */
54 #if HAVE_SYS_VFS_H
55 #include <sys/vfs.h>
56 #endif
57
58 #include "StoreHashIndex.h"
59
60 static STDIRSELECT storeDirSelectSwapDirRoundRobin;
61 static STDIRSELECT storeDirSelectSwapDirLeastLoad;
62
/* The controller fronts a single StoreHashIndex that aggregates all
 * configured swap directories. */
StoreController::StoreController() : swapDir (new StoreHashIndex())
{}
65
/* No explicit cleanup here; swapDir appears to be a ref-counted
 * pointer released automatically — TODO confirm against Store.h. */
StoreController::~StoreController()
{}
68
69 /*
70 * This function pointer is set according to 'store_dir_select_algorithm'
71 * in squid.conf.
72 */
73 STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
74
75 void
76 StoreController::init()
77 {
78 swapDir->init();
79
80 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
81 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
82 debug(47, 1) ("Using Round Robin store dir selection\n");
83 } else {
84 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
85 debug(47, 1) ("Using Least Load store dir selection\n");
86 }
87 }
88
89 void
90 StoreController::createOneStore(Store &aStore)
91 {
92 /*
93 * On Windows, fork() is not available.
94 * The following is a workaround for create store directories sequentially
95 * when running on native Windows port.
96 */
97 #ifndef _SQUID_MSWIN_
98
99 if (fork())
100 return;
101
102 #endif
103
104 aStore.create();
105
106 #ifndef _SQUID_MSWIN_
107
108 exit(0);
109
110 #endif
111 }
112
/*
 * Kick off creation of all configured stores, then (where fork() is
 * available) reap every child process spawned by createOneStore().
 */
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#ifdef _SQUID_NEXT_
        /* NeXT lacks waitpid(); poll with wait3() instead */
        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif
        /* loop until no children remain; retry when interrupted */

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
136
137 /*
138 * Determine whether the given directory can handle this object
139 * size
140 *
141 * Note: if the object size is -1, then the only swapdirs that
142 * will return true here are ones that have max_obj_size = -1,
143 * ie any-sized-object swapdirs. This is a good thing.
144 */
145 bool
146 SwapDir::objectSizeIsAcceptable(ssize_t objsize) const
147 {
148 /*
149 * If the swapdir's max_obj_size is -1, then it definitely can
150 */
151
152 if (max_objsize == -1)
153 return true;
154
155 /*
156 * If the object size is -1, then if the storedir isn't -1 we
157 * can't store it
158 */
159 if ((objsize == -1) && (max_objsize != -1))
160 return false;
161
162 /*
163 * Else, make sure that the max object size is larger than objsize
164 */
165 return max_objsize > objsize;
166 }
167
168
169 /*
170 * This new selection scheme simply does round-robin on all SwapDirs.
171 * A SwapDir is skipped if it is over the max_size (100%) limit, or
172 * overloaded.
173 */
/*
 * Round-robin swap dir selector: resume scanning just past the dir
 * chosen last time, skipping read-only, full, size-incompatible, or
 * overloaded dirs.  Returns the dir index, or -1 if none is usable.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    /* remembers where the previous call left off, across calls */
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;
    ssize_t objsize = (ssize_t) objectLen(e);

    /* NOTE(review): <= gives n_configured+1 iterations; presumably so
     * every dir is considered even after the initial ++dirn — confirm */
    for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->flags.read_only)
            continue;

        /* already over its configured capacity */
        if (sd->cur_size > sd->max_size)
            continue;

        if (!sd->objectSizeIsAcceptable(objsize))
            continue;

        /* check for error or overload condition */
        load = sd->canStore(*e);

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}
210
211 /*
212 * Spread load across all of the store directories
213 *
214 * Note: We should modify this later on to prefer sticking objects
215 * in the *tightest fit* swapdir to conserve space, along with the
216 * actual swapdir usage. But for now, this hack will do while
217 * testing, so you should order your swapdirs in the config file
218 * from smallest maxobjsize to unlimited (-1) maxobjsize.
219 *
220 * We also have to choose nleast == nconf since we need to consider
221 * ALL swapdirs, regardless of state. Again, this is a hack while
222 * we sort out the real usefulness of this algorithm.
223 */
/*
 * Least-load swap dir selector: scan every configured dir and pick the
 * one reporting the lowest load, breaking ties by tightest max_objsize
 * fit and then by most free space.  Returns the dir index or -1.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size */
    objsize = (ssize_t) objectLen(e);

    /* include the on-disk swap header in the size when it is known */
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        /* clear any stale selection flag; set again for the winner below */
        SD->flags.selected = 0;
        load = SD->canStore(*e);

        /* error or overload condition */
        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit: prefer the smallest finite limit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        /* this dir is the best candidate so far */
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
289
290 /*
291 * An entry written to the swap log MUST have the following
292 * properties.
293 * 1. It MUST be a public key. It does no good to log
294 * a public ADD, change the key, then log a private
295 * DEL. So we need to log a DEL before we change a
296 * key from public to private.
297 * 2. It MUST have a valid (> -1) swap_filen.
298 */
/*
 * Record a swap log operation (op) for entry e by delegating to the
 * owning SwapDir's logEntry().  See the invariants documented above:
 * the entry must be public and have a valid swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    /* op must be one of the defined swap log opcodes */
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debug(20, 3) ("storeDirSwapLog: %s %s %d %08X\n",
                  swap_log_op_str[op],
                  e->getMD5Text(),
                  e->swap_dirn,
                  e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
322
/* The controller aggregates sizes from its stores and keeps no size
 * of its own, so calling this is a programming error. */
void
StoreController::updateSize(size_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}
328
/*
 * Adjust this dir's accounted usage by 'size' bytes (sign = +1 to add
 * an object, -1 to remove one).  The byte count is rounded up to whole
 * filesystem blocks, then converted to KB for the counters.
 */
void
SwapDir::updateSize(size_t size, int sign)
{
    /* number of fs blocks the object occupies, rounded up */
    int blks = (size + fs.blksize - 1) / fs.blksize;
    /* '*' binds tighter than '>>': this is (blks * blksize) / 1024, signed */
    int k = (blks * fs.blksize >> 10) * sign;
    cur_size += k;
    store_swap_size += k;

    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
342
/*
 * Append overall store statistics (entry count, swap capacity and
 * utilization) to 'output', then let each swap dir report its own.
 */
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %8ld KB\n",
                      (long int) maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity       : %d%% used, %d%% free\n",
                      percent((int) store_swap_size, (int) maxSize()),
                      percent((int) (maxSize() - store_swap_size), (int) maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}
361
362 /* if needed, this could be taught to cache the result */
/* Total configured swap capacity (KB), summed over all dirs. */
size_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
369
/* Low-water total (KB) below which the store should not be trimmed. */
size_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
376
/*
 * Called when the underlying disk reports full: cap this dir's
 * max_size at what is actually in use so we stop trying to grow.
 * Nothing to do if cur_size already reached (or passed) max_size.
 */
void
SwapDir::diskFull()
{
    if (cur_size >= max_size)
        return;

    max_size = cur_size;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
}
387
388 void
389 storeDirOpenSwapLogs(void)
390 {
391 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
392 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
393 }
394
395 void
396 storeDirCloseSwapLogs(void)
397 {
398 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
399 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
400 }
401
402 /*
403 * storeDirWriteCleanLogs
404 *
405 * Writes a "clean" swap log file from in-memory metadata.
406 * This is a rewrite of the original function to troll each
407 * StoreDir and write the logs, and flush at the end of
408 * the run. Thanks goes to Eric Stern, since this solution
409 * came out of his COSS code.
410 */
/*
 * Rewrite every dir's swap log from in-memory metadata.  Runs in
 * three phases: start a clean log per dir, interleave entry writes
 * across all dirs until each is exhausted, then flush.  Returns the
 * number of entries written; 0 if a rebuild is in progress.
 * If 'reopen' is set, the normal swap logs are reopened afterwards.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    /* cannot snapshot the index while it is still being rebuilt */
    if (store_dirs_rebuilding) {
        debug(20, 1) ("Not currently OK to rewrite swap log.\n");
        debug(20, 1) ("storeDirWriteCleanLogs: Operation aborted.\n");
        return 0;
    }

    debug(20, 1) ("storeDirWriteCleanLogs: Starting...\n");
    getCurrentTime();
    start = current_time;

    /* phase 1: open a clean log on each dir (failures are skipped) */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debug(20, 1) ("log.clean.start() failed for dir #%d\n", sd->index);
            continue;
        }
    }

    /* This writes all logs in parallel. It seems to me to be more efficient
     * to write them sequentially. - RBC 20021214
     */
    /* phase 2: round-robin one entry per dir until all are drained */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            /* dirs whose writeCleanStart() failed have no cleanLog */
            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            /* progress report every 65536 entries */
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debug(20, 1) ("  %7d entries written so far.\n", n);
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debug(20, 1) ("  Finished.  Wrote %d entries.\n", n);

    debug(20, 1) ("  Took %3.1f seconds (%6.1f entries/sec).\n",
                  dt, (double) n / (dt > 0.0 ? dt : 1.0));

    return n;
}
491
/* Create an iterator over stored entries; caller owns the result. */
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
498
/* Accessor for the x-th configured swap dir (no bounds checking). */
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
504
/* Flush pending state in all stores (e.g. at shutdown). */
void
StoreController::sync(void)
{
    /* sync mem cache? */
    swapDir->sync();
}
511
512 /*
513 * handle callbacks all avaliable fs'es
514 */
/* Run pending I/O callbacks in all stores; returns the amount of
 * work done as reported by the underlying dirs. */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
528
/*
 * Query the filesystem block size for 'path' into *blksize.
 * Returns 0 on success, 1 on failure (with *blksize defaulted to
 * 2048).  Uses statvfs() where available, statfs() otherwise.
 */
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        *blksize = 2048;
        return 1;
    }

    /* f_frsize is the fundamental (fragment) block size */
    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
564
565 #define fsbtoblk(num, fsbs, bs) \
566 (((fsbs) != 0 && (fsbs) < (bs)) ? \
567 (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/*
 * Fetch filesystem usage for 'path': total/free space in KB and
 * total/free inodes.  Returns 0 on success, 1 on stat failure.
 * fsbtoblk (above) converts fs blocks to 1 KB blocks.
 */
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
601
602 void
603 allocate_new_swapdir(_SquidConfig::_cacheSwap * swap)
604 {
605 if (swap->swapDirs == NULL) {
606 swap->n_allocated = 4;
607 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
608 }
609
610 if (swap->n_allocated == swap->n_configured) {
611 StorePointer *tmp;
612 swap->n_allocated <<= 1;
613 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
614 xmemcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
615 xfree(swap->swapDirs);
616 swap->swapDirs = tmp;
617 }
618 }
619
/*
 * Release the swap-dir array at shutdown.  A no-op during reconfigure,
 * when the configured dirs must survive.  Each slot is reset to NULL
 * first so ref-counted dirs can release their resources.
 */
void
free_cachedir(_SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
        * swap->swapDirs[i]->deactivate();
        * but there may be such a means already.
        * RBC 20041225
        */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
643
644 /* this should be a virtual method on StoreEntry,
645 * i.e. e->referenced()
646 * so that the entry can notify the creating Store
647 */
648 void
649 StoreController::reference(StoreEntry &e)
650 {
651 /* Notify the fs that we're referencing this object again */
652
653 if (e.swap_dirn > -1)
654 e.store()->reference(e);
655
656 /* Notify the memory cache that we're referencing this object again */
657 if (e.mem_obj) {
658 if (mem_policy->Referenced)
659 mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
660 }
661 }
662
/*
 * Record the release of a reference to entry e, mirroring reference().
 *
 * NOTE(review): this tests e.swap_filen while reference() tests
 * e.swap_dirn — the two should probably use the same condition;
 * confirm whether one can be valid without the other.
 */
void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    /* Notify the memory cache that we're not referencing this object any more */
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
677
/* Synchronous lookup by MD5 cache key; delegated to the swap-dir
 * index (there is no separate memory hot-cache index here). */
StoreEntry *
StoreController::get(const cache_key *key)
{
    return swapDir->get(key);
}
687
/* Asynchronous lookup by textual key: not supported yet. */
void
StoreController::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
695
/* Only one instance may exist: the global store_table must not have
 * been created yet (init() builds it). */
StoreHashIndex::StoreHashIndex()
{
    assert (store_table == NULL);
}
700
/* Tear down the global entry hash, destroying all StoreEntry items. */
StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
709
/*
 * Run pending I/O callbacks across all dirs until a full pass does no
 * work.  The static 'ndir' rotates the starting dir across calls so no
 * single dir is favored.  Returns the total amount of work done.
 */
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        /* work done in this pass */
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            /* wrap ndir back into range (it grows monotonically) */
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            /* guard against a dir that never runs out of callbacks */
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    /* advance the starting point for the next call */
    ndir++;

    return result;
}
741
742 void
743 StoreHashIndex::create()
744 {
745 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
746 store(i)->create();
747 }
748
749 /* Lookup an object in the cache.
750 * return just a reference to object, don't start swapping in yet. */
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debug(20, 3) ("storeGet: looking up %s\n", storeKeyText(key));
    /* NULL when the key is not in the index */
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
762
/* Asynchronous lookup by textual key: not supported yet. */
void
StoreHashIndex::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
770
/*
 * Size and create the global entry hash from the configured swap
 * capacity and expected object sizes, then initialize each dir
 * (which starts loading its index).
 */
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    size_t buckets = Store::Root().maxSize() / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " KB, estimated " << buckets << " objects\n");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours.  */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++)
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         * Step 1: make the store rebuilds use a search internally
         */
        store(i)->init();

}
801
802 size_t
803 StoreHashIndex::maxSize() const
804 {
805 int i;
806 size_t result = 0;
807
808 for (i = 0; i < Config.cacheSwap.n_configured; i++)
809 result += store(i)->maxSize();
810
811 return result;
812 }
813
814 size_t
815 StoreHashIndex::minSize() const
816 {
817 size_t result = 0;
818
819 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
820 result += store(i)->minSize();
821
822 return result;
823 }
824
825 void
826 StoreHashIndex::stat(StoreEntry & output) const
827 {
828 int i;
829
830 /* Now go through each store, calling its stat routine */
831
832 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
833 storeAppendPrintf(&output, "\n");
834 store(i)->stat(output);
835 }
836 }
837
/* No-op: reference accounting is not tracked at the index level. */
void
StoreHashIndex::reference(StoreEntry&)
{}
841
/* No-op: reference accounting is not tracked at the index level. */
void
StoreHashIndex::dereference(StoreEntry&)
{}
845
/* Run each dir's periodic maintenance (replacement/cleanup) in turn. */
void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallell" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
860
/* No-op: sizes are accounted per-dir (SwapDir::updateSize), not here. */
void
StoreHashIndex::updateSize(unsigned int, int)
{}
864
865 void
866 StoreHashIndex::sync()
867 {
868 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
869 store(i)->sync();
870 }
871
/* Create a full-index iterator; per-URL search is not implemented,
 * so a non-empty url is fatal.  Caller owns the result. */
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
880
CBDATA_CLASS_INIT(StoreSearchHashIndex);
/* Begin a walk over the whole hash index: start at bucket 0 with no
 * entries buffered and the search not yet done. */
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
884
885 /* do not link
886 StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
887 */
888
/* Nothing owned beyond the members; defaults suffice. */
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
891
/* Callback-style wrapper: advance synchronously, then invoke the
 * caller's callback with its cbdata. */
void
StoreSearchHashIndex::next(void (callback)(void *cbdata), void *cbdata)
{
    next();
    callback (cbdata);
}
898
899 bool
900 StoreSearchHashIndex::next()
901 {
902 if (entries.size())
903 entries.pop_back();
904
905 while (!isDone() && !entries.size())
906 copyBucket();
907
908 return currentItem() != NULL;
909 }
910
/* A hash-index walk has no failure mode. */
bool
StoreSearchHashIndex::error() const
{
    return false;
}
916
/* Finished when every hash bucket has been copied, or _done was set. */
bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
922
923 StoreEntry *
924 StoreSearchHashIndex::currentItem()
925 {
926 if (!entries.size())
927 return NULL;
928
929 return entries.back();
930 }
931
/*
 * Buffer every entry of the current hash bucket into 'entries',
 * then advance to the next bucket.  Copying the whole bucket up
 * front avoids races on the hash links while the caller iterates.
 */
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        /* hash_link is the first member of StoreEntry, so this cast is
         * how the table stores entries — TODO confirm layout */
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}