/*
 * Provenance note: this file was captured from a git web view of
 * thirdparty/squid.git, src/store_dir.cc, at the commit
 * "Converted objectLen() to a StoreEntry class method."
 */
1
2 /*
3 * $Id: store_dir.cc,v 1.158 2007/04/17 05:40:18 wessels Exp $
4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "Store.h"
38 #include "MemObject.h"
39 #include "SquidTime.h"
40 #include "SwapDir.h"
41
42 #if HAVE_STATVFS
43 #if HAVE_SYS_STATVFS_H
44 #include <sys/statvfs.h>
45 #endif
46 #endif /* HAVE_STATVFS */
47 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
48 #if HAVE_SYS_PARAM_H
49 #include <sys/param.h>
50 #endif
51 #if HAVE_SYS_MOUNT_H
52 #include <sys/mount.h>
53 #endif
54 /* Windows and Linux use sys/vfs.h */
55 #if HAVE_SYS_VFS_H
56 #include <sys/vfs.h>
57 #endif
58
59 #include "StoreHashIndex.h"
60
/* The two selectable cache_dir selection policies; see init() below. */
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
72
/* The controller owns a StoreHashIndex that aggregates all cache_dirs. */
StoreController::StoreController() : swapDir (new StoreHashIndex())
{}
75
/* swapDir is a RefCount; releasing it tears down the index. */
StoreController::~StoreController()
{}
78
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. It defaults to least-load; StoreController::init()
 * may switch it to round-robin.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
84
85 void
86 StoreController::init()
87 {
88 swapDir->init();
89
90 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
91 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
92 debug(47, 1) ("Using Round Robin store dir selection\n");
93 } else {
94 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
95 debug(47, 1) ("Using Least Load store dir selection\n");
96 }
97 }
98
/*
 * Create one store's on-disk structures. On platforms with fork(),
 * the work is done in a child process so several stores can be
 * created concurrently; the parent returns immediately.
 */
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#ifndef _SQUID_MSWIN_

    // parent: return at once, the child does the creation
    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    // child: exit once the store has been created
    exit(0);

#endif
}
122
/*
 * Create all configured stores, then (on fork-capable platforms)
 * reap every child spawned by createOneStore().
 */
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    // Wait for all children; retry when interrupted by a signal.
    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
146
147 /*
148 * Determine whether the given directory can handle this object
149 * size
150 *
151 * Note: if the object size is -1, then the only swapdirs that
152 * will return true here are ones that have max_obj_size = -1,
153 * ie any-sized-object swapdirs. This is a good thing.
154 */
155 bool
156 SwapDir::objectSizeIsAcceptable(ssize_t objsize) const
157 {
158 /*
159 * If the swapdir's max_obj_size is -1, then it definitely can
160 */
161
162 if (max_objsize == -1)
163 return true;
164
165 /*
166 * If the object size is -1, then if the storedir isn't -1 we
167 * can't store it
168 */
169 if ((objsize == -1) && (max_objsize != -1))
170 return false;
171
172 /*
173 * Else, make sure that the max object size is larger than objsize
174 */
175 return max_objsize > objsize;
176 }
177
178
179 /*
180 * This new selection scheme simply does round-robin on all SwapDirs.
181 * A SwapDir is skipped if it is over the max_size (100%) limit, or
182 * overloaded.
183 */
184 static int
185 storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
186 {
187 static int dirn = 0;
188 int i;
189 int load;
190 RefCount<SwapDir> sd;
191
192 for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
193 if (++dirn >= Config.cacheSwap.n_configured)
194 dirn = 0;
195
196 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
197
198 if (sd->flags.read_only)
199 continue;
200
201 if (sd->cur_size > sd->max_size)
202 continue;
203
204 if (!sd->objectSizeIsAcceptable(e->objectLen()))
205 continue;
206
207 /* check for error or overload condition */
208 load = sd->canStore(*e);
209
210 if (load < 0 || load > 1000) {
211 continue;
212 }
213
214 return dirn;
215 }
216
217 return -1;
218 }
219
220 /*
221 * Spread load across all of the store directories
222 *
223 * Note: We should modify this later on to prefer sticking objects
224 * in the *tightest fit* swapdir to conserve space, along with the
225 * actual swapdir usage. But for now, this hack will do while
226 * testing, so you should order your swapdirs in the config file
227 * from smallest maxobjsize to unlimited (-1) maxobjsize.
228 *
229 * We also have to choose nleast == nconf since we need to consider
230 * ALL swapdirs, regardless of state. Again, this is a hack while
231 * we sort out the real usefulness of this algorithm.
232 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;              // -1 means "no acceptable dir found"
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size */
    objsize = e->objectLen();

    // account for the on-disk swap metadata header when size is known
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;
        load = SD->canStore(*e);

        // negative load is an error; > 1000 means overloaded
        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* tie-break 1: closest max_objsize fit — prefer the dir
             * with the smallest still-acceptable max_objsize */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* tie-break 2: most free space */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the best candidate so far
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
298
299 /*
300 * An entry written to the swap log MUST have the following
301 * properties.
302 * 1. It MUST be a public key. It does no good to log
303 * a public ADD, change the key, then log a private
304 * DEL. So we need to log a DEL before we change a
305 * key from public to private.
306 * 2. It MUST have a valid (> -1) swap_filen.
307 */
/*
 * Log an ADD/DEL operation for an entry to its cache_dir's swap log.
 * See the invariants documented above: the entry must have a public
 * key and a valid swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debug(20, 3) ("storeDirSwapLog: %s %s %d %08X\n",
                  swap_log_op_str[op],
                  e->getMD5Text(),
                  e->swap_dirn,
                  e->swap_filen);

    // delegate the actual write to the dir that owns the entry
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
331
/*
 * The controller keeps no size of its own; size accounting is done by
 * the individual SwapDirs. Calling this is a programming error.
 */
void
StoreController::updateSize(size_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}
337
/*
 * Adjust this dir's usage accounting by one object of the given size.
 * sign is +1 when an object is added, -1 when removed.
 */
void
SwapDir::updateSize(size_t size, int sign)
{
    // round up to whole filesystem blocks, then convert to KB
    int blks = (size + fs.blksize - 1) / fs.blksize;
    int k = (blks * fs.blksize >> 10) * sign;
    cur_size += k;
    store_swap_size += k;       // global (all-dirs) swap size, in KB

    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
351
/*
 * Append overall store statistics to `output`, then let each
 * cache_dir append its own.
 */
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %8ld KB\n",
                      (long int) maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity : %d%% used, %d%% free\n",
                      percent((int) store_swap_size, (int) maxSize()),
                      percent((int) (maxSize() - store_swap_size), (int) maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}
370
/* if needed, this could be taught to cache the result */
/* Total configured swap capacity (KB) across all cache_dirs. */
size_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
378
/* Sum of the low-water sizes (KB) across all cache_dirs. */
size_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
385
386 void
387 SwapDir::diskFull()
388 {
389 if (cur_size >= max_size)
390 return;
391
392 max_size = cur_size;
393
394 debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
395 }
396
397 void
398 storeDirOpenSwapLogs(void)
399 {
400 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
401 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
402 }
403
404 void
405 storeDirCloseSwapLogs(void)
406 {
407 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
408 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
409 }
410
411 /*
412 * storeDirWriteCleanLogs
413 *
414 * Writes a "clean" swap log file from in-memory metadata.
415 * This is a rewrite of the original function to troll each
416 * StoreDir and write the logs, and flush at the end of
417 * the run. Thanks goes to Eric Stern, since this solution
418 * came out of his COSS code.
419 */
/*
 * Rewrite every cache_dir's swap log from the in-memory metadata.
 * Returns the number of entries written (0 if aborted because a
 * rebuild is still in progress). If `reopen` is set, the normal swap
 * logs are reopened afterwards.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;                  // total entries written

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // The in-memory index is incomplete during a rebuild; refuse.
    if (StoreController::store_dirs_rebuilding) {
        debug(20, 1) ("Not currently OK to rewrite swap log.\n");
        debug(20, 1) ("storeDirWriteCleanLogs: Operation aborted.\n");
        return 0;
    }

    debug(20, 1) ("storeDirWriteCleanLogs: Starting...\n");
    getCurrentTime();
    start = current_time;

    // Phase 1: open a clean-log writer on each dir that supports one.
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debug(20, 1) ("log.clean.start() failed for dir #%d\n", sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    // Phase 2: round-robin, writing one entry per dir per pass until
    // every dir's iterator is exhausted.
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            // dirs whose writeCleanStart() failed have no cleanLog
            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debug(20, 1) (" %7d entries written so far.\n", n);
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debug(20, 1) (" Finished. Wrote %d entries.\n", n);

    debug(20, 1) (" Took %3.1f seconds (%6.1f entries/sec).\n",
                  dt, (double) n / (dt > 0.0 ? dt : 1.0));

    return n;
}
502
/* Create an iterator over cached objects; delegates to the disk index. */
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
509
/* Return the Store (cache_dir) at index x. */
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
515
/* Flush all stores to disk. */
void
StoreController::sync(void)
{
    /* sync mem cache? */
    swapDir->sync();
}
522
/*
 * handle callbacks all available fs'es
 * Returns the number of I/O events processed (may double count).
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
539
/*
 * Determine the filesystem block size for `path`, storing it in
 * *blksize. Returns 0 on success, 1 on error (with *blksize set to a
 * 2048-byte default either way).
 */
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        *blksize = 2048;        // sane fallback
        return 1;
    }

    *blksize = (int) sfs.f_frsize;      // fundamental fs block size
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
575
/* Convert `num` filesystem blocks of size `fsbs` to blocks of size `bs`,
 * dividing or multiplying to avoid overflow depending on which is larger. */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/*
 * Report total/free KB and total/free inodes for the filesystem
 * holding `path`. Returns 0 on success, 1 on stat failure (outputs
 * untouched in that case).
 */
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debug(50, 1) ("%s: %s\n", path, xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
612
613 void
614 allocate_new_swapdir(_SquidConfig::_cacheSwap * swap)
615 {
616 if (swap->swapDirs == NULL) {
617 swap->n_allocated = 4;
618 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
619 }
620
621 if (swap->n_allocated == swap->n_configured) {
622 StorePointer *tmp;
623 swap->n_allocated <<= 1;
624 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
625 xmemcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
626 xfree(swap->swapDirs);
627 swap->swapDirs = tmp;
628 }
629 }
630
/*
 * Release the cache_dir array at shutdown. A no-op during
 * reconfigure, when the dirs must survive.
 */
void
free_cachedir(_SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        // dropping the RefCount releases the SwapDir
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
654
655 /* this should be a virtual method on StoreEntry,
656 * i.e. e->referenced()
657 * so that the entry can notify the creating Store
658 */
/*
 * Record a reference to an entry with both the owning cache_dir's
 * replacement policy and the memory cache's policy.
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    /* Notify the memory cache that we're referencing this object again */
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
673
674 void
675 StoreController::dereference(StoreEntry & e)
676 {
677 /* Notify the fs that we're not referencing this object any more */
678
679 if (e.swap_filen > -1)
680 e.store()->dereference(e);
681
682 /* Notify the memory cache that we're not referencing this object any more */
683 if (e.mem_obj) {
684 if (mem_policy->Dereferenced)
685 mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
686 }
687 }
688
/* Synchronous lookup by MD5 cache key; delegates to the disk index. */
StoreEntry *
StoreController::get(const cache_key *key)
{
    return swapDir->get(key);
}
698
/* Asynchronous lookup by URI string — not implemented yet. */
void
StoreController::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
706
/* Only one index may exist: it owns the global store_table. */
StoreHashIndex::StoreHashIndex()
{
    assert (store_table == NULL);
}
711
712 StoreHashIndex::~StoreHashIndex()
713 {
714 if (store_table) {
715 hashFreeItems(store_table, destroyStoreEntry);
716 hashFreeMemory(store_table);
717 store_table = NULL;
718 }
719 }
720
/*
 * Poll every cache_dir for pending I/O callbacks, round-robin,
 * repeating until a full pass does no work. Returns the total number
 * of events processed.
 */
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;                      // events seen in the current pass
    static int ndir = 0;        // persists so polling stays round-robin

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // defensive limit: a dir that never drains would spin here
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ndir++;

    return result;
}
752
753 void
754 StoreHashIndex::create()
755 {
756 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
757 store(i)->create();
758 }
759
760 /* Lookup an object in the cache.
761 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debug(20, 3) ("storeGet: looking up %s\n", storeKeyText(key));
    // NULL when the key is not in the in-core index
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
773
/* Asynchronous lookup by URI string — not implemented yet. */
void
StoreHashIndex::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    fatal("not implemented");
}
781
/*
 * Size and create the global store_table hash, then have each
 * cache_dir load its index into it.
 */
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    // estimated object count = total swap KB / average object size
    size_t buckets = Store::Root().maxSize() / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++)
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        store(i)->init();

}
821
822 size_t
823 StoreHashIndex::maxSize() const
824 {
825 int i;
826 size_t result = 0;
827
828 for (i = 0; i < Config.cacheSwap.n_configured; i++)
829 result += store(i)->maxSize();
830
831 return result;
832 }
833
834 size_t
835 StoreHashIndex::minSize() const
836 {
837 size_t result = 0;
838
839 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
840 result += store(i)->minSize();
841
842 return result;
843 }
844
845 void
846 StoreHashIndex::stat(StoreEntry & output) const
847 {
848 int i;
849
850 /* Now go through each store, calling its stat routine */
851
852 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
853 storeAppendPrintf(&output, "\n");
854 store(i)->stat(output);
855 }
856 }
857
/* The index keeps no per-entry reference state; nothing to do. */
void
StoreHashIndex::reference(StoreEntry&)
{}
861
/* The index keeps no per-entry reference state; nothing to do. */
void
StoreHashIndex::dereference(StoreEntry&)
{}
865
866 void
867 StoreHashIndex::maintain()
868 {
869 int i;
870 /* walk each fs */
871
872 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
873 /* XXX FixMe: This should be done "in parallell" on the different
874 * cache_dirs, not one at a time.
875 */
876 /* call the maintain function .. */
877 store(i)->maintain();
878 }
879 }
880
/* Size bookkeeping is done by the individual SwapDirs, not the index. */
void
StoreHashIndex::updateSize(size_t, int)
{}
884
885 void
886 StoreHashIndex::sync()
887 {
888 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
889 store(i)->sync();
890 }
891
/* Create a whole-cache iterator; per-URL search is not supported yet. */
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
900
CBDATA_CLASS_INIT(StoreSearchHashIndex);

/* Begin a search at bucket 0 of the index's hash table. */
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
905
906 /* do not link
907 StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
908 */
909
/* entries holds non-owning pointers; nothing to release here. */
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
912
/* Callback-style wrapper: advance synchronously, then notify caller. */
void
StoreSearchHashIndex::next(void (callback)(void *cbdata), void *cbdata)
{
    next();
    callback (cbdata);
}
919
920 bool
921 StoreSearchHashIndex::next()
922 {
923 if (entries.size())
924 entries.pop_back();
925
926 while (!isDone() && !entries.size())
927 copyBucket();
928
929 return currentItem() != NULL;
930 }
931
/* This search cannot fail. */
bool
StoreSearchHashIndex::error() const
{
    return false;
}
937
/* Done once every hash bucket has been visited (or aborted via _done). */
bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
943
944 StoreEntry *
945 StoreSearchHashIndex::currentItem()
946 {
947 if (!entries.size())
948 return NULL;
949
950 return entries.back();
951 }
952
/*
 * Copy all entries of the current hash bucket into `entries`, then
 * advance to the next bucket.
 */
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    // walk the bucket's chain; entries are the hash links themselves
    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}