]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store_dir.cc
Portability polish: use #if instead of #ifdef or #ifndef
[thirdparty/squid.git] / src / store_dir.cc
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "Store.h"
38 #include "MemObject.h"
39 #include "SquidMath.h"
40 #include "SquidTime.h"
41 #include "SwapDir.h"
42 #include "swap_log_op.h"
43
44 #if HAVE_STATVFS
45 #if HAVE_SYS_STATVFS_H
46 #include <sys/statvfs.h>
47 #endif
48 #endif /* HAVE_STATVFS */
49 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
50 #if HAVE_SYS_PARAM_H
51 #include <sys/param.h>
52 #endif
53 #if HAVE_SYS_MOUNT_H
54 #include <sys/mount.h>
55 #endif
56 /* Windows and Linux use sys/vfs.h */
57 #if HAVE_SYS_VFS_H
58 #include <sys/vfs.h>
59 #endif
60
61 #include "StoreHashIndex.h"
62
/* The two available cache_dir selection policies; one of them is
 * installed into storeDirSelectSwapDir at init() time. */
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
74
// The controller delegates all disk-store work to a StoreHashIndex.
StoreController::StoreController() : swapDir (new StoreHashIndex())
{}
77
// swapDir is a RefCount pointer; its destructor releases the index.
StoreController::~StoreController()
{}
80
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. Defaults to least-load until StoreController::init()
 * runs and possibly switches it to round-robin.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
86
87 void
88 StoreController::init()
89 {
90 swapDir->init();
91
92 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
93 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
94 debugs(47, 1, "Using Round Robin store dir selection");
95 } else {
96 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
97 debugs(47, 1, "Using Least Load store dir selection");
98 }
99 }
100
/// Create the on-disk structures for one cache_dir. On POSIX systems the
/// work is done in a forked child so several dirs can be built in parallel;
/// the parent returns immediately and reaps children in create().
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#if !_SQUID_MSWIN_

    // Parent process: child (fork() == 0) falls through to do the work.
    if (fork())
        return;

#endif

    aStore.create();

#if !_SQUID_MSWIN_

    // Child process must not return into the parent's control flow.
    exit(0);

#endif
}
124
/// Create all configured cache_dirs, then (on POSIX) wait for every
/// child forked by createOneStore() to finish.
void
StoreController::create()
{
    swapDir->create();

#if !_SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#if _SQUID_NEXT_

        // NeXT lacks waitpid(); wait3 with WNOHANG is the closest match.
        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

        // Keep reaping until no children remain; retry on EINTR.
    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
148
149 /**
150 * Determine whether the given directory can handle this object
151 * size
152 *
153 * Note: if the object size is -1, then the only swapdirs that
154 * will return true here are ones that have min and max unset,
155 * ie any-sized-object swapdirs. This is a good thing.
156 */
157 bool
158 SwapDir::objectSizeIsAcceptable(int64_t objsize) const
159 {
160 // If the swapdir has no range limits, then it definitely can
161 if (min_objsize <= 0 && max_objsize == -1)
162 return true;
163
164 /*
165 * If the object size is -1 and the storedir has limits we
166 * can't store it there.
167 */
168 if (objsize == -1)
169 return false;
170
171 // Else, make sure that the object size will fit.
172 if (max_objsize == -1 && min_objsize <= objsize)
173 return true;
174 else
175 return min_objsize <= objsize && max_objsize > objsize;
176 }
177
178
179 /*
180 * This new selection scheme simply does round-robin on all SwapDirs.
181 * A SwapDir is skipped if it is over the max_size (100%) limit, or
182 * overloaded.
183 */
184 static int
185 storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
186 {
187 static int dirn = 0;
188 int i;
189 int load;
190 RefCount<SwapDir> sd;
191
192 ssize_t objsize = e->objectLen();
193 if (objsize != -1)
194 objsize += e->mem_obj->swap_hdr_sz;
195
196 for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
197 if (++dirn >= Config.cacheSwap.n_configured)
198 dirn = 0;
199
200 sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
201
202 if (sd->flags.read_only)
203 continue;
204
205 if (sd->cur_size > sd->max_size)
206 continue;
207
208 if (!sd->objectSizeIsAcceptable(objsize))
209 continue;
210
211 /* check for error or overload condition */
212 load = sd->canStore(*e);
213
214 if (load < 0 || load > 1000) {
215 continue;
216 }
217
218 return dirn;
219 }
220
221 return -1;
222 }
223
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;     // max_objsize of the best dir so far
    int least_load = INT_MAX;       // best (lowest) load seen so far
    int load;
    int dirn = -1;                  // index of the winner; -1 if none
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size (body plus swap metadata header) */
    objsize = e->objectLen();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        // clear every dir's flag; the winner is re-marked below
        SD->flags.selected = 0;
        load = SD->canStore(*e);

        // negative load means error; > 1000 means overloaded
        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit: prefer the dir with the smaller
             * (but still acceptable) upper bound; -1 means unlimited */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free space wins the remaining ties */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the new best candidate
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
302
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 * a public ADD, change the key, then log a private
 * DEL. So we need to log a DEL before we change a
 * key from public to private.
 * 2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    // op must be a real log operation, not the NOP/MAX sentinels
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual write to the dir that owns the entry
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
335
/// The controller aggregates sizes from its SwapDirs and must never be
/// asked to track a size of its own; reaching here is a logic error.
void
StoreController::updateSize(int64_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}
341
342 void
343 SwapDir::updateSize(int64_t size, int sign)
344 {
345 int64_t blks = (size + fs.blksize - 1) / fs.blksize;
346 int64_t k = ((blks * fs.blksize) >> 10) * sign;
347 cur_size += k;
348 store_swap_size += k;
349
350 if (sign > 0)
351 n_disk_objects++;
352 else if (sign < 0)
353 n_disk_objects--;
354 }
355
/// Append overall store statistics to the given entry, then delegate to
/// the disk-store index for per-dir detail.
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %"PRIu64" KB\n",
                      maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity       : %"PRId64"%% used, %"PRId64"%% free\n",
                      Math::int64Percent(store_swap_size, maxSize()),
                      Math::int64Percent((maxSize() - store_swap_size), maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}
374
/* if needed, this could be taught to cache the result */
/// Total configured capacity (KB) across all cache_dirs.
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}
382
/// Total low-water size (KB) across all cache_dirs.
uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
389
390 void
391 SwapDir::diskFull()
392 {
393 if (cur_size >= max_size)
394 return;
395
396 max_size = cur_size;
397
398 debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
399 }
400
401 void
402 storeDirOpenSwapLogs(void)
403 {
404 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
405 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
406 }
407
408 void
409 storeDirCloseSwapLogs(void)
410 {
411 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
412 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
413 }
414
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * Returns the number of entries written; 0 if a rebuild is still
 * in progress (in which case nothing is written).
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;                  // total entries written across all dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to write while any cache_dir index is still being rebuilt;
    // the in-memory metadata would be incomplete
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // Phase 1: open a clean-log writer on every dir that supports one
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * Phase 2: round-robin one entry per dir per pass.
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            // dirs whose writeCleanStart() failed have no cleanLog
            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            // at least one dir still has entries; loop again
            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, "  " << std::setw(7) << n  <<
                       " entries written so far.");
            }
        }
    }

    /* Phase 3: flush and close every dir's clean log */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, "  Finished.  Wrote " << n << " entries.");
    debugs(20, 1, "  Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");


    return n;
}
507
/// Create an iterator over stored entries. Caller owns the returned object.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
514
/// Access the x-th configured cache_dir (no bounds checking here;
/// callers iterate up to Config.cacheSwap.n_configured).
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
520
/// Flush pending state on every store (disk only, for now).
void
StoreController::sync(void)
{
    /* sync mem cache? */
    swapDir->sync();
}
527
/*
 * handle callbacks for all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    // number of callbacks serviced by the underlying stores
    return result;
}
544
/// Determine the filesystem block size for the given path via
/// statvfs() (preferred) or statfs(). On failure, *blksize is set to a
/// 2048-byte fallback and 1 is returned; on success returns 0.
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    // f_frsize is the fundamental (fragment) block size
    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
580
/* Convert 'num' filesystem blocks of size 'fsbs' into blocks of size 'bs',
 * dividing or multiplying depending on which block size is larger. */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Report total/free KB and total/free inodes for the filesystem holding
/// 'path', via statvfs() or statfs(). Returns 0 on success, 1 on failure
/// (in which case the output parameters are left untouched).
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
617
618 void
619 allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
620 {
621 if (swap->swapDirs == NULL) {
622 swap->n_allocated = 4;
623 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
624 }
625
626 if (swap->n_allocated == swap->n_configured) {
627 StorePointer *tmp;
628 swap->n_allocated <<= 1;
629 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
630 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
631 xfree(swap->swapDirs);
632 swap->swapDirs = tmp;
633 }
634 }
635
/// Release the cache_dir array. During reconfigure the dirs are kept
/// alive; on shutdown each RefCount slot is cleared (dropping its
/// reference) before the array itself is freed.
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */

        // assigning NULL releases this slot's reference to the SwapDir
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
659
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
/// Tell the owning disk store and the memory replacement policy that
/// the entry was just referenced (used for LRU/heap accounting).
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    /* Notify the memory cache that we're referencing this object again */
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
678
/// Counterpart of reference(): tell the disk store and the memory
/// replacement policy that a reference to the entry was dropped.
void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    // NOTE(review): this guards on swap_filen while reference() guards on
    // swap_dirn; e.store() presumably needs a valid swap_dirn — confirm
    // the two fields are always set/cleared together.
    if (e.swap_filen > -1)
        e.store()->dereference(e);

    /* Notify the memory cache that we're not referencing this object any more */
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
693
/// Synchronous lookup by cache key; returns NULL when not indexed.
StoreEntry *
StoreController::get(const cache_key *key)
{

    return swapDir->get(key);
}
700
/// Asynchronous lookup by textual key — not supported by this store.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
706
/// Only one StoreHashIndex may own the global store_table. The abort()
/// guards release builds where the assert below compiles out (NDEBUG).
StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}
713
/// Destroy every indexed StoreEntry and release the global hash table.
StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
722
/// Give each cache_dir a chance to run pending I/O callbacks, rotating
/// the starting dir (static ndir) across calls for fairness. Keeps
/// looping while any dir reports work done; bails out if a single pass
/// services an implausible amount of I/O.
int
StoreHashIndex::callback()
{
    int result = 0;
    int j;                  // callbacks serviced in the current pass
    static int ndir = 0;    // rotating start dir, persists across calls

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            // wrap ndir back into range before using it as an index
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    // advance the starting dir for the next invocation
    ndir++;

    return result;
}
754
755 void
756 StoreHashIndex::create()
757 {
758 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
759 store(i)->create();
760 }
761
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    // hash_lookup returns NULL when the key is not indexed
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
773
/// Asynchronous lookup by textual key — not supported by the hash index.
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
779
/// Size and create the global store hash table from the configured
/// swap/memory capacity, then start each cache_dir's index rebuild.
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    // estimated object count = (disk KB + memory KB) / average object size
    size_t buckets = (Store::Root().maxSize() + ( Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        store(i)->init();
    }
}
819
820 uint64_t
821 StoreHashIndex::maxSize() const
822 {
823 uint64_t result = 0;
824
825 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
826 result += store(i)->maxSize();
827
828 return result;
829 }
830
831 uint64_t
832 StoreHashIndex::minSize() const
833 {
834 uint64_t result = 0;
835
836 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
837 result += store(i)->minSize();
838
839 return result;
840 }
841
842 void
843 StoreHashIndex::stat(StoreEntry & output) const
844 {
845 int i;
846
847 /* Now go through each store, calling its stat routine */
848
849 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
850 storeAppendPrintf(&output, "\n");
851 store(i)->stat(output);
852 }
853 }
854
/// Reference accounting is handled per-SwapDir, not by the index.
void
StoreHashIndex::reference(StoreEntry&)
{}

/// See reference(): intentionally a no-op at the index level.
void
StoreHashIndex::dereference(StoreEntry&)
{}
862
863 void
864 StoreHashIndex::maintain()
865 {
866 int i;
867 /* walk each fs */
868
869 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
870 /* XXX FixMe: This should be done "in parallell" on the different
871 * cache_dirs, not one at a time.
872 */
873 /* call the maintain function .. */
874 store(i)->maintain();
875 }
876 }
877
/// Size accounting is done per-SwapDir; the index itself tracks nothing.
void
StoreHashIndex::updateSize(int64_t, int)
{}
881
882 void
883 StoreHashIndex::sync()
884 {
885 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
886 store(i)->sync();
887 }
888
/// Create a full-index iterator; URL-restricted search is unsupported.
/// Caller owns the returned object.
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
897
CBDATA_CLASS_INIT(StoreSearchHashIndex);

/// Begin a search at bucket 0; holds a reference to the index it walks.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
902
/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

/// Members (RefCount, entry vector) clean up themselves.
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
909
/// Callback-style advance: steps synchronously, then invokes the
/// caller's callback immediately (no real async work is done).
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}
916
/// Advance to the next entry: drop the current one, refill from
/// subsequent hash buckets as needed. Returns true while an entry
/// remains available via currentItem().
bool
StoreSearchHashIndex::next()
{
    // consume the entry returned by the previous step, if any
    if (entries.size())
        entries.pop_back();

    // pull in buckets until one yields entries or the table is exhausted
    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
928
/// This search cannot fail; always reports no error.
bool
StoreSearchHashIndex::error() const
{
    return false;
}
934
/// Finished when every hash bucket has been copied, or on explicit stop.
bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
940
941 StoreEntry *
942 StoreSearchHashIndex::currentItem()
943 {
944 if (!entries.size())
945 return NULL;
946
947 return entries.back();
948 }
949
/// Snapshot the current hash bucket's entries into 'entries' and advance
/// to the next bucket.
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    // walk the bucket chain; each hash_link is the head of a StoreEntry
    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}