
/*
 * $Id$
 *
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.  Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "Store.h"
#include "MemObject.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif

#include "StoreHashIndex.h"

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
{}

StoreController::~StoreController()
{}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
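/*
 * For illustration: the algorithm is chosen by name in squid.conf,
 * e.g. "store_dir_select_algorithm round-robin"; any other value
 * (and the default) selects least-load, as StoreController::init()
 * below shows.
 */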

void
StoreController::init()
{
    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running on the native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
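
/*
 * The wait loop above reaps the children forked by createOneStore(),
 * so cache_dir creation (e.g. for "squid -z") does not return before
 * every child has finished building its directory.
 */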

/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * i.e. any-sized-object swapdirs.  This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    return min_objsize <= objsize && max_objsize > objsize;
}
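
/*
 * Worked example (assumed limits): with min_objsize == 0 and
 * max_objsize == 1048576, a 1048575-byte object is accepted but a
 * 1048576-byte one is not; the lower bound is inclusive while the
 * upper bound is exclusive.
 */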

/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    ssize_t objsize = e->objectLen();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->flags.read_only)
            continue;

        if (sd->cur_size > sd->max_size)
            continue;

        if (!sd->objectSizeIsAcceptable(objsize))
            continue;

        /* check for error or overload condition */
        load = sd->canStore(*e);

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage.  But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state.  Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size */
    objsize = e->objectLen();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;
        load = SD->canStore(*e);

        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more detail */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
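
/*
 * Tie-breaking sketch (assumed values): if two dirs report the same
 * load, the one with the smaller (tighter) max_objsize wins; if
 * max_objsize also ties, the dir with the most free space wins.
 */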

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}

void
StoreController::updateSize(int64_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}

void
SwapDir::updateSize(int64_t size, int sign)
{
    int64_t blks = (size + fs.blksize - 1) / fs.blksize;
    int64_t k = ((blks * fs.blksize) >> 10) * sign;
    cur_size += k;
    store_swap_size += k;

    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
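
/*
 * Example of the rounding above (assumed numbers): a 1000-byte object
 * on a filesystem with fs.blksize == 512 occupies
 * blks = (1000 + 511) / 512 = 2 blocks, so k = (2 * 512) >> 10 = 1 KB
 * is added to (or, with sign == -1, removed from) the accounting.
 */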

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %"PRIu64" KB\n",
                      maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity       : %"PRId64"%% used, %"PRId64"%% free\n",
                      Math::int64Percent(store_swap_size, maxSize()),
                      Math::int64Percent((maxSize() - store_swap_size), maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

void
SwapDir::diskFull()
{
    if (cur_size >= max_size)
        return;

    max_size = cur_size;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run.  Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU-wise it is more efficient to do this
     * sequentially, but I/O-wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, "  Finished.  Wrote " << n << " entries.");
    debugs(20, 1, "  Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    /* sync mem cache? */
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
StoreController::callback()
{
    /* This will likely double count.  That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
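
/*
 * fsbtoblk() rescales a count of filesystem blocks of size fsbs into
 * blocks of size bs, keeping everything in integer math.  Example
 * (assumed sizes): 100 blocks of 512 bytes -> 100 / (1024/512) = 50
 * one-KB blocks; 100 blocks of 4096 bytes -> 100 * (4096/1024) = 400.
 */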
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
    }

    if (swap->n_allocated == swap->n_configured) {
        StorePointer *tmp;
        swap->n_allocated <<= 1;
        tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}
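
/*
 * Growth pattern, for illustration: the swapDirs array starts at 4
 * slots and doubles whenever it fills (4, 8, 16, ...), with the
 * existing pointers copied across verbatim.
 */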

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         *  swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    /* Notify the memory cache that we're referencing this object again */
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    /* Notify the memory cache that we're not referencing this object any more */
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ndir++;

    return result;
}

void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* This is very bogus; it's specific to any Store maintaining an
     * in-core index, not global. */
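    /*
     * Rough numbers for illustration (all assumed): 1 GB of cache_dir
     * (1048576 KB) plus 256 MB of cache_mem (262144 KB) at a 13 KB
     * average object size gives (1048576 + 262144) / 13, roughly
     * 100,800 estimated objects, which objectsPerBucket then divides
     * before storeKeyHashBuckets() picks the final bucket count.
     */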
    size_t buckets = (Store::Root().maxSize() + (Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << (Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours.  */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << (Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++)
        result += store(i)->maxSize();

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++)
        result += store(i)->minSize();

    return result;
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry&)
{}

void
StoreHashIndex::dereference(StoreEntry&)
{}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::updateSize(int64_t, int)
{}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
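
/*
 * Sketch of typical iterator use (hypothetical caller code):
 *
 *   StoreSearch *s = Store::Root().search(String(), NULL);
 *   while (s->next())
 *       process(s->currentItem());
 *
 * next() refills its working list one hash bucket at a time via
 * copyBucket() below.
 */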

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}
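
/*
 * Note: next()/currentItem() consume entries from the back of the
 * vector, so each bucket is visited in reverse hash-chain order.
 */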