]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store_dir.cc
Merge from trunk
[thirdparty/squid.git] / src / store_dir.cc
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "Store.h"
38 #include "MemObject.h"
39 #include "SquidTime.h"
40 #include "SwapDir.h"
41 #include "swap_log_op.h"
42
43 #if HAVE_STATVFS
44 #if HAVE_SYS_STATVFS_H
45 #include <sys/statvfs.h>
46 #endif
47 #endif /* HAVE_STATVFS */
48 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
49 #if HAVE_SYS_PARAM_H
50 #include <sys/param.h>
51 #endif
52 #if HAVE_SYS_MOUNT_H
53 #include <sys/mount.h>
54 #endif
55 /* Windows and Linux use sys/vfs.h */
56 #if HAVE_SYS_VFS_H
57 #include <sys/vfs.h>
58 #endif
59
60 #include "StoreHashIndex.h"
61
// Forward declarations for the two selectable dir-selection policies below.
static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
73
// The controller owns a single StoreHashIndex that aggregates all cache_dirs.
StoreController::StoreController() : swapDir (new StoreHashIndex())
{}

StoreController::~StoreController()
{}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf (see StoreController::init()); Least Load is the default.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
85
86 void
87 StoreController::init()
88 {
89 swapDir->init();
90
91 if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
92 storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
93 debugs(47, 1, "Using Round Robin store dir selection");
94 } else {
95 storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
96 debugs(47, 1, "Using Least Load store dir selection");
97 }
98 }
99
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#ifndef _SQUID_MSWIN_

    // Parent process: hand the disk-bound creation work to a child and
    // return immediately so multiple stores can be created in parallel.
    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    // Child process: work done; must not return into the parent's flow.
    exit(0);

#endif
}
123
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    // Reap any child processes forked for store creation (see
    // createOneStore()) before continuing.
    pid_t pid;

    do {
        int status;
#ifdef _SQUID_NEXT_

        // NeXT lacks waitpid(); poll with wait3() instead.
        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

        // Keep waiting while children remain (pid > 0) or the wait was
        // merely interrupted by a signal (EINTR).
    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
147
148 /*
149 * Determine whether the given directory can handle this object
150 * size
151 *
152 * Note: if the object size is -1, then the only swapdirs that
153 * will return true here are ones that have max_obj_size = -1,
154 * ie any-sized-object swapdirs. This is a good thing.
155 */
156 bool
157 SwapDir::objectSizeIsAcceptable(int64_t objsize) const
158 {
159 /*
160 * If the swapdir's max_obj_size is -1, then it definitely can
161 */
162
163 if (max_objsize == -1)
164 return true;
165
166 /*
167 * If the object size is -1, then if the storedir isn't -1 we
168 * can't store it
169 */
170 if ((objsize == -1) && (max_objsize != -1))
171 return false;
172
173 /*
174 * Else, make sure that the max object size is larger than objsize
175 */
176 return max_objsize > objsize;
177 }
178
179
180 /*
181 * This new selection scheme simply does round-robin on all SwapDirs.
182 * A SwapDir is skipped if it is over the max_size (100%) limit, or
183 * overloaded.
184 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    // Remembers where the previous call left off so successive objects
    // are spread across the configured dirs.
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // On-disk size includes the swap metadata header.
    // NOTE(review): assumes e->mem_obj is non-NULL whenever objectLen()
    // is known (!= -1) -- confirm with callers.
    ssize_t objsize = e->objectLen();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i <= Config.cacheSwap.n_configured; i++) {
        // Advance first (wrapping) so we start after last call's pick.
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->flags.read_only)
            continue;

        // Skip dirs already at/over capacity.
        if (sd->cur_size > sd->max_size)
            continue;

        // Skip dirs whose size limits exclude this object.
        if (!sd->objectSizeIsAcceptable(objsize))
            continue;

        /* check for error or overload condition */
        load = sd->canStore(*e);

        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    // No configured dir can take this object.
    return -1;
}
224
225 /*
226 * Spread load across all of the store directories
227 *
228 * Note: We should modify this later on to prefer sticking objects
229 * in the *tightest fit* swapdir to conserve space, along with the
230 * actual swapdir usage. But for now, this hack will do while
231 * testing, so you should order your swapdirs in the config file
232 * from smallest maxobjsize to unlimited (-1) maxobjsize.
233 *
234 * We also have to choose nleast == nconf since we need to consider
235 * ALL swapdirs, regardless of state. Again, this is a hack while
236 * we sort out the real usefulness of this algorithm.
237 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t objsize;
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;   // max_objsize of the best candidate so far
    int least_load = INT_MAX;     // load of the best candidate so far
    int load;
    int dirn = -1;                // index of the best candidate, -1 if none
    int i;
    RefCount<SwapDir> SD;

    /* Calculate the object size */
    objsize = e->objectLen();

    // Include the on-disk swap header in the size that must fit.
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    // Scan ALL configured dirs, tracking the lowest-load acceptable one.
    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;   // clear any previous selection mark
        load = SD->canStore(*e);

        // Negative load signals an error; > 1000 means overloaded.
        if (load < 0 || load > 1000) {
            continue;
        }

        if (!SD->objectSizeIsAcceptable(objsize))
            continue;

        if (SD->flags.read_only)
            continue;

        if (SD->cur_size > SD->max_size)
            continue;

        if (load > least_load)
            continue;

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit: prefer the dir whose size cap is
             * the tightest around this object */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free space as the final tie-breaker */
            if (cur_free < most_free)
                continue;
        }

        // This dir is the best candidate so far; remember it.
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
303
304 /*
305 * An entry written to the swap log MUST have the following
306 * properties.
307 * 1. It MUST be a public key. It does no good to log
308 * a public ADD, change the key, then log a private
309 * DEL. So we need to log a DEL before we change a
310 * key from public to private.
311 * 2. It MUST have a valid (> -1) swap_filen.
312 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    // See the invariants documented above: the entry must have a public
    // key and a valid on-disk file number before it may be logged.
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    // op must be a real logging operation, not NOP or out of range.
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // Delegate the actual log write to the entry's own SwapDir.
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
336
void
StoreController::updateSize(int64_t size, int sign)
{
    // The controller only aggregates SwapDirs; it keeps no size of its
    // own, so calling this is a programming error.
    fatal("StoreController has no independent size\n");
}
342
343 void
344 SwapDir::updateSize(int64_t size, int sign)
345 {
346 int blks = (size + fs.blksize - 1) / fs.blksize;
347 int k = (blks * fs.blksize >> 10) * sign;
348 cur_size += k;
349 store_swap_size += k;
350
351 if (sign > 0)
352 n_disk_objects++;
353 else if (sign < 0)
354 n_disk_objects--;
355 }
356
void
StoreController::stat(StoreEntry &output) const
{
    // Append store-wide statistics to 'output', then each dir's own.
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %8ld KB\n",
                      (long int) maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %8lu KB\n",
                      store_swap_size);
    storeAppendPrintf(&output, "Current Capacity : %d%% used, %d%% free\n",
                      percent((int) store_swap_size, (int) maxSize()),
                      percent((int) (maxSize() - store_swap_size), (int) maxSize()));
    /* FIXME Here we should output memory statistics */

    /* now the swapDir */
    swapDir->stat(output);
}
375
376 /* if needed, this could be taught to cache the result */
// Total configured swap capacity, summed over all cache_dirs (KB, per
// the units printed by StoreController::stat()).
size_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

// Total minimum/reserved swap size, summed over all cache_dirs.
size_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}
390
391 void
392 SwapDir::diskFull()
393 {
394 if (cur_size >= max_size)
395 return;
396
397 max_size = cur_size;
398
399 debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
400 }
401
402 void
403 storeDirOpenSwapLogs(void)
404 {
405 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
406 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
407 }
408
409 void
410 storeDirCloseSwapLogs(void)
411 {
412 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
413 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
414 }
415
416 /*
417 * storeDirWriteCleanLogs
418 *
419 * Writes a "clean" swap log file from in-memory metadata.
420 * This is a rewrite of the original function to troll each
421 * StoreDir and write the logs, and flush at the end of
422 * the run. Thanks goes to Eric Stern, since this solution
423 * came out of his COSS code.
424 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;              // total entries written across all dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // Refuse to write clean logs from a partially loaded index.
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // Start a clean-log writer on every dir; failures just leave that
    // dir's cleanLog NULL and it is skipped below.
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallellism helps as it allows more
     * hdd spindles to be active.  One entry per dir per pass, round-robin,
     * until every dir's log is exhausted.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            // Dirs whose writeCleanStart() failed have no cleanLog.
            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;   // this dir is finished

            notdone = 1;    // at least one dir still has entries

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // Progress report every 65536 entries.
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, " Finished. Wrote " << n << " entries.");
    debugs(20, 1, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");


    return n;
}
508
// Create an iterator over the stored objects; only the disk index is
// walked today.
StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}
515
// Return the x'th configured cache_dir (no bounds checking here).
StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}
521
void
StoreController::sync(void)
{
    // Flush on-disk index state; the memory cache has no sync hook yet.
    /* sync mem cache? */
    swapDir->sync();
}
528
529 /*
530 * handle callbacks all avaliable fs'es
531 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    // Give every store a chance to service pending work; 'result' is
    // the sum of their callback() return values.
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}
545
// Determine the filesystem block size for 'path'.
// Returns 0 on success; 1 on failure (with *blksize defaulted to 2048).
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    // f_frsize is the fundamental (fragment) block size.
    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
581
/* Convert 'num' blocks of size 'fsbs' into units of size 'bs'.  When the
 * source blocks are smaller than the target unit, divide first to avoid
 * overflowing the intermediate product. */
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
// Query total/free KB and total/free inodes for the filesystem holding
// 'path'.  Returns 0 on success, 1 on failure (outputs untouched).
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
618
619 void
620 allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
621 {
622 if (swap->swapDirs == NULL) {
623 swap->n_allocated = 4;
624 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
625 }
626
627 if (swap->n_allocated == swap->n_configured) {
628 StorePointer *tmp;
629 swap->n_allocated <<= 1;
630 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
631 xmemcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
632 xfree(swap->swapDirs);
633 swap->swapDirs = tmp;
634 }
635 }
636
// Release the configured cache_dir array, except during reconfigure
// when the dirs must survive.
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        // Assigning NULL drops this slot's reference to the dir.
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
660
661 /* this should be a virtual method on StoreEntry,
662 * i.e. e->referenced()
663 * so that the entry can notify the creating Store
664 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */
    // NOTE(review): this guards on swap_dirn while dereference() below
    // guards on swap_filen -- confirm which field is authoritative.

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    /* Notify the memory cache that we're referencing this object again */
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
679
void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */
    // NOTE(review): this guards on swap_filen while reference() above
    // guards on swap_dirn -- confirm the inconsistency is intentional.

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    /* Notify the memory cache that we're not referencing this object any more */
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
694
695 StoreEntry *
696
697 StoreController::get
698 (const cache_key *key)
699 {
700
701 return swapDir->get
702 (key);
703 }
704
void
StoreController::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    // Asynchronous lookup by String key is not supported yet.
    fatal("not implemented");
}
712
StoreHashIndex::StoreHashIndex()
{
    // There must be only one index: the shared store_table must not
    // already exist when we are constructed.
    if (store_table)
        abort();
    assert (store_table == NULL);
}
719
StoreHashIndex::~StoreHashIndex()
{
    // Destroy every indexed entry, then the shared hash table itself.
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
728
int
StoreHashIndex::callback()
{
    int result = 0;     // total work reported by all dirs
    int j;              // work done in the current pass
    // Round-robin starting point, remembered across calls so no single
    // dir monopolizes callback servicing.
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            // Wrap ndir back into range.
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // Safety valve: a dir endlessly reporting pending work
            // would otherwise keep this loop spinning forever.
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);    // repeat until a full pass does no work

    ndir++;

    return result;
}
760
761 void
762 StoreHashIndex::create()
763 {
764 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
765 store(i)->create();
766 }
767
768 /* Lookup an object in the cache.
769 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    // Look the key up in the shared in-core hash; NULL on a miss.
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
781
void
StoreHashIndex::get(String const key, STOREGETCLIENT callback, void *cbdata)
{
    // Asynchronous lookup by String key is not implemented for the
    // hash index.
    fatal("not implemented");
}
789
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    // Estimate the object count from (disk KB + memory KB) / avg size,
    // then derive a bucket count from objects-per-bucket.
    size_t buckets = (Store::Root().maxSize() + ( Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        store(i)->init();
    }
}
829
830 size_t
831 StoreHashIndex::maxSize() const
832 {
833 int i;
834 size_t result = 0;
835
836 for (i = 0; i < Config.cacheSwap.n_configured; i++)
837 result += store(i)->maxSize();
838
839 return result;
840 }
841
842 size_t
843 StoreHashIndex::minSize() const
844 {
845 size_t result = 0;
846
847 for (int i = 0; i < Config.cacheSwap.n_configured; i++)
848 result += store(i)->minSize();
849
850 return result;
851 }
852
853 void
854 StoreHashIndex::stat(StoreEntry & output) const
855 {
856 int i;
857
858 /* Now go through each store, calling its stat routine */
859
860 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
861 storeAppendPrintf(&output, "\n");
862 store(i)->stat(output);
863 }
864 }
865
// No-op: the hash index keeps no per-entry reference state.
void
StoreHashIndex::reference(StoreEntry&)
{}

// No-op: see reference() above.
void
StoreHashIndex::dereference(StoreEntry&)
{}
873
874 void
875 StoreHashIndex::maintain()
876 {
877 int i;
878 /* walk each fs */
879
880 for (i = 0; i < Config.cacheSwap.n_configured; i++) {
881 /* XXX FixMe: This should be done "in parallell" on the different
882 * cache_dirs, not one at a time.
883 */
884 /* call the maintain function .. */
885 store(i)->maintain();
886 }
887 }
888
// No-op: sizes are accounted by each SwapDir (see SwapDir::updateSize).
void
StoreHashIndex::updateSize(int64_t, int)
{}
892
893 void
894 StoreHashIndex::sync()
895 {
896 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
897 store(i)->sync();
898 }
899
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    // Only full-index walks are supported; URL-filtered search is not.
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
908
// Register StoreSearchHashIndex with the cbdata allocation/type system.
CBDATA_CLASS_INIT(StoreSearchHashIndex);
910
// Begin a walk of store_table, starting at bucket 0.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}
920
void
StoreSearchHashIndex::next(void (callback)(void *cbdata), void *cbdata)
{
    // The walk is synchronous, so advance and invoke the callback at once.
    next();
    callback (cbdata);
}
927
bool
StoreSearchHashIndex::next()
{
    // Discard the entry returned by the previous call, if any.
    if (entries.size())
        entries.pop_back();

    // Refill the working set from successive hash buckets until an
    // entry is available or the whole table has been walked.
    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
939
bool
StoreSearchHashIndex::error() const
{
    // A hash-table walk has no failure mode.
    return false;
}
945
946 bool
947 StoreSearchHashIndex::isDone() const
948 {
949 return bucket >= store_hash_buckets || _done;
950 }
951
952 StoreEntry *
953 StoreSearchHashIndex::currentItem()
954 {
955 if (!entries.size())
956 return NULL;
957
958 return entries.back();
959 }
960
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    // Snapshot every entry chained in this bucket.
    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        // Presumably hash_link is the leading member of StoreEntry, making
        // this cast valid -- TODO confirm the layout.
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    // Advance so the next call reads the following bucket.
    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}