/*
 * $Id$
 *
 * DEBUG: section 47    Store Directory Routines
 * AUTHOR: Duane Wessels
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.  Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "Store.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mem_node.h"
#include "SquidMath.h"
#include "SquidTime.h"
#include "SwapDir.h"
#include "swap_log_op.h"

#if HAVE_STATVFS
#if HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif
#endif /* HAVE_STATVFS */
/* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
#if HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#if HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
/* Windows and Linux use sys/vfs.h */
#if HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif

#include "StoreHashIndex.h"

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read.  For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;

StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}

StoreController::~StoreController()
{
    delete memStore;
}

/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
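/*
 * squid.conf usage (see cf.data.pre for the authoritative documentation):
 *   store_dir_select_algorithm least-load    # the default
 *   store_dir_select_algorithm round-robin
 */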

void
StoreController::init()
{
    if (UsingSmp() && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}

void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround to create store directories
     * sequentially when running the native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return;

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0);

#endif
}

void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

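    /* Reap the children forked by createOneStore(); NeXT lacks waitpid(),
     * so wait3() is used there instead. */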
    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}

/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
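/* Illustrative example (hypothetical limits): with min_objsize=1024 and
 * max_objsize=65536 this accepts 1024 <= objsize < 65536 and rejects an
 * unknown (-1) size; with both limits unset it accepts any size,
 * including -1. */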
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    return min_objsize <= objsize && max_objsize > objsize;
}


/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0;
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (!sd->canStore(*e, objsize, load))
            continue;

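        /* canStore() reports a load in 0..1000; anything outside that
         * range means the dir cannot take the object right now. */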
        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
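/* Selection order, as implemented below: the lowest load wins; on a load
 * tie, prefer the dir with the tightest (smallest) max_objsize, and then
 * the dir with the most free space. */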
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz;

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more detail */
        if (load == least_load) {
            /* closest max_objsize fit */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}

/*
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

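    /* Only real log operations (e.g. SWAP_LOG_ADD, SWAP_LOG_DEL) are
     * valid here; see swap_log_op.h for the full enum. */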
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}

void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
StoreController::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
StoreController::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
StoreController::currentSize() const
{
    return swapDir->currentSize();
}

uint64_t
StoreController::currentCount() const
{
    return swapDir->currentCount();
}

int64_t
StoreController::maxObjectSize() const
{
    return swapDir->maxObjectSize();
}

void
SwapDir::diskFull()
{
    if (currentSize() >= maxSize())
        return;

    max_size = currentSize();

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << currentSize() / 1024.0 << " KB");
}

void
storeDirOpenSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
}

void
storeDirCloseSwapLogs(void)
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
}

/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks go to Eric Stern, since this solution
 * came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

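            /* Log progress every 65,536 entries written. */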
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, " " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, " Finished. Wrote " << n << " entries.");
    debugs(20, 1, " Took " << std::setw(3) << std::setprecision(2) << dt <<
           " seconds (" << std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}

StoreSearch *
StoreController::search(String const url, HttpRequest *request)
{
    /* cheat, for now you can't search the memory hot cache */
    return swapDir->search(url, request);
}

StorePointer
StoreHashIndex::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}

void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. That's ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

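/* Determine the filesystem block size for path.  Returns 0 on success;
 * on failure, logs the error, falls back to a 2048-byte block size, and
 * returns 1. */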
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048;
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}

#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
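/* fsbtoblk() converts num blocks of size fsbs into blocks of size bs,
 * dividing rather than multiplying when fsbs < bs so the intermediate
 * result cannot overflow; e.g. fsbtoblk(n, 512, 1024) == n / 2. */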
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}

void
allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
{
    if (swap->swapDirs == NULL) {
        swap->n_allocated = 4;
        swap->swapDirs = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
    }

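    /* Grow the array geometrically (doubling) once it is full. */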
    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        SwapDir::Pointer *const tmp = static_cast<SwapDir::Pointer *>(xcalloc(swap->n_allocated, sizeof(SwapDir::Pointer)));
        memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
        xfree(swap->swapDirs);
        swap->swapDirs = tmp;
    }
}

void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         *  swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL;
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        e.store()->dereference(e);

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->dereference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

StoreEntry *
StoreController::get(const cache_key *key)
{
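    /* Lookup order: the shared store_table index first, then the memory
     * cache, then each active cache_dir in round-robin order. */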
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}

void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreController::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;
    if (memStore) {
        memStore->considerKeeping(e);
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    dereference(e);

    // XXX: Rock store specific: Since each SwapDir controls its index,
    // unlocked entries should not stay in the global store_table.
    if (e.swap_filen >= 0) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}

StoreHashIndex::StoreHashIndex()
{
    if (store_table)
        abort();
    assert (store_table == NULL);
}

StoreHashIndex::~StoreHashIndex()
{
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}

int
StoreHashIndex::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ndir++;

    return result;
}

void
StoreHashIndex::create()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active())
            store(i)->create();
    }
}

/* Lookup an object in the cache.
 * Returns just a reference to the object; does not start swapping it in. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}

void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}

void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* This is very bogus: it is specific to any Store maintaining an
     * in-core index, not global. */
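    /* Worked example (hypothetical numbers, assuming the documented
     * defaults of store_avg_object_size = 13 KB and
     * store_objects_per_bucket = 20): a 100 GB swap plus a 256 MB memory
     * cache gives roughly 8.1 million expected objects, so the target is
     * about 400k buckets before storeKeyHashBuckets() applies its cap. */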
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}

uint64_t
StoreHashIndex::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}

void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
StoreHashIndex::reference(StoreEntry&)
{}

void
StoreHashIndex::dereference(StoreEntry&)
{}

void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
StoreHashIndex::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
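/* Typical synchronous driver loop for the returned search (an
 * illustrative sketch, not code from this file):
 *
 *   StoreSearch *s = Store::Root().search(String(), NULL);
 *   while (s->next()) {
 *       StoreEntry *e = s->currentItem();
 *       ... inspect e ...
 *   }
 */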

CBDATA_CLASS_INIT(StoreSearchHashIndex);

StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}

/* do not link
StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
*/

StoreSearchHashIndex::~StoreSearchHashIndex()
{}

void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}

bool
StoreSearchHashIndex::next()
{
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}

bool
StoreSearchHashIndex::error() const
{
    return false;
}

bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}

StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}

void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}