]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store_dir.cc
Fixed compiler warnings about wrong printf() format for currentSize().
[thirdparty/squid.git] / src / store_dir.cc
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 47 Store Directory Routines
6 * AUTHOR: Duane Wessels
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "Store.h"
38 #include "MemObject.h"
39 #include "MemStore.h"
40 #include "mem_node.h"
41 #include "SquidMath.h"
42 #include "SquidTime.h"
43 #include "SwapDir.h"
44 #include "swap_log_op.h"
45
46 #if HAVE_STATVFS
47 #if HAVE_SYS_STATVFS_H
48 #include <sys/statvfs.h>
49 #endif
50 #endif /* HAVE_STATVFS */
51 /* statfs() needs <sys/param.h> and <sys/mount.h> on BSD systems */
52 #if HAVE_SYS_PARAM_H
53 #include <sys/param.h>
54 #endif
55 #if HAVE_SYS_MOUNT_H
56 #include <sys/mount.h>
57 #endif
58 /* Windows and Linux use sys/vfs.h */
59 #if HAVE_SYS_VFS_H
60 #include <sys/vfs.h>
61 #endif
62
63 #include "StoreHashIndex.h"
64
65 static STDIRSELECT storeDirSelectSwapDirRoundRobin;
66 static STDIRSELECT storeDirSelectSwapDirLeastLoad;
67
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int StoreController::store_dirs_rebuilding = 1;
76
/// The root Store: delegates disk storage to a StoreHashIndex; the
/// memory cache (memStore) is created lazily in init().
StoreController::StoreController() : swapDir (new StoreHashIndex())
        , memStore(NULL)
{}
80
StoreController::~StoreController()
{
    delete memStore; // swapDir is reference-counted and frees itself
}
85
/*
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf. Defaults to least-load; init() may switch it to
 * round-robin.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
91
/// Initializes the memory cache, all configured cache_dirs, and the
/// cache_dir selection algorithm chosen in squid.conf.
void
StoreController::init()
{
    // XXX: add: if (UsingSmp())
    memStore = new MemStore;
    memStore->init();

    swapDir->init();

    // pick the cache_dir selection policy configured via
    // 'store_dir_select_algorithm'
    if (0 == strcasecmp(Config.store_dir_select_algorithm, "round-robin")) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, 1, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, 1, "Using Least Load store dir selection");
    }
}
109
/// Creates (formats) one cache_dir. On platforms with fork(), the work
/// runs in a child process so multiple dirs can be created in parallel;
/// the parent returns immediately and the child exits after create().
void
StoreController::createOneStore(Store &aStore)
{
    /*
     * On Windows, fork() is not available.
     * The following is a workaround for create store directories sequentially
     * when running on native Windows port.
     */
#ifndef _SQUID_MSWIN_

    if (fork())
        return; // parent: child does the work

#endif

    aStore.create();

#ifndef _SQUID_MSWIN_

    exit(0); // child: done; never return into the parent's code path

#endif
}
133
/// Creates all configured cache_dirs, then (on non-Windows) reaps the
/// child processes forked by createOneStore() before returning.
void
StoreController::create()
{
    swapDir->create();

#ifndef _SQUID_MSWIN_

    pid_t pid;

    // wait for every forked creator child; retry on EINTR
    do {
        int status;
#ifdef _SQUID_NEXT_

        pid = wait3(&status, WNOHANG, NULL);
#else

        pid = waitpid(-1, &status, 0);
#endif

    } while (pid > 0 || (pid < 0 && errno == EINTR));

#endif
}
157
/**
 * Determine whether the given directory can handle this object
 * size
 *
 * Note: if the object size is -1, then the only swapdirs that
 * will return true here are ones that have min and max unset,
 * ie any-sized-object swapdirs. This is a good thing.
 */
bool
SwapDir::objectSizeIsAcceptable(int64_t objsize) const
{
    // If the swapdir has no range limits, then it definitely can
    if (min_objsize <= 0 && max_objsize == -1)
        return true;

    /*
     * If the object size is -1 and the storedir has limits we
     * can't store it there.
     */
    if (objsize == -1)
        return false;

    // Else, make sure that the object size will fit.
    // NOTE(review): the upper bound is exclusive (objsize == max_objsize
    // is rejected) -- confirm against the documented cache_dir max-size
    // semantics before changing.
    return min_objsize <= objsize && max_objsize > objsize;
}
183
184
/*
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    static int dirn = 0; // next dir to try; persists across calls
    int i;
    int load;
    RefCount<SwapDir> sd;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();
    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz; // include swap metadata header

    // probe each dir at most once, starting after the last pick
    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (++dirn >= Config.cacheSwap.n_configured)
            dirn = 0;

        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        // canStore() also fills in the dir's load estimate
        if (!sd->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000) {
            continue; // implausible load value; skip this dir
        }

        return dirn;
    }

    return -1; // no acceptable cache_dir
}
221
/*
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest maxobjsize to unlimited (-1) maxobjsize.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    ssize_t most_free = 0, cur_free;
    ssize_t least_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    // e->objectLen() is negative at this point when we are still STORE_PENDING
    ssize_t objsize = e->mem_obj->expectedReplySize();

    if (objsize != -1)
        objsize += e->mem_obj->swap_hdr_sz; // include swap metadata header

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = 0; // clear; winner is re-marked below

        // canStore() also fills in the dir's load estimate
        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue; // implausible load value

        if (load > least_load)
            continue; // a lighter-loaded dir was already found

        cur_free = SD->max_size - SD->cur_size;

        /* If the load is equal, then look in more details */
        if (load == least_load) {
            /* closest max_objsize fit: prefer the smallest limit that
             * still accepts the object */

            if (least_objsize != -1)
                if (SD->max_objsize > least_objsize || SD->max_objsize == -1)
                    continue;

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        // this dir is the best candidate so far
        least_load = load;
        least_objsize = SD->max_objsize;
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = 1;

    return dirn;
}
291
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 * a public ADD, change the key, then log a private
 * DEL. So we need to log a DEL before we change a
 * key from public to private.
 * 2. It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_filen >= 0);
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    // delegate the actual log write to the entry's own cache_dir
    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}
324
/// Not supported at the controller level: sizes are tracked per
/// SwapDir. Calling this is a programming error and aborts Squid.
void
StoreController::updateSize(int64_t size, int sign)
{
    fatal("StoreController has no independent size\n");
}
330
/// Adjusts this dir's accounted size and object count by one object.
/// \param size object size in bytes; rounded up to whole fs blocks
/// \param sign +1 when adding an object, -1 when removing one
void
SwapDir::updateSize(int64_t size, int sign)
{
    // round up to whole filesystem blocks, then convert to KB
    int64_t blks = (size + fs.blksize - 1) / fs.blksize;
    int64_t k = ((blks * fs.blksize) >> 10) * sign;
    cur_size += k; // cur_size is kept in KB

    if (sign > 0)
        n_disk_objects++;
    else if (sign < 0)
        n_disk_objects--;
}
343
/// Appends store-wide statistics (entry counts, swap sizes, capacity)
/// to the given cachemgr output entry, then the memory cache's and each
/// cache_dir's own stats.
void
StoreController::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %"PRIu64" KB\n",
                      maxSize());
    storeAppendPrintf(&output, "Current Store Swap Size: %"PRIu64" KB\n",
                      currentSize());
    storeAppendPrintf(&output, "Current Capacity       : %"PRId64"%% used, %"PRId64"%% free\n",
                      Math::int64Percent(currentSize(), maxSize()),
                      Math::int64Percent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}
364
365 /* if needed, this could be taught to cache the result */
366 uint64_t
367 StoreController::maxSize() const
368 {
369 /* TODO: include memory cache ? */
370 return swapDir->maxSize();
371 }
372
373 uint64_t
374 StoreController::minSize() const
375 {
376 /* TODO: include memory cache ? */
377 return swapDir->minSize();
378 }
379
380 uint64_t
381 StoreController::currentSize() const
382 {
383 return swapDir->currentSize();
384 }
385
386 uint64_t
387 StoreController::currentCount() const
388 {
389 return swapDir->currentCount();
390 }
391
392 int64_t
393 StoreController::maxObjectSize() const
394 {
395 return swapDir->maxObjectSize();
396 }
397
/// Called when the underlying disk reports "full" before the configured
/// limit is reached: shrinks this dir's max_size down to its current
/// usage so we stop trying to add more.
void
SwapDir::diskFull()
{
    if (cur_size >= max_size)
        return; // already at or over the configured limit; nothing to shrink

    max_size = cur_size;

    debugs(20, 1, "WARNING: Shrinking cache_dir #" << index << " to " << cur_size << " KB");
}
408
409 void
410 storeDirOpenSwapLogs(void)
411 {
412 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
413 dynamic_cast<SwapDir *>(INDEXSD(dirn))->openLog();
414 }
415
416 void
417 storeDirCloseSwapLogs(void)
418 {
419 for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
420 dynamic_cast<SwapDir *>(INDEXSD(dirn))->closeLog();
421 }
422
/*
 * storeDirWriteCleanLogs
 *
 * Writes a "clean" swap log file from in-memory metadata.
 * This is a rewrite of the original function to troll each
 * StoreDir and write the logs, and flush at the end of
 * the run. Thanks goes to Eric Stern, since this solution
 * came out of his COSS code.
 *
 * \param reopen when non-zero, reopen the swap logs after writing
 * \return the number of entries written (0 if aborted)
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0; // total entries written across all dirs

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // refuse to write while any cache_dir index is still being rebuilt
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, 1, "Not currently OK to rewrite swap log.");
        debugs(20, 1, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, 1, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    // phase 1: open a clean-log writer on every dir that supports it
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, 1, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient as CPU wise it is more efficient to do this
     * sequentially, but I/O wise the parallelism helps as it allows more
     * hdd spindles to be active.
     */
    // phase 2: round-robin one entry per dir until every dir is drained
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue; // writeCleanStart() failed or dir has no clean log

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue; // this dir is drained

            notdone = 1; // at least one dir still has entries

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            // progress report every 65536 entries
            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, 1, "  " << std::setw(7) << n <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; dirn++)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, 1, "  Finished.  Wrote " << n << " entries.");
    debugs(20, 1, "  Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");


    return n;
}
515
516 StoreSearch *
517 StoreController::search(String const url, HttpRequest *request)
518 {
519 /* cheat, for now you can't search the memory hot cache */
520 return swapDir->search(url, request);
521 }
522
523 StorePointer
524 StoreHashIndex::store(int const x) const
525 {
526 return INDEXSD(x);
527 }
528
/// Returns the cache_dir at index i as a SwapDir reference;
/// asserts that the stored object really is a SwapDir.
SwapDir &
StoreHashIndex::dir(const int i) const
{
    SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(i));
    assert(sd);
    return *sd;
}
536
/// Flushes pending state in the memory cache (if any) and all disk stores.
void
StoreController::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}
544
/*
 * handle callbacks all available fs'es
 */
int
StoreController::callback()
{
    /* This will likely double count. Thats ok. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result; // number of callbacks serviced
}
561
/// Determines the filesystem block size for the given path.
/// \param path filesystem path to query
/// \param blksize out: block size; falls back to 2048 on error or
///        implausibly small values
/// \return 0 on success, 1 if the statvfs/statfs call failed
int
storeDirGetBlkSize(const char *path, int *blksize)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048; // conservative default
        return 1;
    }

    *blksize = (int) sfs.f_frsize;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        *blksize = 2048; // conservative default
        return 1;
    }

    *blksize = (int) sfs.f_bsize;
#endif
    /*
     * Sanity check; make sure we have a meaningful value.
     */

    if (*blksize < 512)
        *blksize = 2048;

    return 0;
}
597
// Convert a count of filesystem blocks (size fsbs) into blocks of size bs.
#define fsbtoblk(num, fsbs, bs) \
    (((fsbs) != 0 && (fsbs) < (bs)) ? \
     (num) / ((bs) / (fsbs)) : (num) * ((fsbs) / (bs)))
/// Collects filesystem usage statistics for the given path.
/// \param totl_kb out: total size in KB; \param free_kb out: free KB
/// \param totl_in out: total inodes;     \param free_in out: free inodes
/// \return 0 on success, 1 if the statvfs/statfs call failed
int
storeDirGetUFSStats(const char *path, int *totl_kb, int *free_kb, int *totl_in, int *free_in)
{
#if HAVE_STATVFS

    struct statvfs sfs;

    if (statvfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_frsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_frsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#else

    struct statfs sfs;

    if (statfs(path, &sfs)) {
        debugs(50, 1, "" << path << ": " << xstrerror());
        return 1;
    }

    *totl_kb = (int) fsbtoblk(sfs.f_blocks, sfs.f_bsize, 1024);
    *free_kb = (int) fsbtoblk(sfs.f_bfree, sfs.f_bsize, 1024);
    *totl_in = (int) sfs.f_files;
    *free_in = (int) sfs.f_ffree;
#endif

    return 0;
}
634
635 void
636 allocate_new_swapdir(SquidConfig::_cacheSwap * swap)
637 {
638 if (swap->swapDirs == NULL) {
639 swap->n_allocated = 4;
640 swap->swapDirs = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
641 }
642
643 if (swap->n_allocated == swap->n_configured) {
644 StorePointer *tmp;
645 swap->n_allocated <<= 1;
646 tmp = static_cast<StorePointer *>(xcalloc(swap->n_allocated, sizeof(StorePointer)));
647 memcpy(tmp, swap->swapDirs, swap->n_configured * sizeof(SwapDir *));
648 xfree(swap->swapDirs);
649 swap->swapDirs = tmp;
650 }
651 }
652
/// Releases the cache_dir array on shutdown. A no-op during
/// reconfiguration, when the dirs must survive.
void
free_cachedir(SquidConfig::_cacheSwap * swap)
{
    int i;
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    for (i = 0; i < swap->n_configured; i++) {
        /* TODO XXX this lets the swapdir free resources asynchronously
         * swap->swapDirs[i]->deactivate();
         * but there may be such a means already.
         * RBC 20041225
         */
        swap->swapDirs[i] = NULL; // drops the RefCount, freeing the dir
    }

    safe_free(swap->swapDirs);
    swap->swapDirs = NULL;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}
676
/* this should be a virtual method on StoreEntry,
 * i.e. e->referenced()
 * so that the entry can notify the creating Store
 */
/// Propagates an entry "touch" to its disk store, the memory cache,
/// and the memory replacement policy.
void
StoreController::reference(StoreEntry &e)
{
    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        e.store()->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
699
/// Propagates an entry "release" to its disk store, the memory cache,
/// and the memory replacement policy.
void
StoreController::dereference(StoreEntry & e)
{
    /* Notify the fs that we're not referencing this object any more */

    // NOTE(review): reference() guards on e.swap_dirn but this guards on
    // e.swap_filen -- confirm the asymmetry is intentional.
    if (e.swap_filen > -1)
        e.store()->dereference(e);

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->dereference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
718
/// Looks up an entry by cache key: first the shared index of in-transit
/// entries, then the memory cache, then each active cache_dir in turn.
/// \return the entry, or NULL when no store has it
StoreEntry *
StoreController::get(const cache_key *key)
{
    if (StoreEntry *e = swapDir->get(key)) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    // TODO: this disk iteration is misplaced; move to StoreHashIndex
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue; // skip dirs that are not serving requests

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 3, HERE << "cache_dir " << idx <<
                       " got cached entry: " << *e);
                return e;
            }
        }
    }

    debugs(20, 4, HERE << "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return NULL;
}
760
/// Asynchronous lookup variant; not supported by this store.
void
StoreController::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
766
767 void
768 StoreController::handleIdleEntry(StoreEntry &e)
769 {
770 bool keepInLocalMemory = false;
771 if (memStore) {
772 memStore->considerKeeping(e);
773 // leave keepInLocalMemory false; memStore maintains its own cache
774 } else {
775 keepInLocalMemory = e.memoryCachable() && // entry is in good shape and
776 // the local memory cache is not overflowing
777 (mem_node::InUseCount() <= store_pages_max);
778 }
779
780 dereference(e);
781
782 // XXX: Rock store specific: Since each SwapDir controls its index,
783 // unlocked entries should not stay in the global store_table.
784 if (fileno >= 0) {
785 debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
786 destroyStoreEntry(static_cast<hash_link*>(&e));
787 return;
788 }
789
790 // TODO: move this into [non-shared] memory cache class when we have one
791 if (keepInLocalMemory) {
792 e.setMemStatus(IN_MEMORY);
793 e.mem_obj->unlinkRequest();
794 } else {
795 e.purgeMem(); // may free e
796 }
797 }
798
/// There may be only one StoreHashIndex: it owns the global store_table.
StoreHashIndex::StoreHashIndex()
{
    // abort() fires even in builds where assert() is compiled out
    if (store_table)
        abort();
    assert (store_table == NULL);
}
805
StoreHashIndex::~StoreHashIndex()
{
    // free every indexed entry, then the hash table itself
    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = NULL;
    }
}
814
/// Gives each cache_dir a chance to service pending I/O callbacks,
/// round-robin starting from a static cursor; keeps looping while any
/// dir reported work done in the last full pass.
/// \return total number of callbacks serviced
int
StoreHashIndex::callback()
{
    int result = 0;
    int j; // callbacks serviced during the current pass
    static int ndir = 0; // round-robin cursor, persists across calls

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            // guard against a dir that never stops reporting work
            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0); // repeat while the last pass made progress

    ndir++;

    return result;
}
846
847 void
848 StoreHashIndex::create()
849 {
850 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
851 if (dir(i).active())
852 store(i)->create();
853 }
854 }
855
/* Lookup an object in the cache.
 * return just a reference to object, don't start swapping in yet. */
StoreEntry *
StoreHashIndex::get(const cache_key *key)
{
    PROF_start(storeGet);
    debugs(20, 3, "storeGet: looking up " << storeKeyText(key));
    // plain hash lookup in the shared in-core index
    StoreEntry *p = static_cast<StoreEntry *>(hash_lookup(store_table, key));
    PROF_stop(storeGet);
    return p;
}
867
/// Asynchronous lookup variant; not supported by the hash index.
void
StoreHashIndex::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    fatal("not implemented");
}
873
/// Sizes and creates the global store_table hash, then initializes every
/// active cache_dir (which starts their index rebuilds).
void
StoreHashIndex::init()
{
    /* Calculate size of hash table (maximum currently 64k buckets). */
    /* this is very bogus, its specific to the any Store maintaining an
     * in-core index, not global */
    // estimated object count = (disk KB + memory KB) / average object size
    size_t buckets = (Store::Root().maxSize() + ( Config.memMaxSize >> 10)) / Config.Store.avgObjectSize;
    debugs(20, 1, "Swap maxSize " << Store::Root().maxSize() <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, 1, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours. */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, 1, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, 1, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB");
    debugs(20, 1, "Max Swap size: " << Store::Root().maxSize() << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         * above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }
}
914
915 uint64_t
916 StoreHashIndex::maxSize() const
917 {
918 uint64_t result = 0;
919
920 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
921 if (dir(i).doReportStat())
922 result += store(i)->maxSize();
923 }
924
925 return result;
926 }
927
/// Sum of the minimum-size targets of every reporting cache_dir.
uint64_t
StoreHashIndex::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}
940
941 uint64_t
942 StoreHashIndex::currentSize() const
943 {
944 uint64_t result = 0;
945
946 for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
947 if (dir(i).doReportStat())
948 result += store(i)->currentSize();
949 }
950
951 return result;
952 }
953
/// Sum of the object counts of every reporting cache_dir.
uint64_t
StoreHashIndex::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}
966
/// Largest max-object-size among the active cache_dirs
/// (-1 when none is active or all are unlimited... see note).
/// NOTE(review): a dir with max_objsize == -1 (unlimited) compares as
/// the smallest value here, so the returned "maximum" may understate
/// the real limit -- confirm callers expect that.
int64_t
StoreHashIndex::maxObjectSize() const
{
    int64_t result = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; i++) {
        if (dir(i).active() && store(i)->maxObjectSize() > result)
            result = store(i)->maxObjectSize();
    }

    return result;
}
979
/// Appends each cache_dir's statistics to the cachemgr output.
void
StoreHashIndex::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}
992
/// No-op: reference accounting happens in the individual SwapDirs.
void
StoreHashIndex::reference(StoreEntry&)
{}
996
/// No-op: dereference accounting happens in the individual SwapDirs.
void
StoreHashIndex::dereference(StoreEntry&)
{}
1000
/// Runs each cache_dir's replacement/cleanup maintenance pass.
void
StoreHashIndex::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; i++) {
        /* XXX FixMe: This should be done "in parallell" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}
1015
/// No-op: sizes are tracked by the individual SwapDirs.
void
StoreHashIndex::updateSize(int64_t, int)
{}
1019
1020 void
1021 StoreHashIndex::sync()
1022 {
1023 for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
1024 store(i)->sync();
1025 }
1026
/// Builds a whole-index search; URL filtering is not implemented yet.
/// Caller owns the returned search object.
StoreSearch *
StoreHashIndex::search(String const url, HttpRequest *)
{
    if (url.size())
        fatal ("Cannot search by url yet\n");

    return new StoreSearchHashIndex (this);
}
1035
1036 CBDATA_CLASS_INIT(StoreSearchHashIndex);
1037
/// Starts a bucket-by-bucket walk of the given index's store_table.
StoreSearchHashIndex::StoreSearchHashIndex(RefCount<StoreHashIndex> aSwapDir) : sd(aSwapDir), _done (false), bucket (0)
{}
1040
1041 /* do not link
1042 StoreSearchHashIndex::StoreSearchHashIndex(StoreSearchHashIndex const &);
1043 */
1044
StoreSearchHashIndex::~StoreSearchHashIndex()
{}
1047
/// Callback-style advance: steps to the next entry, then notifies the
/// caller. Note the advance happens synchronously before the callback.
void
StoreSearchHashIndex::next(void (aCallback)(void *), void *aCallbackData)
{
    next();
    aCallback (aCallbackData);
}
1054
/// Advances to the next entry, pulling in further hash buckets as the
/// current batch runs out.
/// \return true while a current entry is available
bool
StoreSearchHashIndex::next()
{
    // drop the entry we just visited (entries is used as a stack)
    if (entries.size())
        entries.pop_back();

    while (!isDone() && !entries.size())
        copyBucket();

    return currentItem() != NULL;
}
1066
1067 bool
1068 StoreSearchHashIndex::error() const
1069 {
1070 return false;
1071 }
1072
/// True once every hash bucket has been visited or the search was
/// explicitly finished.
bool
StoreSearchHashIndex::isDone() const
{
    return bucket >= store_hash_buckets || _done;
}
1078
/// The entry the search currently points at, or NULL when exhausted.
StoreEntry *
StoreSearchHashIndex::currentItem()
{
    if (!entries.size())
        return NULL;

    return entries.back();
}
1087
/// Copies all entries of the current hash bucket into the local batch
/// and advances the bucket cursor.
void
StoreSearchHashIndex::copyBucket()
{
    /* probably need to lock the store entries...
     * we copy them all to prevent races on the links. */
    debugs(47, 3, "StoreSearchHashIndex::copyBucket #" << bucket);
    assert (!entries.size());
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    link_next = hash_get_bucket(store_table, bucket);

    // walk the bucket's chain, saving each entry pointer
    while (NULL != (link_ptr = link_next)) {
        link_next = link_ptr->next;
        StoreEntry *e = (StoreEntry *) link_ptr;

        entries.push_back(e);
    }

    bucket++;
    debugs(47,3, "got entries: " << entries.size());
}