/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#include <xfs/platform_defs.h>
#include <xfs/list.h>
#include <xfs/cache.h>
#include <xfs/libxfs.h>

#define CACHE_DEBUG 1
#undef CACHE_ABORT
/* #define CACHE_ABORT 1 */

#define CACHE_SHAKE_COUNT	64

static unsigned int cache_generic_bulkrelse(struct cache *, struct list_head *);

struct cache *
cache_init(
	int			flags,
	unsigned int		hashsize,
	struct cache_operations	*cache_operations)
{
	struct cache		*cache;
	unsigned int		i, maxcount;

	maxcount = hashsize * HASH_CACHE_RATIO;

	if (!(cache = malloc(sizeof(struct cache))))
		return NULL;
	if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) {
		free(cache);
		return NULL;
	}

	cache->c_flags = flags;
	cache->c_count = 0;
	cache->c_max = 0;
	cache->c_hits = 0;
	cache->c_misses = 0;
	cache->c_maxcount = maxcount;
	cache->c_hashsize = hashsize;
	cache->c_hashshift = libxfs_highbit32(hashsize);
	cache->hash = cache_operations->hash;
	cache->alloc = cache_operations->alloc;
	cache->flush = cache_operations->flush;
	cache->relse = cache_operations->relse;
	cache->compare = cache_operations->compare;
	cache->bulkrelse = cache_operations->bulkrelse ?
		cache_operations->bulkrelse : cache_generic_bulkrelse;
	pthread_mutex_init(&cache->c_mutex, NULL);

	for (i = 0; i < hashsize; i++) {
		list_head_init(&cache->c_hash[i].ch_list);
		cache->c_hash[i].ch_count = 0;
		pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL);
	}

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++) {
		list_head_init(&cache->c_mrus[i].cm_list);
		cache->c_mrus[i].cm_count = 0;
		pthread_mutex_init(&cache->c_mrus[i].cm_mutex, NULL);
	}
	return cache;
}
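
/*
 * Usage sketch (not part of this file): a client fills in a
 * cache_operations table and sizes the hash table up front.  The names
 * below (my_ops, my_hash, etc.) are hypothetical; libxfs wires up its
 * own table for the buffer cache.
 *
 *	static struct cache_operations my_ops = {
 *		.hash		= my_hash,
 *		.alloc		= my_alloc,
 *		.flush		= my_flush,
 *		.relse		= my_relse,
 *		.compare	= my_compare,
 *		.bulkrelse	= NULL,	-- NULL selects the generic bulkrelse
 *	};
 *
 *	struct cache	*cache = cache_init(0, 512, &my_ops);
 *	if (!cache)
 *		... handle allocation failure ...
 */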

void
cache_expand(
	struct cache		*cache)
{
	pthread_mutex_lock(&cache->c_mutex);
#ifdef CACHE_DEBUG
	fprintf(stderr, "doubling cache size to %u\n", 2 * cache->c_maxcount);
#endif
	cache->c_maxcount *= 2;
	pthread_mutex_unlock(&cache->c_mutex);
}

void
cache_walk(
	struct cache		*cache,
	cache_walk_t		visit)
{
	struct cache_hash	*hash;
	struct list_head	*head;
	struct list_head	*pos;
	unsigned int		i;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];
		head = &hash->ch_list;
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next; pos != head; pos = pos->next)
			visit((struct cache_node *)pos);
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}
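
/*
 * cache_walk() visits every node on every hash chain with that chain's
 * mutex held, so a visitor should stay short and must not reacquire
 * cache locks.  A minimal (hypothetical) visitor:
 *
 *	static void
 *	my_visit(struct cache_node *node)
 *	{
 *		fprintf(stderr, "node %p refcount %u\n", node, node->cn_count);
 *	}
 *
 *	cache_walk(cache, my_visit);
 *
 * The cache_zero_check() pass below uses exactly this mechanism.
 */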

#ifdef CACHE_ABORT
#define cache_abort()	abort()
#else
#define cache_abort()	do { } while (0)
#endif

#ifdef CACHE_DEBUG
static void
cache_zero_check(
	struct cache_node	*node)
{
	if (node->cn_count > 0) {
		fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
}
#define cache_destroy_check(c)	cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)	do { } while (0)
#endif

void
cache_destroy(
	struct cache		*cache)
{
	unsigned int		i;

	cache_destroy_check(cache);
	for (i = 0; i < cache->c_hashsize; i++) {
		list_head_destroy(&cache->c_hash[i].ch_list);
		pthread_mutex_destroy(&cache->c_hash[i].ch_mutex);
	}
	for (i = 0; i <= CACHE_MAX_PRIORITY; i++) {
		list_head_destroy(&cache->c_mrus[i].cm_list);
		pthread_mutex_destroy(&cache->c_mrus[i].cm_mutex);
	}
	pthread_mutex_destroy(&cache->c_mutex);
	free(cache->c_hash);
	free(cache);
}

static unsigned int
cache_generic_bulkrelse(
	struct cache		*cache,
	struct list_head	*list)
{
	struct cache_node	*node;
	unsigned int		count = 0;

	while (!list_empty(list)) {
		node = list_entry(list->next, struct cache_node, cn_mru);
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_mru);
		cache->relse(node);
		count++;
	}

	return count;
}
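
/*
 * Clients may supply their own bulkrelse operation instead (libxfs, for
 * example, provides one for its buffer cache).  A replacement must honour
 * the same contract as the generic version above: destroy each node's
 * mutex, unlink it from the cn_mru list, release it, and return the
 * number of nodes released.  Sketch, with a hypothetical my_bulkrelse:
 *
 *	static unsigned int
 *	my_bulkrelse(struct cache *cache, struct list_head *list);
 *
 *	static struct cache_operations my_ops = {
 *		...
 *		.bulkrelse	= my_bulkrelse,
 *	};
 */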

/*
 * We've hit the limit on cache size, so we need to start reclaiming
 * nodes we've used.  The MRU specified by the priority is shaken.
 * Returns new priority at end of the call (in case we call again).
 */
static unsigned int
cache_shake(
	struct cache		*cache,
	unsigned int		priority,
	int			all)
{
	struct cache_mru	*mru;
	struct cache_hash	*hash;
	struct list_head	temp;
	struct list_head	*head;
	struct list_head	*pos;
	struct list_head	*n;
	struct cache_node	*node;
	unsigned int		count;

	ASSERT(priority <= CACHE_MAX_PRIORITY);
	if (priority > CACHE_MAX_PRIORITY)
		priority = 0;

	mru = &cache->c_mrus[priority];
	count = 0;
	list_head_init(&temp);
	head = &mru->cm_list;

	pthread_mutex_lock(&mru->cm_mutex);
	for (pos = head->prev, n = pos->prev; pos != head;
	     pos = n, n = pos->prev) {
		node = list_entry(pos, struct cache_node, cn_mru);

		if (pthread_mutex_trylock(&node->cn_mutex) != 0)
			continue;

		hash = cache->c_hash + node->cn_hashidx;
		if (pthread_mutex_trylock(&hash->ch_mutex) != 0) {
			pthread_mutex_unlock(&node->cn_mutex);
			continue;
		}
		ASSERT(node->cn_count == 0);
		ASSERT(node->cn_priority == priority);
		node->cn_priority = -1;

		list_move(&node->cn_mru, &temp);
		list_del_init(&node->cn_hash);
		hash->ch_count--;
		mru->cm_count--;
		pthread_mutex_unlock(&hash->ch_mutex);
		pthread_mutex_unlock(&node->cn_mutex);

		count++;
		if (!all && count == CACHE_SHAKE_COUNT)
			break;
	}
	pthread_mutex_unlock(&mru->cm_mutex);

	if (count > 0) {
		cache->bulkrelse(cache, &temp);

		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= count;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	return (count == CACHE_SHAKE_COUNT) ? priority : ++priority;
}
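
/*
 * The return value is designed for a retry loop; cache_node_get() below
 * uses it exactly like this: a full CACHE_SHAKE_COUNT reclaim keeps
 * shaking the same priority, a short count advances to the next, and
 * walking past CACHE_MAX_PRIORITY means nothing more can be freed, so
 * the cache is grown instead:
 *
 *	priority = 0;
 *	while (!(node = cache_node_allocate(cache, key))) {
 *		priority = cache_shake(cache, priority, 0);
 *		if (priority > CACHE_MAX_PRIORITY) {
 *			priority = 0;
 *			cache_expand(cache);
 *		}
 *	}
 */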

/*
 * Allocate a new hash node (updating atomic counter in the process),
 * unless doing so will push us over the maximum cache size.
 */
static struct cache_node *
cache_node_allocate(
	struct cache		*cache,
	cache_key_t		key)
{
	unsigned int		nodesfree;
	struct cache_node	*node;

	pthread_mutex_lock(&cache->c_mutex);
	nodesfree = (cache->c_count < cache->c_maxcount);
	if (nodesfree) {
		cache->c_count++;
		if (cache->c_count > cache->c_max)
			cache->c_max = cache->c_count;
	}
	cache->c_misses++;
	pthread_mutex_unlock(&cache->c_mutex);
	if (!nodesfree)
		return NULL;
	node = cache->alloc(key);
	if (node == NULL) {	/* uh-oh */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
		return NULL;
	}
	pthread_mutex_init(&node->cn_mutex, NULL);
	list_head_init(&node->cn_mru);
	node->cn_count = 1;
	node->cn_priority = 0;
	return node;
}

int
cache_overflowed(
	struct cache		*cache)
{
	return cache->c_maxcount == cache->c_max;
}

static int
__cache_node_purge(
	struct cache		*cache,
	struct cache_node	*node)
{
	int			count;
	struct cache_mru	*mru;

	pthread_mutex_lock(&node->cn_mutex);
	count = node->cn_count;
	if (count != 0) {
		pthread_mutex_unlock(&node->cn_mutex);
		return count;
	}
	mru = &cache->c_mrus[node->cn_priority];
	pthread_mutex_lock(&mru->cm_mutex);
	list_del_init(&node->cn_mru);
	mru->cm_count--;
	pthread_mutex_unlock(&mru->cm_mutex);

	pthread_mutex_unlock(&node->cn_mutex);
	pthread_mutex_destroy(&node->cn_mutex);
	list_del_init(&node->cn_hash);
	cache->relse(node);
	return count;
}

/*
 * Lookup in the cache hash table.  With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns zero if hit in cache, otherwise one.  A node is _always_
 * returned, however.
 */
int
cache_node_get(
	struct cache		*cache,
	cache_key_t		key,
	struct cache_node	**nodep)
{
	struct cache_node	*node = NULL;
	struct cache_hash	*hash;
	struct cache_mru	*mru;
	struct list_head	*head;
	struct list_head	*pos;
	struct list_head	*n;
	unsigned int		hashidx;
	int			priority = 0;
	int			purged = 0;

	hashidx = cache->hash(key, cache->c_hashsize, cache->c_hashshift);
	hash = cache->c_hash + hashidx;
	head = &hash->ch_list;

	for (;;) {
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next, n = pos->next; pos != head;
		     pos = n, n = pos->next) {
			int result;

			node = list_entry(pos, struct cache_node, cn_hash);
			result = cache->compare(node, key);
			switch (result) {
			case CACHE_HIT:
				break;
			case CACHE_PURGE:
				if ((cache->c_flags & CACHE_MISCOMPARE_PURGE) &&
				    !__cache_node_purge(cache, node)) {
					purged++;
					hash->ch_count--;
				}
				/* FALL THROUGH */
			case CACHE_MISS:
				goto next_object;
			}

			/*
			 * node found, bump node's reference count, remove it
			 * from its MRU list, and update stats.
			 */
			pthread_mutex_lock(&node->cn_mutex);

			if (node->cn_count == 0) {
				ASSERT(node->cn_priority >= 0);
				ASSERT(!list_empty(&node->cn_mru));
				mru = &cache->c_mrus[node->cn_priority];
				pthread_mutex_lock(&mru->cm_mutex);
				mru->cm_count--;
				list_del_init(&node->cn_mru);
				pthread_mutex_unlock(&mru->cm_mutex);
			}
			node->cn_count++;

			pthread_mutex_unlock(&node->cn_mutex);
			pthread_mutex_unlock(&hash->ch_mutex);

			pthread_mutex_lock(&cache->c_mutex);
			cache->c_hits++;
			pthread_mutex_unlock(&cache->c_mutex);

			*nodep = node;
			return 0;
next_object:
			continue;	/* a label must precede a statement */
		}
		pthread_mutex_unlock(&hash->ch_mutex);
		/*
		 * not found, allocate a new entry
		 */
		node = cache_node_allocate(cache, key);
		if (node)
			break;
		priority = cache_shake(cache, priority, 0);
		/*
		 * We start at 0; if we free CACHE_SHAKE_COUNT we get
		 * back the same priority, if not we get back priority+1.
		 * If we exceed CACHE_MAX_PRIORITY all slots are full; grow it.
		 */
		if (priority > CACHE_MAX_PRIORITY) {
			priority = 0;
			cache_expand(cache);
		}
	}

	node->cn_hashidx = hashidx;

	/* add new node to appropriate hash */
	pthread_mutex_lock(&hash->ch_mutex);
	hash->ch_count++;
	list_add(&node->cn_hash, &hash->ch_list);
	pthread_mutex_unlock(&hash->ch_mutex);

	if (purged) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= purged;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	*nodep = node;
	return 1;
}

void
cache_node_put(
	struct cache		*cache,
	struct cache_node	*node)
{
	struct cache_mru	*mru;

	pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
	if (node->cn_count < 1) {
		fprintf(stderr, "%s: node put on refcount %u (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
	if (!list_empty(&node->cn_mru)) {
		fprintf(stderr, "%s: node put on node (%p) in MRU list\n",
			__FUNCTION__, node);
		cache_abort();
	}
#endif
	node->cn_count--;

	if (node->cn_count == 0) {
		/* add unreferenced node to appropriate MRU for shaker */
		mru = &cache->c_mrus[node->cn_priority];
		pthread_mutex_lock(&mru->cm_mutex);
		mru->cm_count++;
		list_add(&node->cn_mru, &mru->cm_list);
		pthread_mutex_unlock(&mru->cm_mutex);
	}

	pthread_mutex_unlock(&node->cn_mutex);
}
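
/*
 * Typical lookup/release pattern (sketch; my_key is hypothetical).
 * cache_node_get() always hands back a referenced node: zero means a
 * hit, one means a fresh node from cache->alloc() that the caller
 * typically initializes.  cache_node_put() drops the reference; at zero
 * the node is parked on its priority MRU, where cache_shake() may
 * reclaim it.
 *
 *	struct cache_node	*node;
 *
 *	if (cache_node_get(cache, my_key, &node)) {
 *		... miss: populate the newly allocated node ...
 *	}
 *	... use node; it cannot be reclaimed while referenced ...
 *	cache_node_put(cache, node);
 */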

void
cache_node_set_priority(
	struct cache		*cache,
	struct cache_node	*node,
	int			priority)
{
	if (priority < 0)
		priority = 0;
	else if (priority > CACHE_MAX_PRIORITY)
		priority = CACHE_MAX_PRIORITY;

	pthread_mutex_lock(&node->cn_mutex);
	ASSERT(node->cn_count > 0);
	node->cn_priority = priority;
	pthread_mutex_unlock(&node->cn_mutex);
}

int
cache_node_get_priority(
	struct cache_node	*node)
{
	int			priority;

	pthread_mutex_lock(&node->cn_mutex);
	priority = node->cn_priority;
	pthread_mutex_unlock(&node->cn_mutex);

	return priority;
}
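
/*
 * Priorities bias reclaim, not lookup: cache_shake() empties MRU 0
 * before MRU 1, and so on.  To make a node one of the last candidates
 * for reclaim once it becomes unreferenced (sketch):
 *
 *	cache_node_set_priority(cache, node, CACHE_MAX_PRIORITY);
 */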

/*
 * Purge a specific node from the cache.  Reference count must be zero.
 */
int
cache_node_purge(
	struct cache		*cache,
	cache_key_t		key,
	struct cache_node	*node)
{
	struct list_head	*head;
	struct list_head	*pos;
	struct list_head	*n;
	struct cache_hash	*hash;
	int			count = -1;

	hash = cache->c_hash + cache->hash(key, cache->c_hashsize,
					   cache->c_hashshift);
	head = &hash->ch_list;
	pthread_mutex_lock(&hash->ch_mutex);
	for (pos = head->next, n = pos->next; pos != head;
	     pos = n, n = pos->next) {
		if ((struct cache_node *)pos != node)
			continue;

		count = __cache_node_purge(cache, node);
		if (!count)
			hash->ch_count--;
		break;
	}
	pthread_mutex_unlock(&hash->ch_mutex);

	if (count == 0) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
	}
#ifdef CACHE_DEBUG
	if (count >= 1) {
		fprintf(stderr, "%s: refcount was %d, not zero (node=%p)\n",
			__FUNCTION__, count, node);
		cache_abort();
	}
	if (count == -1) {
		fprintf(stderr, "%s: purge node not found! (node=%p)\n",
			__FUNCTION__, node);
		cache_abort();
	}
#endif
	return count == 0;
}

/*
 * Purge all nodes from the cache.  All reference counts must be zero.
 */
void
cache_purge(
	struct cache		*cache)
{
	int			i;

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
		cache_shake(cache, i, 1);

#ifdef CACHE_DEBUG
	if (cache->c_count != 0) {
		/* flush referenced nodes to disk */
		cache_flush(cache);
		fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
			__FUNCTION__, cache, cache->c_count);
		cache_abort();
	}
#endif
}
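
/*
 * Typical shutdown sequence (sketch): callers must have put back every
 * reference first, since both the shake and the destroy-time checks
 * assume zero refcounts.
 *
 *	cache_purge(cache);
 *	cache_destroy(cache);
 */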

/*
 * Flush all nodes in the cache to disk.
 */
void
cache_flush(
	struct cache		*cache)
{
	struct cache_hash	*hash;
	struct list_head	*head;
	struct list_head	*pos;
	struct cache_node	*node;
	int			i;

	if (!cache->flush)
		return;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];

		pthread_mutex_lock(&hash->ch_mutex);
		head = &hash->ch_list;
		for (pos = head->next; pos != head; pos = pos->next) {
			node = (struct cache_node *)pos;
			pthread_mutex_lock(&node->cn_mutex);
			cache->flush(node);
			pthread_mutex_unlock(&node->cn_mutex);
		}
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#define	HASH_REPORT	(3 * HASH_CACHE_RATIO)
void
cache_report(
	FILE		*fp,
	const char	*name,
	struct cache	*cache)
{
	int		i;
	unsigned long	count, index, total;
	unsigned long	hash_bucket_lengths[HASH_REPORT + 2];

	if ((cache->c_hits + cache->c_misses) == 0)
		return;

	/* report cache summary */
	fprintf(fp, "%s: %p\n"
		"Max supported entries = %u\n"
		"Max utilized entries = %u\n"
		"Active entries = %u\n"
		"Hash table size = %u\n"
		"Hits = %llu\n"
		"Misses = %llu\n"
		"Hit ratio = %5.2f\n",
		name, cache,
		cache->c_maxcount,
		cache->c_max,
		cache->c_count,
		cache->c_hashsize,
		cache->c_hits,
		cache->c_misses,
		(double)cache->c_hits * 100 /
			(cache->c_hits + cache->c_misses)
	);

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
		fprintf(fp, "MRU %d entries = %6u (%3u%%)\n",
			i, cache->c_mrus[i].cm_count,
			cache->c_mrus[i].cm_count * 100 / cache->c_count);

	/* report hash bucket lengths */
	memset(hash_bucket_lengths, 0, sizeof(hash_bucket_lengths));

	for (i = 0; i < cache->c_hashsize; i++) {
		count = cache->c_hash[i].ch_count;
		if (count > HASH_REPORT)
			index = HASH_REPORT + 1;
		else
			index = count;
		hash_bucket_lengths[index]++;
	}

	total = 0;
	for (i = 0; i < HASH_REPORT + 1; i++) {
		total += i * hash_bucket_lengths[i];
		if (hash_bucket_lengths[i] == 0)
			continue;
		fprintf(fp, "Hash buckets with %2d entries %6ld (%3ld%%)\n",
			i, hash_bucket_lengths[i],
			(i * hash_bucket_lengths[i] * 100) / cache->c_count);
	}
	if (hash_bucket_lengths[i])	/* last report bucket is the overflow bucket */
		fprintf(fp, "Hash buckets with >%2d entries %6ld (%3ld%%)\n",
			i - 1, hash_bucket_lengths[i],
			((cache->c_count - total) * 100) / cache->c_count);
}
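
/*
 * cache_report() is a diagnostics hook; the output stream and the cache
 * label are the caller's choice (sketch):
 *
 *	cache_report(stderr, "libxfs_bcache", cache);
 */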