/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_bit.h"

#define CACHE_DEBUG 1
#undef CACHE_DEBUG
#define CACHE_DEBUG 1
#undef CACHE_ABORT
/* #define CACHE_ABORT 1 */

#define CACHE_SHAKE_COUNT	64

static unsigned int cache_generic_bulkrelse(struct cache *, struct list_head *);

struct cache *
cache_init(
	int			flags,
	unsigned int		hashsize,
	struct cache_operations	*cache_operations)
{
	struct cache *		cache;
	unsigned int		i, maxcount;

	maxcount = hashsize * HASH_CACHE_RATIO;

	if (!(cache = malloc(sizeof(struct cache))))
		return NULL;
	if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) {
		free(cache);
		return NULL;
	}

	cache->c_flags = flags;
	cache->c_count = 0;
	cache->c_max = 0;
	cache->c_hits = 0;
	cache->c_misses = 0;
	cache->c_maxcount = maxcount;
	cache->c_hashsize = hashsize;
	cache->c_hashshift = libxfs_highbit32(hashsize);
	cache->hash = cache_operations->hash;
	cache->alloc = cache_operations->alloc;
	cache->flush = cache_operations->flush;
	cache->relse = cache_operations->relse;
	cache->compare = cache_operations->compare;
	cache->bulkrelse = cache_operations->bulkrelse ?
		cache_operations->bulkrelse : cache_generic_bulkrelse;
	pthread_mutex_init(&cache->c_mutex, NULL);

	for (i = 0; i < hashsize; i++) {
		list_head_init(&cache->c_hash[i].ch_list);
		cache->c_hash[i].ch_count = 0;
		pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL);
	}

	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++) {
		list_head_init(&cache->c_mrus[i].cm_list);
		cache->c_mrus[i].cm_count = 0;
		pthread_mutex_init(&cache->c_mrus[i].cm_mutex, NULL);
	}
	return cache;
}
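
/*
 * Example usage (a sketch; "my_ops" and its callbacks are hypothetical
 * stand-ins for whatever the cache client supplies, they are not part
 * of this file):
 *
 *	static struct cache_operations my_ops = {
 *		.hash		= my_hash,
 *		.alloc		= my_alloc,
 *		.flush		= my_flush,
 *		.relse		= my_relse,
 *		.compare	= my_compare,
 *		.bulkrelse	= NULL,	   // NULL falls back to the generic one
 *	};
 *
 *	struct cache	*c = cache_init(0, 512, &my_ops);
 *
 * A hashsize of 512 gives c_maxcount = 512 * HASH_CACHE_RATIO entries
 * before cache_shake()/cache_expand() come into play.
 */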

void
cache_expand(
	struct cache *		cache)
{
	pthread_mutex_lock(&cache->c_mutex);
#ifdef CACHE_DEBUG
	fprintf(stderr, "doubling cache size to %u\n", 2 * cache->c_maxcount);
#endif
	cache->c_maxcount *= 2;
	pthread_mutex_unlock(&cache->c_mutex);
}

void
cache_walk(
	struct cache *		cache,
	cache_walk_t		visit)
{
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	unsigned int		i;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];
		head = &hash->ch_list;
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next; pos != head; pos = pos->next)
			visit((struct cache_node *)pos);
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#ifdef CACHE_ABORT
#define cache_abort()	abort()
#else
#define cache_abort()	do { } while (0)
#endif

#ifdef CACHE_DEBUG
static void
cache_zero_check(
	struct cache_node *	node)
{
	if (node->cn_count > 0) {
		fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
}
#define cache_destroy_check(c)	cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)	do { } while (0)
#endif

void
cache_destroy(
	struct cache *		cache)
{
	unsigned int		i;

	cache_destroy_check(cache);
	for (i = 0; i < cache->c_hashsize; i++) {
		list_head_destroy(&cache->c_hash[i].ch_list);
		pthread_mutex_destroy(&cache->c_hash[i].ch_mutex);
	}
	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++) {
		list_head_destroy(&cache->c_mrus[i].cm_list);
		pthread_mutex_destroy(&cache->c_mrus[i].cm_mutex);
	}
	pthread_mutex_destroy(&cache->c_mutex);
	free(cache->c_hash);
	free(cache);
}

static unsigned int
cache_generic_bulkrelse(
	struct cache *		cache,
	struct list_head *	list)
{
	struct cache_node *	node;
	unsigned int		count = 0;

	while (!list_empty(list)) {
		node = list_entry(list->next, struct cache_node, cn_mru);
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_mru);
		cache->relse(node);
		count++;
	}

	return count;
}

/*
 * Park unflushable nodes on their own special MRU so that cache_shake()
 * does not end up repeatedly scanning them in a futile attempt to clean
 * them before reclaim.
 */
static void
cache_add_to_dirty_mru(
	struct cache		*cache,
	struct cache_node	*node)
{
	struct cache_mru	*mru = &cache->c_mrus[CACHE_DIRTY_PRIORITY];

	pthread_mutex_lock(&mru->cm_mutex);
	node->cn_old_priority = node->cn_priority;
	node->cn_priority = CACHE_DIRTY_PRIORITY;
	list_add(&node->cn_mru, &mru->cm_list);
	mru->cm_count++;
	pthread_mutex_unlock(&mru->cm_mutex);
}

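/*
 * Sketch of the resulting MRU layout, inferred from the loop bounds in
 * cache_init() and the priority checks in cache_shake() below (not a
 * statement from the original author):
 *
 *	c_mrus[0 .. CACHE_MAX_PRIORITY]		clean nodes, shaken in
 *						order under memory pressure
 *	c_mrus[CACHE_DIRTY_PRIORITY]		unflushable nodes, only
 *						touched when purging
 */
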
/*
 * We've hit the limit on cache size, so we need to start reclaiming nodes
 * we've used. The MRU specified by the priority is shaken. Returns the new
 * priority at the end of the call (in case we call again). We are not
 * allowed to reclaim dirty objects, so we have to flush them first. If
 * flushing fails, we move them to the "dirty, unreclaimable" list.
 *
 * Hence we skip priorities > CACHE_MAX_PRIORITY unless "purge" is set, as we
 * park unflushable (and hence unreclaimable) buffers at these priorities.
 * Trying to shake unreclaimable buffer lists when there is memory pressure
 * is a waste of time and CPU and greatly slows down cache node recycling
 * operations. Hence we only try to free them if we are being asked to purge
 * the cache of all entries.
 */
static unsigned int
cache_shake(
	struct cache *		cache,
	unsigned int		priority,
	bool			purge)
{
	struct cache_mru *	mru;
	struct cache_hash *	hash;
	struct list_head	temp;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_node *	node;
	unsigned int		count;

	ASSERT(priority <= CACHE_DIRTY_PRIORITY);
	if (priority > CACHE_MAX_PRIORITY && !purge)
		priority = 0;

	mru = &cache->c_mrus[priority];
	count = 0;
	list_head_init(&temp);
	head = &mru->cm_list;

	pthread_mutex_lock(&mru->cm_mutex);
	for (pos = head->prev, n = pos->prev; pos != head;
						pos = n, n = pos->prev) {
		node = list_entry(pos, struct cache_node, cn_mru);

		if (pthread_mutex_trylock(&node->cn_mutex) != 0)
			continue;

		/* memory pressure is not allowed to release dirty objects */
		if (cache->flush(node) && !purge) {
			list_del(&node->cn_mru);
			mru->cm_count--;
			node->cn_priority = -1;
			pthread_mutex_unlock(&node->cn_mutex);
			cache_add_to_dirty_mru(cache, node);
			continue;
		}

		hash = cache->c_hash + node->cn_hashidx;
		if (pthread_mutex_trylock(&hash->ch_mutex) != 0) {
			pthread_mutex_unlock(&node->cn_mutex);
			continue;
		}
		ASSERT(node->cn_count == 0);
		ASSERT(node->cn_priority == priority);
		node->cn_priority = -1;

		list_move(&node->cn_mru, &temp);
		list_del_init(&node->cn_hash);
		hash->ch_count--;
		mru->cm_count--;
		pthread_mutex_unlock(&hash->ch_mutex);
		pthread_mutex_unlock(&node->cn_mutex);

		count++;
		if (!purge && count == CACHE_SHAKE_COUNT)
			break;
	}
	pthread_mutex_unlock(&mru->cm_mutex);

	if (count > 0) {
		cache->bulkrelse(cache, &temp);

		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= count;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	return (count == CACHE_SHAKE_COUNT) ? priority : ++priority;
}

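/*
 * Example of the cache_shake() caller contract (a sketch that mirrors
 * the allocation loop in cache_node_get() further down): shaking a full
 * CACHE_SHAKE_COUNT nodes returns the same priority, a partial shake
 * returns priority + 1, and running past CACHE_MAX_PRIORITY means all
 * reclaimable MRUs came up empty:
 *
 *	int priority = 0;
 *
 *	while (!(node = cache_node_allocate(cache, key))) {
 *		priority = cache_shake(cache, priority, false);
 *		if (priority > CACHE_MAX_PRIORITY) {
 *			priority = 0;
 *			cache_expand(cache);	// nothing left to reclaim
 *		}
 *	}
 */
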
/*
 * Allocate a new hash node (updating atomic counter in the process),
 * unless doing so will push us over the maximum cache size.
 */
static struct cache_node *
cache_node_allocate(
	struct cache *		cache,
	cache_key_t		key)
{
	unsigned int		nodesfree;
	struct cache_node *	node;

	pthread_mutex_lock(&cache->c_mutex);
	nodesfree = (cache->c_count < cache->c_maxcount);
	if (nodesfree) {
		cache->c_count++;
		if (cache->c_count > cache->c_max)
			cache->c_max = cache->c_count;
	}
	cache->c_misses++;
	pthread_mutex_unlock(&cache->c_mutex);
	if (!nodesfree)
		return NULL;
	node = cache->alloc(key);
	if (node == NULL) {	/* uh-oh */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
		return NULL;
	}
	pthread_mutex_init(&node->cn_mutex, NULL);
	list_head_init(&node->cn_mru);
	node->cn_count = 1;
	node->cn_priority = 0;
	node->cn_old_priority = -1;
	return node;
}

int
cache_overflowed(
	struct cache *		cache)
{
	return cache->c_maxcount == cache->c_max;
}

static int
__cache_node_purge(
	struct cache *		cache,
	struct cache_node *	node)
{
	int			count;
	struct cache_mru *	mru;

	pthread_mutex_lock(&node->cn_mutex);
	count = node->cn_count;
	if (count != 0) {
		pthread_mutex_unlock(&node->cn_mutex);
		return count;
	}

	/* can't purge dirty objects */
	if (cache->flush(node)) {
		pthread_mutex_unlock(&node->cn_mutex);
		return 1;
	}

	mru = &cache->c_mrus[node->cn_priority];
	pthread_mutex_lock(&mru->cm_mutex);
	list_del_init(&node->cn_mru);
	mru->cm_count--;
	pthread_mutex_unlock(&mru->cm_mutex);

	pthread_mutex_unlock(&node->cn_mutex);
	pthread_mutex_destroy(&node->cn_mutex);
	list_del_init(&node->cn_hash);
	cache->relse(node);
	return 0;
}

/*
 * Lookup in the cache hash table.  With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns zero on a cache hit, one if a new node was allocated. A
 * node is _always_ returned, however.
 */
int
cache_node_get(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node **	nodep)
{
	struct cache_node *	node = NULL;
	struct cache_hash *	hash;
	struct cache_mru *	mru;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	unsigned int		hashidx;
	int			priority = 0;
	int			purged = 0;

	hashidx = cache->hash(key, cache->c_hashsize, cache->c_hashshift);
	hash = cache->c_hash + hashidx;
	head = &hash->ch_list;

	for (;;) {
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next, n = pos->next; pos != head;
						pos = n, n = pos->next) {
			int result;

			node = list_entry(pos, struct cache_node, cn_hash);
			result = cache->compare(node, key);
			switch (result) {
			case CACHE_HIT:
				break;
			case CACHE_PURGE:
				if ((cache->c_flags & CACHE_MISCOMPARE_PURGE) &&
				    !__cache_node_purge(cache, node)) {
					purged++;
					hash->ch_count--;
				}
				/* FALL THROUGH */
			case CACHE_MISS:
				goto next_object;
			}

			/*
			 * node found, bump node's reference count, remove it
			 * from its MRU list, and update stats.
			 */
			pthread_mutex_lock(&node->cn_mutex);

			if (node->cn_count == 0) {
				ASSERT(node->cn_priority >= 0);
				ASSERT(!list_empty(&node->cn_mru));
				mru = &cache->c_mrus[node->cn_priority];
				pthread_mutex_lock(&mru->cm_mutex);
				mru->cm_count--;
				list_del_init(&node->cn_mru);
				pthread_mutex_unlock(&mru->cm_mutex);
				if (node->cn_old_priority != -1) {
					ASSERT(node->cn_priority ==
							CACHE_DIRTY_PRIORITY);
					node->cn_priority = node->cn_old_priority;
					node->cn_old_priority = -1;
				}
			}
			node->cn_count++;

			pthread_mutex_unlock(&node->cn_mutex);
			pthread_mutex_unlock(&hash->ch_mutex);

			pthread_mutex_lock(&cache->c_mutex);
			cache->c_hits++;
			pthread_mutex_unlock(&cache->c_mutex);

			*nodep = node;
			return 0;
next_object:
			continue;	/* what the hell, gcc? */
		}
		pthread_mutex_unlock(&hash->ch_mutex);
		/*
		 * not found, allocate a new entry
		 */
		node = cache_node_allocate(cache, key);
		if (node)
			break;
		priority = cache_shake(cache, priority, false);
		/*
		 * We start at 0; if we free CACHE_SHAKE_COUNT we get
		 * back the same priority, if not we get back priority+1.
		 * If we exceed CACHE_MAX_PRIORITY all slots are full; grow it.
		 */
		if (priority > CACHE_MAX_PRIORITY) {
			priority = 0;
			cache_expand(cache);
		}
	}

	node->cn_hashidx = hashidx;

	/* add new node to appropriate hash */
	pthread_mutex_lock(&hash->ch_mutex);
	hash->ch_count++;
	list_add(&node->cn_hash, &hash->ch_list);
	pthread_mutex_unlock(&hash->ch_mutex);

	if (purged) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= purged;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	*nodep = node;
	return 1;
}

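/*
 * Example get/put pairing (a sketch; how "key" is built is client
 * specific and hypothetical here):
 *
 *	struct cache_node	*node;
 *	int			miss;
 *
 *	miss = cache_node_get(cache, key, &node);  // 0 = hit, 1 = new node
 *	... use the client object that embeds "node" ...
 *	cache_node_put(cache, node);	// at refcount 0 the node goes back
 *					// on an MRU for the shaker
 */
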
void
cache_node_put(
	struct cache *		cache,
	struct cache_node *	node)
{
	struct cache_mru *	mru;

	pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
	if (node->cn_count < 1) {
		fprintf(stderr, "%s: node put on refcount %u (node=%p)\n",
				__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
	if (!list_empty(&node->cn_mru)) {
		fprintf(stderr, "%s: node put on node (%p) in MRU list\n",
				__FUNCTION__, node);
		cache_abort();
	}
#endif
	node->cn_count--;

	if (node->cn_count == 0) {
		/* add unreferenced node to appropriate MRU for shaker */
		mru = &cache->c_mrus[node->cn_priority];
		pthread_mutex_lock(&mru->cm_mutex);
		mru->cm_count++;
		list_add(&node->cn_mru, &mru->cm_list);
		pthread_mutex_unlock(&mru->cm_mutex);
	}

	pthread_mutex_unlock(&node->cn_mutex);
}

void
cache_node_set_priority(
	struct cache *		cache,
	struct cache_node *	node,
	int			priority)
{
	if (priority < 0)
		priority = 0;
	else if (priority > CACHE_MAX_PRIORITY)
		priority = CACHE_MAX_PRIORITY;

	pthread_mutex_lock(&node->cn_mutex);
	ASSERT(node->cn_count > 0);
	node->cn_priority = priority;
	node->cn_old_priority = -1;
	pthread_mutex_unlock(&node->cn_mutex);
}

int
cache_node_get_priority(
	struct cache_node *	node)
{
	int			priority;

	pthread_mutex_lock(&node->cn_mutex);
	priority = node->cn_priority;
	pthread_mutex_unlock(&node->cn_mutex);

	return priority;
}

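/*
 * Example (sketch): a client that wants a node to survive shaking for
 * longer can raise its priority while still holding a reference; values
 * are clamped to [0, CACHE_MAX_PRIORITY]:
 *
 *	cache_node_set_priority(cache, node, CACHE_MAX_PRIORITY);
 *	ASSERT(cache_node_get_priority(node) == CACHE_MAX_PRIORITY);
 */
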
/*
 * Purge a specific node from the cache.  Reference count must be zero.
 */
int
cache_node_purge(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node *	node)
{
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_hash *	hash;
	int			count = -1;

	hash = cache->c_hash + cache->hash(key, cache->c_hashsize,
					   cache->c_hashshift);
	head = &hash->ch_list;
	pthread_mutex_lock(&hash->ch_mutex);
	for (pos = head->next, n = pos->next; pos != head;
						pos = n, n = pos->next) {
		if ((struct cache_node *)pos != node)
			continue;

		count = __cache_node_purge(cache, node);
		if (!count)
			hash->ch_count--;
		break;
	}
	pthread_mutex_unlock(&hash->ch_mutex);

	if (count == 0) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
	}
#ifdef CACHE_DEBUG
	if (count >= 1) {
		fprintf(stderr, "%s: refcount was %d, not zero (node=%p)\n",
				__FUNCTION__, count, node);
		cache_abort();
	}
	if (count == -1) {
		fprintf(stderr, "%s: purge node not found! (node=%p)\n",
				__FUNCTION__, node);
		cache_abort();
	}
#endif
	return count == 0;
}

/*
 * Purge all nodes from the cache.  All reference counts must be zero.
 */
void
cache_purge(
	struct cache *		cache)
{
	int			i;

	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++)
		cache_shake(cache, i, true);

#ifdef CACHE_DEBUG
	if (cache->c_count != 0) {
		/* flush referenced nodes to disk */
		cache_flush(cache);
		fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
				__FUNCTION__, cache, cache->c_count);
		cache_abort();
	}
#endif
}

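/*
 * Example teardown ordering (a sketch): flushing dirty nodes first lets
 * cache_purge() reclaim everything, since __cache_node_purge() refuses
 * dirty objects; only then is it safe to free the cache itself:
 *
 *	cache_flush(cache);	// write back dirty nodes
 *	cache_purge(cache);	// all reference counts must be zero by now
 *	cache_destroy(cache);
 */
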
/*
 * Flush all nodes in the cache to disk.
 */
void
cache_flush(
	struct cache *		cache)
{
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	struct cache_node *	node;
	int			i;

	if (!cache->flush)
		return;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];

		pthread_mutex_lock(&hash->ch_mutex);
		head = &hash->ch_list;
		for (pos = head->next; pos != head; pos = pos->next) {
			node = (struct cache_node *)pos;
			pthread_mutex_lock(&node->cn_mutex);
			cache->flush(node);
			pthread_mutex_unlock(&node->cn_mutex);
		}
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#define	HASH_REPORT	(3 * HASH_CACHE_RATIO)
void
cache_report(
	FILE		*fp,
	const char	*name,
	struct cache	*cache)
{
	int		i;
	unsigned long	count, index, total;
	unsigned long	hash_bucket_lengths[HASH_REPORT + 2];

	if ((cache->c_hits + cache->c_misses) == 0)
		return;

	/* report cache summary */
	fprintf(fp, "%s: %p\n"
			"Max supported entries = %u\n"
			"Max utilized entries = %u\n"
			"Active entries = %u\n"
			"Hash table size = %u\n"
			"Hits = %llu\n"
			"Misses = %llu\n"
			"Hit ratio = %5.2f\n",
			name, cache,
			cache->c_maxcount,
			cache->c_max,
			cache->c_count,
			cache->c_hashsize,
			cache->c_hits,
			cache->c_misses,
			(double)cache->c_hits * 100 /
				(cache->c_hits + cache->c_misses)
	);

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
		fprintf(fp, "MRU %d entries = %6u (%3u%%)\n",
			i, cache->c_mrus[i].cm_count,
			cache->c_mrus[i].cm_count * 100 / cache->c_count);

	i = CACHE_DIRTY_PRIORITY;
	fprintf(fp, "Dirty MRU %d entries = %6u (%3u%%)\n",
		i, cache->c_mrus[i].cm_count,
		cache->c_mrus[i].cm_count * 100 / cache->c_count);

	/* report hash bucket lengths */
	memset(hash_bucket_lengths, 0, sizeof(hash_bucket_lengths));

	for (i = 0; i < cache->c_hashsize; i++) {
		count = cache->c_hash[i].ch_count;
		if (count > HASH_REPORT)
			index = HASH_REPORT + 1;
		else
			index = count;
		hash_bucket_lengths[index]++;
	}

	total = 0;
	for (i = 0; i < HASH_REPORT + 1; i++) {
		total += i * hash_bucket_lengths[i];
		if (hash_bucket_lengths[i] == 0)
			continue;
		fprintf(fp, "Hash buckets with %2d entries %6ld (%3ld%%)\n",
			i, hash_bucket_lengths[i],
			(i * hash_bucket_lengths[i] * 100) / cache->c_count);
	}
	if (hash_bucket_lengths[i])	/* last report bucket is the overflow bucket */
		fprintf(fp, "Hash buckets with >%2d entries %6ld (%3ld%%)\n",
			i - 1, hash_bucket_lengths[i],
			((cache->c_count - total) * 100) / cache->c_count);
}