// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

/*
 * The original include block was only partially preserved; the system
 * headers below are assumed from direct use in this file (malloc/free,
 * fprintf, bzero, pthread mutexes).
 */
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <pthread.h>

#include "libxfs_priv.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"

/* #define CACHE_ABORT 1 */

#define CACHE_SHAKE_COUNT	64

static unsigned int cache_generic_bulkrelse(struct cache *, struct list_head *);

struct cache *
cache_init(
	unsigned int		flags,
	unsigned int		hashsize,
	struct cache_operations	*cache_operations)
{
	struct cache *		cache;
	unsigned int		i, maxcount;

	maxcount = hashsize * HASH_CACHE_RATIO;

	if (!(cache = malloc(sizeof(struct cache))))
		return NULL;
	if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) {
		free(cache);
		return NULL;
	}

	cache->c_flags = flags;
	cache->c_count = 0;
	cache->c_max = 0;
	cache->c_hits = 0;
	cache->c_misses = 0;
	cache->c_maxcount = maxcount;
	cache->c_hashsize = hashsize;
	cache->c_hashshift = libxfs_highbit32(hashsize);
	cache->hash = cache_operations->hash;
	cache->alloc = cache_operations->alloc;
	cache->flush = cache_operations->flush;
	cache->relse = cache_operations->relse;
	cache->compare = cache_operations->compare;
	cache->bulkrelse = cache_operations->bulkrelse ?
		cache_operations->bulkrelse : cache_generic_bulkrelse;
	pthread_mutex_init(&cache->c_mutex, NULL);

	for (i = 0; i < hashsize; i++) {
		list_head_init(&cache->c_hash[i].ch_list);
		cache->c_hash[i].ch_count = 0;
		pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL);
	}

	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++) {
		list_head_init(&cache->c_mrus[i].cm_list);
		cache->c_mrus[i].cm_count = 0;
		pthread_mutex_init(&cache->c_mrus[i].cm_mutex, NULL);
	}

	return cache;
}
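
/*
 * Usage sketch (illustrative, not part of the original file): a client
 * supplies a cache_operations table and a hash table size. The "my_*"
 * callbacks and sizes below are hypothetical stand-ins.
 */
#if 0
static struct cache_operations my_cache_operations = {
	.hash		= my_hash,	/* map a key to a hash bucket index */
	.alloc		= my_alloc,	/* allocate a node to hold a key's object */
	.flush		= my_flush,	/* write back a dirty node, 0 on success */
	.relse		= my_relse,	/* free a node's memory */
	.compare	= my_compare,	/* CACHE_HIT/CACHE_MISS/CACHE_PURGE */
	.bulkrelse	= NULL,		/* NULL selects cache_generic_bulkrelse */
};

struct cache	*my_cache = cache_init(0, 1024, &my_cache_operations);
#endif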

static void
cache_expand(
	struct cache *		cache)
{
	pthread_mutex_lock(&cache->c_mutex);
#ifdef CACHE_DEBUG
	fprintf(stderr, "doubling cache size to %d\n", 2 * cache->c_maxcount);
#endif
	cache->c_maxcount *= 2;
	pthread_mutex_unlock(&cache->c_mutex);
}

void
cache_walk(
	struct cache *		cache,
	cache_walk_t		visit)
{
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	int			i;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];
		head = &hash->ch_list;
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next; pos != head; pos = pos->next)
			visit((struct cache_node *)pos);
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#ifdef CACHE_ABORT
#define cache_abort()	abort()
#else
#define cache_abort()	do { } while (0)
#endif

#ifdef CACHE_DEBUG
static void
cache_zero_check(
	struct cache_node *	node)
{
	if (node->cn_count > 0) {
		fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
}
#define cache_destroy_check(c)	cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)	do { } while (0)
#endif

void
cache_destroy(
	struct cache *		cache)
{
	int			i;

	cache_destroy_check(cache);
	for (i = 0; i < cache->c_hashsize; i++) {
		list_head_destroy(&cache->c_hash[i].ch_list);
		pthread_mutex_destroy(&cache->c_hash[i].ch_mutex);
	}
	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++) {
		list_head_destroy(&cache->c_mrus[i].cm_list);
		pthread_mutex_destroy(&cache->c_mrus[i].cm_mutex);
	}
	pthread_mutex_destroy(&cache->c_mutex);
	free(cache->c_hash);
	free(cache);
}

static unsigned int
cache_generic_bulkrelse(
	struct cache *		cache,
	struct list_head *	list)
{
	struct cache_node *	node;
	unsigned int		count = 0;

	while (!list_empty(list)) {
		node = list_entry(list->next, struct cache_node, cn_mru);
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_mru);
		cache->relse(node);
		count++;
	}

	return count;
}

/*
 * Park unflushable nodes on their own special MRU so that cache_shake()
 * doesn't end up repeatedly scanning them in the futile attempt to clean
 * them before reclaim.
 */
static void
cache_add_to_dirty_mru(
	struct cache		*cache,
	struct cache_node	*node)
{
	struct cache_mru	*mru = &cache->c_mrus[CACHE_DIRTY_PRIORITY];

	pthread_mutex_lock(&mru->cm_mutex);
	node->cn_old_priority = node->cn_priority;
	node->cn_priority = CACHE_DIRTY_PRIORITY;
	list_add(&node->cn_mru, &mru->cm_list);
	mru->cm_count++;
	pthread_mutex_unlock(&mru->cm_mutex);
}

/*
 * We've hit the limit on cache size, so we need to start reclaiming nodes
 * we've used. The MRU specified by the priority is shaken. Returns new
 * priority at end of the call (in case we call again). We are not allowed
 * to reclaim dirty objects, so we have to flush them first. If flushing
 * fails, we move them to the "dirty, unreclaimable" list.
 *
 * Hence we skip priorities > CACHE_MAX_PRIORITY unless "purge" is set as we
 * park unflushable (and hence unreclaimable) buffers at these priorities.
 * Trying to shake unreclaimable buffer lists when there is memory pressure
 * is a waste of time and CPU and greatly slows down cache node recycling
 * operations. Hence we only try to free them if we are being asked to purge
 * the cache of all entries.
 */
static unsigned int
cache_shake(
	struct cache *		cache,
	unsigned int		priority,
	bool			purge)
{
	struct cache_mru	*mru;
	struct cache_hash *	hash;
	struct list_head	temp;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_node *	node;
	unsigned int		count;

	ASSERT(priority <= CACHE_DIRTY_PRIORITY);
	if (priority > CACHE_MAX_PRIORITY && !purge)
		priority = 0;

	mru = &cache->c_mrus[priority];
	count = 0;
	list_head_init(&temp);
	head = &mru->cm_list;

	pthread_mutex_lock(&mru->cm_mutex);
	for (pos = head->prev, n = pos->prev; pos != head;
				pos = n, n = pos->prev) {
		node = list_entry(pos, struct cache_node, cn_mru);

		if (pthread_mutex_trylock(&node->cn_mutex) != 0)
			continue;

		/* memory pressure is not allowed to release dirty objects */
		if (cache->flush(node) && !purge) {
			list_del(&node->cn_mru);
			mru->cm_count--;
			node->cn_priority = -1;
			pthread_mutex_unlock(&node->cn_mutex);
			cache_add_to_dirty_mru(cache, node);
			continue;
		}

		hash = cache->c_hash + node->cn_hashidx;
		if (pthread_mutex_trylock(&hash->ch_mutex) != 0) {
			pthread_mutex_unlock(&node->cn_mutex);
			continue;
		}
		ASSERT(node->cn_count == 0);
		ASSERT(node->cn_priority == priority);
		node->cn_priority = -1;

		list_move(&node->cn_mru, &temp);
		list_del_init(&node->cn_hash);
		hash->ch_count--;
		mru->cm_count--;
		pthread_mutex_unlock(&hash->ch_mutex);
		pthread_mutex_unlock(&node->cn_mutex);

		count++;
		if (!purge && count == CACHE_SHAKE_COUNT)
			break;
	}
	pthread_mutex_unlock(&mru->cm_mutex);

	if (count) {
		cache->bulkrelse(cache, &temp);

		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= count;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	return (count == CACHE_SHAKE_COUNT) ? priority : ++priority;
}
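
/*
 * Illustrative caller-side sketch of the return contract above (this mirrors
 * what the allocation path of cache_node_get() below actually does): walk up
 * the priorities as cache_shake() hands back priority + 1, and expand the
 * cache once every reclaimable priority has been shaken dry.
 */
#if 0
	priority = 0;
	while (!(node = cache_node_allocate(cache, key))) {
		priority = cache_shake(cache, priority, false);
		if (priority > CACHE_MAX_PRIORITY) {
			priority = 0;
			cache_expand(cache);
		}
	}
#endif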

/*
 * Allocate a new hash node (updating atomic counter in the process),
 * unless doing so will push us over the maximum cache size.
 */
static struct cache_node *
cache_node_allocate(
	struct cache *		cache,
	cache_key_t		key)
{
	unsigned int		nodesfree;
	struct cache_node *	node;

	pthread_mutex_lock(&cache->c_mutex);
	nodesfree = (cache->c_count < cache->c_maxcount);
	if (nodesfree) {
		cache->c_count++;
		if (cache->c_count > cache->c_max)
			cache->c_max = cache->c_count;
	}
	cache->c_misses++;
	pthread_mutex_unlock(&cache->c_mutex);
	if (!nodesfree)
		return NULL;
	node = cache->alloc(key);
	if (node == NULL) {	/* uh-oh */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
		return NULL;
	}
	pthread_mutex_init(&node->cn_mutex, NULL);
	list_head_init(&node->cn_mru);
	node->cn_count = 1;
	node->cn_priority = 0;
	node->cn_old_priority = -1;
	return node;
}

int
cache_overflowed(
	struct cache *		cache)
{
	return cache->c_maxcount == cache->c_max;
}

/*
 * Returns 0 if the node was purged, nonzero (the refcount, or 1 for an
 * unflushable dirty node) if it could not be released.
 */
static int
__cache_node_purge(
	struct cache *		cache,
	struct cache_node *	node)
{
	int			count;
	struct cache_mru *	mru;

	pthread_mutex_lock(&node->cn_mutex);
	count = node->cn_count;
	if (count != 0) {
		pthread_mutex_unlock(&node->cn_mutex);
		return count;
	}

	/* can't purge dirty objects */
	if (cache->flush(node)) {
		pthread_mutex_unlock(&node->cn_mutex);
		return 1;
	}

	mru = &cache->c_mrus[node->cn_priority];
	pthread_mutex_lock(&mru->cm_mutex);
	list_del_init(&node->cn_mru);
	mru->cm_count--;
	pthread_mutex_unlock(&mru->cm_mutex);

	pthread_mutex_unlock(&node->cn_mutex);
	pthread_mutex_destroy(&node->cn_mutex);
	list_del_init(&node->cn_hash);
	cache->relse(node);
	return 0;
}

/*
 * Lookup in the cache hash table. With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns one if hit in cache, otherwise zero. A node is _always_
 * returned, however.
 */
int
cache_node_get(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node **	nodep)
{
	struct cache_node *	node = NULL;
	struct cache_hash *	hash;
	struct cache_mru *	mru;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	unsigned int		hashidx;
	int			priority = 0;
	int			purged = 0;

	hashidx = cache->hash(key, cache->c_hashsize, cache->c_hashshift);
	hash = cache->c_hash + hashidx;
	head = &hash->ch_list;

	for (;;) {
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next, n = pos->next; pos != head;
						pos = n, n = pos->next) {
			int		result;

			node = list_entry(pos, struct cache_node, cn_hash);
			result = cache->compare(node, key);
			switch (result) {
			case CACHE_HIT:
				break;
			case CACHE_PURGE:
				if ((cache->c_flags & CACHE_MISCOMPARE_PURGE) &&
				    !__cache_node_purge(cache, node)) {
					purged++;
					hash->ch_count--;
				}
				/* FALL THROUGH */
			case CACHE_MISS:
				goto next_object;
			}

			/*
			 * node found, bump node's reference count, remove it
			 * from its MRU list, and update stats.
			 */
			pthread_mutex_lock(&node->cn_mutex);

			if (node->cn_count == 0) {
				ASSERT(node->cn_priority >= 0);
				ASSERT(!list_empty(&node->cn_mru));
				mru = &cache->c_mrus[node->cn_priority];
				pthread_mutex_lock(&mru->cm_mutex);
				mru->cm_count--;
				list_del_init(&node->cn_mru);
				pthread_mutex_unlock(&mru->cm_mutex);
				if (node->cn_old_priority != -1) {
					ASSERT(node->cn_priority ==
							CACHE_DIRTY_PRIORITY);
					node->cn_priority = node->cn_old_priority;
					node->cn_old_priority = -1;
				}
			}
			node->cn_count++;

			pthread_mutex_unlock(&node->cn_mutex);
			pthread_mutex_unlock(&hash->ch_mutex);

			pthread_mutex_lock(&cache->c_mutex);
			cache->c_hits++;
			pthread_mutex_unlock(&cache->c_mutex);

			*nodep = node;
			return 1;
next_object:
			continue;	/* what the hell, gcc? */
		}
		pthread_mutex_unlock(&hash->ch_mutex);
		/*
		 * not found, allocate a new entry
		 */
		node = cache_node_allocate(cache, key);
		if (node)
			break;
		priority = cache_shake(cache, priority, false);
		/*
		 * We start at 0; if we free CACHE_SHAKE_COUNT we get
		 * back the same priority, if not we get back priority+1.
		 * If we exceed CACHE_MAX_PRIORITY all slots are full; grow it.
		 */
		if (priority > CACHE_MAX_PRIORITY) {
			priority = 0;
			cache_expand(cache);
		}
	}

	node->cn_hashidx = hashidx;

	/* add new node to appropriate hash */
	pthread_mutex_lock(&hash->ch_mutex);
	hash->ch_count++;
	list_add(&node->cn_hash, &hash->ch_list);
	pthread_mutex_unlock(&hash->ch_mutex);

	if (purged) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= purged;
		pthread_mutex_unlock(&cache->c_mutex);
	}

	*nodep = node;
	return 0;
}

void
cache_node_put(
	struct cache *		cache,
	struct cache_node *	node)
{
	struct cache_mru *	mru;

	pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
	if (node->cn_count < 1) {
		fprintf(stderr, "%s: node put on refcount %u (node=%p)\n",
				__FUNCTION__, node->cn_count, node);
		cache_abort();
	}
	if (!list_empty(&node->cn_mru)) {
		fprintf(stderr, "%s: node put on node (%p) in MRU list\n",
				__FUNCTION__, node);
		cache_abort();
	}
#endif
	node->cn_count--;

	if (node->cn_count == 0) {
		/* add unreferenced node to appropriate MRU for shaker */
		mru = &cache->c_mrus[node->cn_priority];
		pthread_mutex_lock(&mru->cm_mutex);
		mru->cm_count++;
		list_add(&node->cn_mru, &mru->cm_list);
		pthread_mutex_unlock(&mru->cm_mutex);
	}

	pthread_mutex_unlock(&node->cn_mutex);
}
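
/*
 * Typical lookup/use/release cycle (an illustrative sketch; "my_cache" and
 * "my_key" are hypothetical). cache_node_get() always hands back a
 * referenced node, and every get must be paired with a put so unreferenced
 * nodes return to an MRU where cache_shake() can find them.
 */
#if 0
	struct cache_node	*node;
	int			hit;

	hit = cache_node_get(my_cache, my_key, &node);
	/* ... use the object embedding "node" ... */
	cache_node_put(my_cache, node);
#endif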

void
cache_node_set_priority(
	struct cache *		cache,
	struct cache_node *	node,
	int			priority)
{
	if (priority < 0)
		priority = 0;
	else if (priority > CACHE_MAX_PRIORITY)
		priority = CACHE_MAX_PRIORITY;

	pthread_mutex_lock(&node->cn_mutex);
	ASSERT(node->cn_count > 0);
	node->cn_priority = priority;
	node->cn_old_priority = -1;
	pthread_mutex_unlock(&node->cn_mutex);
}

int
cache_node_get_priority(
	struct cache_node *	node)
{
	int			priority;

	pthread_mutex_lock(&node->cn_mutex);
	priority = node->cn_priority;
	pthread_mutex_unlock(&node->cn_mutex);

	return priority;
}

/*
 * Purge a specific node from the cache. Reference count must be zero.
 */
int
cache_node_purge(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node *	node)
{
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_hash *	hash;
	int			count = -1;

	hash = cache->c_hash + cache->hash(key, cache->c_hashsize,
					   cache->c_hashshift);
	head = &hash->ch_list;
	pthread_mutex_lock(&hash->ch_mutex);
	for (pos = head->next, n = pos->next; pos != head;
						pos = n, n = pos->next) {
		if ((struct cache_node *)pos != node)
			continue;

		count = __cache_node_purge(cache, node);
		if (!count)
			hash->ch_count--;
		break;
	}
	pthread_mutex_unlock(&hash->ch_mutex);

	if (count == 0) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
	}
#ifdef CACHE_DEBUG
	if (count >= 1) {
		fprintf(stderr, "%s: refcount was %u, not zero (node=%p)\n",
				__FUNCTION__, count, node);
		cache_abort();
	}
	if (count == -1) {
		fprintf(stderr, "%s: purge node not found! (node=%p)\n",
			__FUNCTION__, node);
		cache_abort();
	}
#endif
	return count == 0;
}

/*
 * Purge all nodes from the cache. All reference counts must be zero.
 */
void
cache_purge(
	struct cache *		cache)
{
	int			i;

	for (i = 0; i <= CACHE_DIRTY_PRIORITY; i++)
		cache_shake(cache, i, true);

#ifdef CACHE_DEBUG
	if (cache->c_count != 0) {
		/* flush referenced nodes to disk */
		cache_flush(cache);
		fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
				__FUNCTION__, cache, cache->c_count);
		cache_abort();
	}
#endif
}
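
/*
 * Teardown sketch (illustrative): drop all references first, then purge and
 * destroy. cache_purge() shakes every priority, including the dirty MRU,
 * with purge set, so unflushable nodes are released as well.
 */
#if 0
	cache_flush(my_cache);		/* write back remaining dirty nodes */
	cache_purge(my_cache);		/* reclaim all nodes; refcounts must be zero */
	cache_destroy(my_cache);	/* free hash table, MRUs and mutexes */
#endif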

/*
 * Flush all nodes in the cache to disk.
 */
void
cache_flush(
	struct cache *		cache)
{
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	struct cache_node *	node;
	int			i;

	if (!cache->flush)
		return;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];

		pthread_mutex_lock(&hash->ch_mutex);
		head = &hash->ch_list;
		for (pos = head->next; pos != head; pos = pos->next) {
			node = (struct cache_node *)pos;
			pthread_mutex_lock(&node->cn_mutex);
			cache->flush(node);
			pthread_mutex_unlock(&node->cn_mutex);
		}
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#define	HASH_REPORT	(3 * HASH_CACHE_RATIO)

void
cache_report(
	FILE *			fp,
	const char *		name,
	struct cache *		cache)
{
	int			i;
	unsigned long		count, index, total;
	unsigned long		hash_bucket_lengths[HASH_REPORT + 2];

	if ((cache->c_hits + cache->c_misses) == 0)
		return;

	/* report cache summary */
	fprintf(fp, "%s: %p\n"
			"Max supported entries = %u\n"
			"Max utilized entries = %u\n"
			"Active entries = %u\n"
			"Hash table size = %u\n"
			"Hits = %llu\n"
			"Misses = %llu\n"
			"Hit ratio = %5.2f\n",
			name, cache,
			cache->c_maxcount,
			cache->c_max,
			cache->c_count,
			cache->c_hashsize,
			cache->c_hits,
			cache->c_misses,
			(double)cache->c_hits * 100 /
				(cache->c_hits + cache->c_misses));

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
		fprintf(fp, "MRU %d entries = %6u (%3u%%)\n",
			i, cache->c_mrus[i].cm_count,
			cache->c_mrus[i].cm_count * 100 / cache->c_count);

	i = CACHE_DIRTY_PRIORITY;
	fprintf(fp, "Dirty MRU %d entries = %6u (%3u%%)\n",
		i, cache->c_mrus[i].cm_count,
		cache->c_mrus[i].cm_count * 100 / cache->c_count);

	/* report hash bucket lengths */
	bzero(hash_bucket_lengths, sizeof(hash_bucket_lengths));

	for (i = 0; i < cache->c_hashsize; i++) {
		count = cache->c_hash[i].ch_count;
		if (count > HASH_REPORT)
			index = HASH_REPORT + 1;
		else
			index = count;
		hash_bucket_lengths[index]++;
	}

	total = 0;
	for (i = 0; i < HASH_REPORT + 1; i++) {
		total += i * hash_bucket_lengths[i];
		if (hash_bucket_lengths[i] == 0)
			continue;
		fprintf(fp, "Hash buckets with %2d entries %6ld (%3ld%%)\n",
			i, hash_bucket_lengths[i],
			(i * hash_bucket_lengths[i] * 100) / cache->c_count);
	}
	if (hash_bucket_lengths[i])	/* last report bucket is the overflow bucket */
		fprintf(fp, "Hash buckets with >%2d entries %6ld (%3ld%%)\n",
			i - 1, hash_bucket_lengths[i],
			((cache->c_count - total) * 100) / cache->c_count);
}