/*
 * libxfs/cache.c — generic reference-counted object cache with hashed
 * lookup and per-priority MRU eviction lists (thirdparty/xfsprogs-dev).
 */
/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
25 #include <xfs/platform_defs.h>
27 #include <xfs/cache.h>
33 /* #define CACHE_ABORT 1 */
35 #define CACHE_SHAKE_COUNT 64
37 static unsigned int cache_generic_bulkrelse(struct cache
*, struct list_head
*);
41 unsigned int hashsize
,
42 struct cache_operations
*cache_operations
)
45 unsigned int i
, maxcount
;
47 maxcount
= hashsize
* HASH_CACHE_RATIO
;
49 if (!(cache
= malloc(sizeof(struct cache
))))
51 if (!(cache
->c_hash
= calloc(hashsize
, sizeof(struct cache_hash
)))) {
60 cache
->c_maxcount
= maxcount
;
61 cache
->c_hashsize
= hashsize
;
62 cache
->hash
= cache_operations
->hash
;
63 cache
->alloc
= cache_operations
->alloc
;
64 cache
->flush
= cache_operations
->flush
;
65 cache
->relse
= cache_operations
->relse
;
66 cache
->compare
= cache_operations
->compare
;
67 cache
->bulkrelse
= cache_operations
->bulkrelse
?
68 cache_operations
->bulkrelse
: cache_generic_bulkrelse
;
69 pthread_mutex_init(&cache
->c_mutex
, NULL
);
71 for (i
= 0; i
< hashsize
; i
++) {
72 list_head_init(&cache
->c_hash
[i
].ch_list
);
73 cache
->c_hash
[i
].ch_count
= 0;
74 pthread_mutex_init(&cache
->c_hash
[i
].ch_mutex
, NULL
);
77 for (i
= 0; i
<= CACHE_MAX_PRIORITY
; i
++) {
78 list_head_init(&cache
->c_mrus
[i
].cm_list
);
79 cache
->c_mrus
[i
].cm_count
= 0;
80 pthread_mutex_init(&cache
->c_mrus
[i
].cm_mutex
, NULL
);
90 struct cache_hash
* hash
;
91 struct list_head
* head
;
92 struct list_head
* pos
;
95 for (i
= 0; i
< cache
->c_hashsize
; i
++) {
96 hash
= &cache
->c_hash
[i
];
97 head
= &hash
->ch_list
;
98 pthread_mutex_lock(&hash
->ch_mutex
);
99 for (pos
= head
->next
; pos
!= head
; pos
= pos
->next
)
100 visit((struct cache_node
*)pos
);
101 pthread_mutex_unlock(&hash
->ch_mutex
);
/* Hard-stop on cache invariant violations only when CACHE_ABORT is set. */
#ifdef CACHE_ABORT
#define cache_abort()		abort()
#else
#define cache_abort()		do { } while (0)
#endif
114 struct cache_node
* node
)
116 if (node
->cn_count
> 0) {
117 fprintf(stderr
, "%s: refcount is %u, not zero (node=%p)\n",
118 __FUNCTION__
, node
->cn_count
, node
);
/* In debug builds, verify all refcounts are zero before teardown. */
#ifdef CACHE_DEBUG
#define cache_destroy_check(c)	cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)	do { } while (0)
#endif
129 struct cache
* cache
)
133 cache_destroy_check(cache
);
134 for (i
= 0; i
< cache
->c_hashsize
; i
++) {
135 list_head_destroy(&cache
->c_hash
[i
].ch_list
);
136 pthread_mutex_destroy(&cache
->c_hash
[i
].ch_mutex
);
138 for (i
= 0; i
<= CACHE_MAX_PRIORITY
; i
++) {
139 list_head_destroy(&cache
->c_mrus
[i
].cm_list
);
140 pthread_mutex_destroy(&cache
->c_mrus
[i
].cm_mutex
);
142 pthread_mutex_destroy(&cache
->c_mutex
);
148 cache_generic_bulkrelse(
149 struct cache
* cache
,
150 struct list_head
* list
)
152 struct cache_node
* node
;
153 unsigned int count
= 0;
155 while (!list_empty(list
)) {
156 node
= list_entry(list
->next
, struct cache_node
, cn_mru
);
157 pthread_mutex_destroy(&node
->cn_mutex
);
158 list_del_init(&node
->cn_mru
);
167 * We've hit the limit on cache size, so we need to start reclaiming
168 * nodes we've used. The MRU specified by the priority is shaken.
169 * Returns new priority at end of the call (in case we call again).
173 struct cache
* cache
,
174 unsigned int priority
,
177 struct cache_mru
*mru
;
178 struct cache_hash
* hash
;
179 struct list_head temp
;
180 struct list_head
* head
;
181 struct list_head
* pos
;
182 struct list_head
* n
;
183 struct cache_node
* node
;
186 ASSERT(priority
<= CACHE_MAX_PRIORITY
);
187 if (priority
> CACHE_MAX_PRIORITY
)
190 mru
= &cache
->c_mrus
[priority
];
192 list_head_init(&temp
);
193 head
= &mru
->cm_list
;
195 pthread_mutex_lock(&mru
->cm_mutex
);
196 for (pos
= head
->prev
, n
= pos
->prev
; pos
!= head
;
197 pos
= n
, n
= pos
->prev
) {
198 node
= list_entry(pos
, struct cache_node
, cn_mru
);
200 if (pthread_mutex_trylock(&node
->cn_mutex
) != 0)
203 hash
= cache
->c_hash
+ node
->cn_hashidx
;
204 if (pthread_mutex_trylock(&hash
->ch_mutex
) != 0) {
205 pthread_mutex_unlock(&node
->cn_mutex
);
208 ASSERT(node
->cn_count
== 0);
209 ASSERT(node
->cn_priority
== priority
);
210 node
->cn_priority
= -1;
212 list_move(&node
->cn_mru
, &temp
);
213 list_del_init(&node
->cn_hash
);
216 pthread_mutex_unlock(&hash
->ch_mutex
);
217 pthread_mutex_unlock(&node
->cn_mutex
);
220 if (!all
&& count
== CACHE_SHAKE_COUNT
)
223 pthread_mutex_unlock(&mru
->cm_mutex
);
226 cache
->bulkrelse(cache
, &temp
);
228 pthread_mutex_lock(&cache
->c_mutex
);
229 cache
->c_count
-= count
;
230 pthread_mutex_unlock(&cache
->c_mutex
);
233 return (count
== CACHE_SHAKE_COUNT
) ? priority
: ++priority
;
237 * Allocate a new hash node (updating atomic counter in the process),
238 * unless doing so will push us over the maximum cache size.
240 static struct cache_node
*
242 struct cache
* cache
,
245 unsigned int nodesfree
;
246 struct cache_node
* node
;
248 pthread_mutex_lock(&cache
->c_mutex
);
249 nodesfree
= (cache
->c_count
< cache
->c_maxcount
);
252 if (cache
->c_count
> cache
->c_max
)
253 cache
->c_max
= cache
->c_count
;
256 pthread_mutex_unlock(&cache
->c_mutex
);
259 node
= cache
->alloc(key
);
260 if (node
== NULL
) { /* uh-oh */
261 pthread_mutex_lock(&cache
->c_mutex
);
263 pthread_mutex_unlock(&cache
->c_mutex
);
266 pthread_mutex_init(&node
->cn_mutex
, NULL
);
267 list_head_init(&node
->cn_mru
);
269 node
->cn_priority
= 0;
275 struct cache
* cache
)
277 return (cache
->c_maxcount
== cache
->c_max
);
281 * Lookup in the cache hash table. With any luck we'll get a cache
282 * hit, in which case this will all be over quickly and painlessly.
283 * Otherwise, we allocate a new node, taking care not to expand the
284 * cache beyond the requested maximum size (shrink it if it would).
285 * Returns one if hit in cache, otherwise zero. A node is _always_
290 struct cache
* cache
,
292 struct cache_node
** nodep
)
294 struct cache_node
* node
= NULL
;
295 struct cache_hash
* hash
;
296 struct cache_mru
* mru
;
297 struct list_head
* head
;
298 struct list_head
* pos
;
299 unsigned int hashidx
;
302 hashidx
= cache
->hash(key
, cache
->c_hashsize
);
303 hash
= cache
->c_hash
+ hashidx
;
304 head
= &hash
->ch_list
;
307 pthread_mutex_lock(&hash
->ch_mutex
);
308 for (pos
= head
->next
; pos
!= head
; pos
= pos
->next
) {
309 node
= list_entry(pos
, struct cache_node
, cn_hash
);
310 if (!cache
->compare(node
, key
))
313 * node found, bump node's reference count, remove it
314 * from its MRU list, and update stats.
316 pthread_mutex_lock(&node
->cn_mutex
);
318 if (node
->cn_count
== 0) {
319 ASSERT(node
->cn_priority
>= 0);
320 ASSERT(!list_empty(&node
->cn_mru
));
321 mru
= &cache
->c_mrus
[node
->cn_priority
];
322 pthread_mutex_lock(&mru
->cm_mutex
);
324 list_del_init(&node
->cn_mru
);
325 pthread_mutex_unlock(&mru
->cm_mutex
);
329 pthread_mutex_unlock(&node
->cn_mutex
);
330 pthread_mutex_unlock(&hash
->ch_mutex
);
332 pthread_mutex_lock(&cache
->c_mutex
);
334 pthread_mutex_unlock(&cache
->c_mutex
);
339 pthread_mutex_unlock(&hash
->ch_mutex
);
341 * not found, allocate a new entry
343 node
= cache_node_allocate(cache
, key
);
346 priority
= cache_shake(cache
, priority
, 0);
349 node
->cn_hashidx
= hashidx
;
351 /* add new node to appropriate hash */
352 pthread_mutex_lock(&hash
->ch_mutex
);
354 list_add(&node
->cn_hash
, &hash
->ch_list
);
355 pthread_mutex_unlock(&hash
->ch_mutex
);
363 struct cache
* cache
,
364 struct cache_node
* node
)
366 struct cache_mru
* mru
;
368 pthread_mutex_lock(&node
->cn_mutex
);
370 if (node
->cn_count
< 1) {
371 fprintf(stderr
, "%s: node put on refcount %u (node=%p)\n",
372 __FUNCTION__
, node
->cn_count
, node
);
375 if (!list_empty(&node
->cn_mru
)) {
376 fprintf(stderr
, "%s: node put on node (%p) in MRU list\n",
383 if (node
->cn_count
== 0) {
384 /* add unreferenced node to appropriate MRU for shaker */
385 mru
= &cache
->c_mrus
[node
->cn_priority
];
386 pthread_mutex_lock(&mru
->cm_mutex
);
388 list_add(&node
->cn_mru
, &mru
->cm_list
);
389 pthread_mutex_unlock(&mru
->cm_mutex
);
392 pthread_mutex_unlock(&node
->cn_mutex
);
396 cache_node_set_priority(
397 struct cache
* cache
,
398 struct cache_node
* node
,
403 else if (priority
> CACHE_MAX_PRIORITY
)
404 priority
= CACHE_MAX_PRIORITY
;
406 pthread_mutex_lock(&node
->cn_mutex
);
407 ASSERT(node
->cn_count
> 0);
408 node
->cn_priority
= priority
;
409 pthread_mutex_unlock(&node
->cn_mutex
);
413 cache_node_get_priority(
414 struct cache_node
* node
)
418 pthread_mutex_lock(&node
->cn_mutex
);
419 priority
= node
->cn_priority
;
420 pthread_mutex_unlock(&node
->cn_mutex
);
427 * Purge a specific node from the cache. Reference count must be zero.
431 struct cache
* cache
,
433 struct cache_node
* node
)
435 struct list_head
* head
;
436 struct list_head
* pos
;
437 struct list_head
* n
;
438 struct cache_hash
* hash
;
439 struct cache_mru
* mru
;
442 hash
= cache
->c_hash
+ cache
->hash(key
, cache
->c_hashsize
);
443 head
= &hash
->ch_list
;
444 pthread_mutex_lock(&hash
->ch_mutex
);
445 for (pos
= head
->next
, n
= pos
->next
; pos
!= head
;
446 pos
= n
, n
= pos
->next
) {
447 if ((struct cache_node
*)pos
!= node
)
450 pthread_mutex_lock(&node
->cn_mutex
);
451 count
= node
->cn_count
;
453 pthread_mutex_unlock(&node
->cn_mutex
);
456 mru
= &cache
->c_mrus
[node
->cn_priority
];
457 pthread_mutex_lock(&mru
->cm_mutex
);
458 list_del_init(&node
->cn_mru
);
460 pthread_mutex_unlock(&mru
->cm_mutex
);
462 pthread_mutex_unlock(&node
->cn_mutex
);
463 pthread_mutex_destroy(&node
->cn_mutex
);
464 list_del_init(&node
->cn_hash
);
469 pthread_mutex_unlock(&hash
->ch_mutex
);
472 pthread_mutex_lock(&cache
->c_mutex
);
474 pthread_mutex_unlock(&cache
->c_mutex
);
478 fprintf(stderr
, "%s: refcount was %u, not zero (node=%p)\n",
479 __FUNCTION__
, count
, node
);
483 fprintf(stderr
, "%s: purge node not found! (node=%p)\n",
492 * Purge all nodes from the cache. All reference counts must be zero.
496 struct cache
* cache
)
500 for (i
= 0; i
<= CACHE_MAX_PRIORITY
; i
++)
501 cache_shake(cache
, i
, 1);
504 if (cache
->c_count
!= 0) {
505 /* flush referenced nodes to disk */
507 fprintf(stderr
, "%s: shake on cache %p left %u nodes!?\n",
508 __FUNCTION__
, cache
, cache
->c_count
);
515 * Flush all nodes in the cache to disk.
519 struct cache
* cache
)
521 struct cache_hash
* hash
;
522 struct list_head
* head
;
523 struct list_head
* pos
;
524 struct cache_node
* node
;
530 for (i
= 0; i
< cache
->c_hashsize
; i
++) {
531 hash
= &cache
->c_hash
[i
];
533 pthread_mutex_lock(&hash
->ch_mutex
);
534 head
= &hash
->ch_list
;
535 for (pos
= head
->next
; pos
!= head
; pos
= pos
->next
) {
536 node
= (struct cache_node
*)pos
;
537 pthread_mutex_lock(&node
->cn_mutex
);
539 pthread_mutex_unlock(&node
->cn_mutex
);
541 pthread_mutex_unlock(&hash
->ch_mutex
);
545 #define HASH_REPORT (3 * HASH_CACHE_RATIO)
553 unsigned long count
, index
, total
;
554 unsigned long hash_bucket_lengths
[HASH_REPORT
+ 2];
556 if ((cache
->c_hits
+ cache
->c_misses
) == 0)
559 /* report cache summary */
560 fprintf(fp
, "%s: %p\n"
561 "Max supported entries = %u\n"
562 "Max utilized entries = %u\n"
563 "Active entries = %u\n"
564 "Hash table size = %u\n"
567 "Hit ratio = %5.2f\n",
575 (double)cache
->c_hits
* 100 /
576 (cache
->c_hits
+ cache
->c_misses
)
579 for (i
= 0; i
<= CACHE_MAX_PRIORITY
; i
++)
580 fprintf(fp
, "MRU %d entries = %6u (%3u%%)\n",
581 i
, cache
->c_mrus
[i
].cm_count
,
582 cache
->c_mrus
[i
].cm_count
* 100 / cache
->c_count
);
584 /* report hash bucket lengths */
585 bzero(hash_bucket_lengths
, sizeof(hash_bucket_lengths
));
587 for (i
= 0; i
< cache
->c_hashsize
; i
++) {
588 count
= cache
->c_hash
[i
].ch_count
;
589 if (count
> HASH_REPORT
)
590 index
= HASH_REPORT
+ 1;
593 hash_bucket_lengths
[index
]++;
597 for (i
= 0; i
< HASH_REPORT
+ 1; i
++) {
598 total
+= i
* hash_bucket_lengths
[i
];
599 if (hash_bucket_lengths
[i
] == 0)
601 fprintf(fp
, "Hash buckets with %2d entries %6ld (%3ld%%)\n",
602 i
, hash_bucket_lengths
[i
],
603 (i
* hash_bucket_lengths
[i
] * 100) / cache
->c_count
);
605 if (hash_bucket_lengths
[i
]) /* last report bucket is the overflow bucket */
606 fprintf(fp
, "Hash buckets with >%2d entries %6ld (%3ld%%)\n",
607 i
- 1, hash_bucket_lengths
[i
],
608 ((cache
->c_count
- total
) * 100) / cache
->c_count
);