/* libxfs/cache.c */
/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#include <xfs/platform_defs.h>
#include <xfs/list.h>
#include <xfs/cache.h>

#define CACHE_DEBUG 1
#undef CACHE_DEBUG
#define CACHE_DEBUG 1
#undef CACHE_ABORT
/* #define CACHE_ABORT 1 */

#define CACHE_SHAKE_COUNT       64

static unsigned int cache_generic_bulkrelse(struct cache *, struct list_head *);

struct cache *
cache_init(
        unsigned int            hashsize,
        struct cache_operations *cache_operations)
{
        struct cache *          cache;
        unsigned int            i, maxcount;

        maxcount = hashsize * HASH_CACHE_RATIO;

        if (!(cache = malloc(sizeof(struct cache))))
                return NULL;
        if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) {
                free(cache);
                return NULL;
        }

        cache->c_count = 0;
        cache->c_max = 0;
        cache->c_hits = 0;
        cache->c_misses = 0;
        cache->c_maxcount = maxcount;
        cache->c_hashsize = hashsize;
        cache->hash = cache_operations->hash;
        cache->alloc = cache_operations->alloc;
        cache->flush = cache_operations->flush;
        cache->relse = cache_operations->relse;
        cache->compare = cache_operations->compare;
        cache->bulkrelse = cache_operations->bulkrelse ?
                cache_operations->bulkrelse : cache_generic_bulkrelse;
        pthread_mutex_init(&cache->c_mutex, NULL);

        for (i = 0; i < hashsize; i++) {
                list_head_init(&cache->c_hash[i].ch_list);
                cache->c_hash[i].ch_count = 0;
                pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL);
        }

        for (i = 0; i <= CACHE_MAX_PRIORITY; i++) {
                list_head_init(&cache->c_mrus[i].cm_list);
                cache->c_mrus[i].cm_count = 0;
                pthread_mutex_init(&cache->c_mrus[i].cm_mutex, NULL);
        }
        return cache;
}

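/*
 * Usage sketch (illustrative only): how a client might fill in a
 * struct cache_operations and create a cache.  The "example_*" names
 * are hypothetical, and the callback signatures are inferred from the
 * call sites in this file; see xfs/cache.h for the authoritative
 * typedefs before relying on them.
 */
struct example_object {
        struct cache_node       ex_node;        /* must be the first member */
        unsigned long           ex_key;
};

static unsigned int
example_hash(cache_key_t key, unsigned int hashsize)
{
        return (unsigned int)((unsigned long)key % hashsize);
}

static struct cache_node *
example_alloc(cache_key_t key)
{
        struct example_object   *obj;

        if ((obj = calloc(1, sizeof(*obj))) == NULL)
                return NULL;    /* cache_node_allocate copes with NULL */
        obj->ex_key = (unsigned long)key;
        return &obj->ex_node;
}

static int
example_compare(struct cache_node *node, cache_key_t key)
{
        return ((struct example_object *)node)->ex_key == (unsigned long)key;
}

static void
example_relse(struct cache_node *node)
{
        free((struct example_object *)node);
}

static struct cache_operations example_cache_operations = {
        .hash           = example_hash,
        .alloc          = example_alloc,
        .relse          = example_relse,
        .compare        = example_compare,
        /* .flush omitted: cache_flush() skips a NULL flush callback */
        /* .bulkrelse omitted: cache_init() falls back to cache_generic_bulkrelse */
};

/*
 * A cache with 512 hash buckets holds at most 512 * HASH_CACHE_RATIO nodes:
 *
 *      struct cache *cache = cache_init(512, &example_cache_operations);
 */
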
void
cache_walk(
        struct cache *          cache,
        cache_walk_t            visit)
{
        struct cache_hash *     hash;
        struct list_head *      head;
        struct list_head *      pos;
        unsigned int            i;

        for (i = 0; i < cache->c_hashsize; i++) {
                hash = &cache->c_hash[i];
                head = &hash->ch_list;
                pthread_mutex_lock(&hash->ch_mutex);
                for (pos = head->next; pos != head; pos = pos->next)
                        visit((struct cache_node *)pos);
                pthread_mutex_unlock(&hash->ch_mutex);
        }
}

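/*
 * Usage sketch (illustrative only): a cache_walk() visitor.  Here it
 * tallies nodes that are currently unreferenced; the file-scope counter
 * is an assumption made for brevity, since the walk callback takes no
 * private cookie.
 */
static unsigned int example_idle_nodes;

static void
example_count_idle(
        struct cache_node *     node)
{
        if (node->cn_count == 0)
                example_idle_nodes++;
}

/* usage:  example_idle_nodes = 0;  cache_walk(cache, example_count_idle); */
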
#ifdef CACHE_ABORT
#define cache_abort()           abort()
#else
#define cache_abort()           do { } while (0)
#endif

#ifdef CACHE_DEBUG
static void
cache_zero_check(
        struct cache_node *     node)
{
        if (node->cn_count > 0) {
                fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n",
                        __FUNCTION__, node->cn_count, node);
                cache_abort();
        }
}
#define cache_destroy_check(c)  cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)  do { } while (0)
#endif

void
cache_destroy(
        struct cache *          cache)
{
        unsigned int            i;

        cache_destroy_check(cache);
        for (i = 0; i < cache->c_hashsize; i++) {
                list_head_destroy(&cache->c_hash[i].ch_list);
                pthread_mutex_destroy(&cache->c_hash[i].ch_mutex);
        }
        for (i = 0; i <= CACHE_MAX_PRIORITY; i++) {
                list_head_destroy(&cache->c_mrus[i].cm_list);
                pthread_mutex_destroy(&cache->c_mrus[i].cm_mutex);
        }
        pthread_mutex_destroy(&cache->c_mutex);
        free(cache->c_hash);
        free(cache);
}

static unsigned int
cache_generic_bulkrelse(
        struct cache *          cache,
        struct list_head *      list)
{
        struct cache_node *     node;
        unsigned int            count = 0;

        while (!list_empty(list)) {
                node = list_entry(list->next, struct cache_node, cn_mru);
                pthread_mutex_destroy(&node->cn_mutex);
                list_del_init(&node->cn_mru);
                cache->relse(node);
                count++;
        }

        return count;
}

/*
 * We've hit the limit on cache size, so we need to start reclaiming
 * nodes we've used. The MRU specified by the priority is shaken.
 * Returns new priority at end of the call (in case we call again).
 */
static unsigned int
cache_shake(
        struct cache *          cache,
        unsigned int            priority,
        int                     all)
{
        struct cache_mru *      mru;
        struct cache_hash *     hash;
        struct list_head        temp;
        struct list_head *      head;
        struct list_head *      pos;
        struct list_head *      n;
        struct cache_node *     node;
        unsigned int            count;

        ASSERT(priority <= CACHE_MAX_PRIORITY);
        if (priority > CACHE_MAX_PRIORITY)
                priority = 0;

        mru = &cache->c_mrus[priority];
        count = 0;
        list_head_init(&temp);
        head = &mru->cm_list;

        pthread_mutex_lock(&mru->cm_mutex);
        for (pos = head->prev, n = pos->prev; pos != head;
             pos = n, n = pos->prev) {
                node = list_entry(pos, struct cache_node, cn_mru);

                if (pthread_mutex_trylock(&node->cn_mutex) != 0)
                        continue;

                hash = cache->c_hash + node->cn_hashidx;
                if (pthread_mutex_trylock(&hash->ch_mutex) != 0) {
                        pthread_mutex_unlock(&node->cn_mutex);
                        continue;
                }
                ASSERT(node->cn_count == 0);
                ASSERT(node->cn_priority == priority);
                node->cn_priority = -1;

                list_move(&node->cn_mru, &temp);
                list_del_init(&node->cn_hash);
                hash->ch_count--;
                mru->cm_count--;
                pthread_mutex_unlock(&hash->ch_mutex);
                pthread_mutex_unlock(&node->cn_mutex);

                count++;
                if (!all && count == CACHE_SHAKE_COUNT)
                        break;
        }
        pthread_mutex_unlock(&mru->cm_mutex);

        if (count > 0) {
                cache->bulkrelse(cache, &temp);

                pthread_mutex_lock(&cache->c_mutex);
                cache->c_count -= count;
                pthread_mutex_unlock(&cache->c_mutex);
        }

        return (count == CACHE_SHAKE_COUNT) ? priority : ++priority;
}

/*
 * Allocate a new hash node (updating atomic counter in the process),
 * unless doing so will push us over the maximum cache size.
 */
static struct cache_node *
cache_node_allocate(
        struct cache *          cache,
        cache_key_t             key)
{
        unsigned int            nodesfree;
        struct cache_node *     node;

        pthread_mutex_lock(&cache->c_mutex);
        nodesfree = (cache->c_count < cache->c_maxcount);
        if (nodesfree) {
                cache->c_count++;
                if (cache->c_count > cache->c_max)
                        cache->c_max = cache->c_count;
        }
        cache->c_misses++;
        pthread_mutex_unlock(&cache->c_mutex);
        if (!nodesfree)
                return NULL;
        node = cache->alloc(key);
        if (node == NULL) {     /* uh-oh */
                pthread_mutex_lock(&cache->c_mutex);
                cache->c_count--;
                pthread_mutex_unlock(&cache->c_mutex);
                return NULL;
        }
        pthread_mutex_init(&node->cn_mutex, NULL);
        list_head_init(&node->cn_mru);
        node->cn_count = 1;
        node->cn_priority = 0;
        return node;
}

int
cache_overflowed(
        struct cache *          cache)
{
        return (cache->c_maxcount == cache->c_max);
}

/*
 * Lookup in the cache hash table.  With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns zero on a cache hit, one if a new node had to be allocated.
 * A node is _always_ returned, however.
 */
int
cache_node_get(
        struct cache *          cache,
        cache_key_t             key,
        struct cache_node **    nodep)
{
        struct cache_node *     node = NULL;
        struct cache_hash *     hash;
        struct cache_mru *      mru;
        struct list_head *      head;
        struct list_head *      pos;
        unsigned int            hashidx;
        int                     priority = 0;

        hashidx = cache->hash(key, cache->c_hashsize);
        hash = cache->c_hash + hashidx;
        head = &hash->ch_list;

        for (;;) {
                pthread_mutex_lock(&hash->ch_mutex);
                for (pos = head->next; pos != head; pos = pos->next) {
                        node = list_entry(pos, struct cache_node, cn_hash);
                        if (!cache->compare(node, key))
                                continue;
                        /*
                         * node found, bump node's reference count, remove it
                         * from its MRU list, and update stats.
                         */
                        pthread_mutex_lock(&node->cn_mutex);

                        if (node->cn_count == 0) {
                                ASSERT(node->cn_priority >= 0);
                                ASSERT(!list_empty(&node->cn_mru));
                                mru = &cache->c_mrus[node->cn_priority];
                                pthread_mutex_lock(&mru->cm_mutex);
                                mru->cm_count--;
                                list_del_init(&node->cn_mru);
                                pthread_mutex_unlock(&mru->cm_mutex);
                        }
                        node->cn_count++;

                        pthread_mutex_unlock(&node->cn_mutex);
                        pthread_mutex_unlock(&hash->ch_mutex);

                        pthread_mutex_lock(&cache->c_mutex);
                        cache->c_hits++;
                        pthread_mutex_unlock(&cache->c_mutex);

                        *nodep = node;
                        return 0;
                }
                pthread_mutex_unlock(&hash->ch_mutex);
                /*
                 * not found, allocate a new entry
                 */
                node = cache_node_allocate(cache, key);
                if (node)
                        break;
                priority = cache_shake(cache, priority, 0);
        }

        node->cn_hashidx = hashidx;

        /* add new node to appropriate hash */
        pthread_mutex_lock(&hash->ch_mutex);
        hash->ch_count++;
        list_add(&node->cn_hash, &hash->ch_list);
        pthread_mutex_unlock(&hash->ch_mutex);

        *nodep = node;
        return 1;
}

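/*
 * Usage sketch (illustrative only): the typical get/put cycle around
 * cache_node_get().  The "example_" names are hypothetical.
 */
static void
example_use_node(
        struct cache *          cache,
        cache_key_t             key)
{
        struct cache_node *     node;
        int                     miss;

        /* returns 0 on a cache hit, 1 if a new node was allocated */
        miss = cache_node_get(cache, key, &node);
        if (miss) {
                /* fresh node from the alloc callback: initialise it here */
        }

        /* optionally keep hot nodes away from the shaker for longer */
        cache_node_set_priority(cache, node, CACHE_MAX_PRIORITY);

        /* ... use the node; holding the reference pins it in the cache ... */

        /* drop the reference; at zero the node moves onto its MRU list */
        cache_node_put(cache, node);
}
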
void
cache_node_put(
        struct cache *          cache,
        struct cache_node *     node)
{
        struct cache_mru *      mru;

        pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
        if (node->cn_count < 1) {
                fprintf(stderr, "%s: node put on refcount %u (node=%p)\n",
                        __FUNCTION__, node->cn_count, node);
                cache_abort();
        }
        if (!list_empty(&node->cn_mru)) {
                fprintf(stderr, "%s: node put on node (%p) in MRU list\n",
                        __FUNCTION__, node);
                cache_abort();
        }
#endif
        node->cn_count--;

        if (node->cn_count == 0) {
                /* add unreferenced node to appropriate MRU for shaker */
                mru = &cache->c_mrus[node->cn_priority];
                pthread_mutex_lock(&mru->cm_mutex);
                mru->cm_count++;
                list_add(&node->cn_mru, &mru->cm_list);
                pthread_mutex_unlock(&mru->cm_mutex);
        }

        pthread_mutex_unlock(&node->cn_mutex);
}

void
cache_node_set_priority(
        struct cache *          cache,
        struct cache_node *     node,
        int                     priority)
{
        if (priority < 0)
                priority = 0;
        else if (priority > CACHE_MAX_PRIORITY)
                priority = CACHE_MAX_PRIORITY;

        pthread_mutex_lock(&node->cn_mutex);
        ASSERT(node->cn_count > 0);
        node->cn_priority = priority;
        pthread_mutex_unlock(&node->cn_mutex);
}

int
cache_node_get_priority(
        struct cache_node *     node)
{
        int                     priority;

        pthread_mutex_lock(&node->cn_mutex);
        priority = node->cn_priority;
        pthread_mutex_unlock(&node->cn_mutex);

        return priority;
}


/*
 * Purge a specific node from the cache.  Reference count must be zero.
 */
int
cache_node_purge(
        struct cache *          cache,
        cache_key_t             key,
        struct cache_node *     node)
{
        struct list_head *      head;
        struct list_head *      pos;
        struct list_head *      n;
        struct cache_hash *     hash;
        struct cache_mru *      mru;
        int                     count = -1;

        hash = cache->c_hash + cache->hash(key, cache->c_hashsize);
        head = &hash->ch_list;
        pthread_mutex_lock(&hash->ch_mutex);
        for (pos = head->next, n = pos->next; pos != head;
             pos = n, n = pos->next) {
                if ((struct cache_node *)pos != node)
                        continue;

                pthread_mutex_lock(&node->cn_mutex);
                count = node->cn_count;
                if (count != 0) {
                        pthread_mutex_unlock(&node->cn_mutex);
                        break;
                }
                mru = &cache->c_mrus[node->cn_priority];
                pthread_mutex_lock(&mru->cm_mutex);
                list_del_init(&node->cn_mru);
                mru->cm_count--;
                pthread_mutex_unlock(&mru->cm_mutex);

                pthread_mutex_unlock(&node->cn_mutex);
                pthread_mutex_destroy(&node->cn_mutex);
                list_del_init(&node->cn_hash);
                hash->ch_count--;
                cache->relse(node);
                break;
        }
        pthread_mutex_unlock(&hash->ch_mutex);

        if (count == 0) {
                pthread_mutex_lock(&cache->c_mutex);
                cache->c_count--;
                pthread_mutex_unlock(&cache->c_mutex);
        }
#ifdef CACHE_DEBUG
        if (count >= 1) {
                fprintf(stderr, "%s: refcount was %u, not zero (node=%p)\n",
                        __FUNCTION__, count, node);
                cache_abort();
        }
        if (count == -1) {
                fprintf(stderr, "%s: purge node not found! (node=%p)\n",
                        __FUNCTION__, node);
                cache_abort();
        }
#endif
        return (count == 0);
}

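/*
 * Usage sketch (illustrative only): evicting one object for good.  The
 * reference must be dropped first, because cache_node_purge() refuses
 * nodes whose reference count is still non-zero.
 */
static int
example_evict(
        struct cache *          cache,
        cache_key_t             key,
        struct cache_node *     node)
{
        /* assuming this was the last reference, the count drops to zero */
        cache_node_put(cache, node);
        return cache_node_purge(cache, key, node);      /* 1 if purged, 0 if not */
}
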
/*
 * Purge all nodes from the cache.  All reference counts must be zero.
 */
void
cache_purge(
        struct cache *          cache)
{
        int                     i;

        for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
                cache_shake(cache, i, 1);

#ifdef CACHE_DEBUG
        if (cache->c_count != 0) {
                /* flush referenced nodes to disk */
                cache_flush(cache);
                fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
                        __FUNCTION__, cache, cache->c_count);
                cache_abort();
        }
#endif
}

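/*
 * Usage sketch (illustrative only): tearing a cache down at exit.  All
 * outstanding references must have been put back before this runs.
 */
static void
example_teardown(
        struct cache *          cache)
{
        cache_flush(cache);     /* write everything back via the flush callback */
        cache_purge(cache);     /* shake every MRU list down to zero nodes */
        cache_destroy(cache);   /* free the hash table, mutexes and the cache */
}
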
/*
 * Flush all nodes in the cache to disk.
 */
void
cache_flush(
        struct cache *          cache)
{
        struct cache_hash *     hash;
        struct list_head *      head;
        struct list_head *      pos;
        struct cache_node *     node;
        int                     i;

        if (!cache->flush)
                return;

        for (i = 0; i < cache->c_hashsize; i++) {
                hash = &cache->c_hash[i];

                pthread_mutex_lock(&hash->ch_mutex);
                head = &hash->ch_list;
                for (pos = head->next; pos != head; pos = pos->next) {
                        node = (struct cache_node *)pos;
                        pthread_mutex_lock(&node->cn_mutex);
                        cache->flush(node);
                        pthread_mutex_unlock(&node->cn_mutex);
                }
                pthread_mutex_unlock(&hash->ch_mutex);
        }
}

#define HASH_REPORT     (3 * HASH_CACHE_RATIO)
void
cache_report(
        FILE                    *fp,
        const char              *name,
        struct cache            *cache)
{
        int                     i;
        unsigned long           count, index, total;
        unsigned long           hash_bucket_lengths[HASH_REPORT + 2];

        if ((cache->c_hits + cache->c_misses) == 0)
                return;

        /* report cache summary */
        fprintf(fp, "%s: %p\n"
                        "Max supported entries = %u\n"
                        "Max utilized entries = %u\n"
                        "Active entries = %u\n"
                        "Hash table size = %u\n"
                        "Hits = %llu\n"
                        "Misses = %llu\n"
                        "Hit ratio = %5.2f\n",
                        name, cache,
                        cache->c_maxcount,
                        cache->c_max,
                        cache->c_count,
                        cache->c_hashsize,
                        cache->c_hits,
                        cache->c_misses,
                        (double)cache->c_hits * 100 /
                                (cache->c_hits + cache->c_misses)
        );

        for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
                fprintf(fp, "MRU %d entries = %6u (%3u%%)\n",
                        i, cache->c_mrus[i].cm_count,
                        cache->c_mrus[i].cm_count * 100 / cache->c_count);

        /* report hash bucket lengths */
        memset(hash_bucket_lengths, 0, sizeof(hash_bucket_lengths));

        for (i = 0; i < cache->c_hashsize; i++) {
                count = cache->c_hash[i].ch_count;
                if (count > HASH_REPORT)
                        index = HASH_REPORT + 1;
                else
                        index = count;
                hash_bucket_lengths[index]++;
        }

        total = 0;
        for (i = 0; i < HASH_REPORT + 1; i++) {
                total += i * hash_bucket_lengths[i];
                if (hash_bucket_lengths[i] == 0)
                        continue;
                fprintf(fp, "Hash buckets with %2d entries %6ld (%3ld%%)\n",
                        i, hash_bucket_lengths[i],
                        (i * hash_bucket_lengths[i] * 100) / cache->c_count);
        }
        if (hash_bucket_lengths[i])     /* last report bucket is the overflow bucket */
                fprintf(fp, "Hash buckets with >%2d entries %6ld (%3ld%%)\n",
                        i - 1, hash_bucket_lengths[i],
                        ((cache->c_count - total) * 100) / cache->c_count);
}
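
/*
 * Usage sketch (illustrative only): emitting the statistics above once a
 * tool has finished with its cache.  The stream and label are arbitrary.
 */
static void
example_report(
        struct cache *          cache)
{
        if (cache_overflowed(cache))
                fprintf(stderr, "example: cache filled to its configured maximum\n");
        cache_report(stderr, "example cache", cache);
}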