/*
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>

#include <xfs/platform_defs.h>
#include <xfs/list.h>
#include <xfs/cache.h>

#define CACHE_DEBUG 1

static unsigned int cache_generic_bulkrelse(struct cache *, struct list_head *);

/*
 * Initialise a new cache: allocate the hash table and copy in the
 * caller's operations vector.  The maximum number of nodes is derived
 * from the hash size, at eight nodes per hash bucket.
 */
struct cache *
cache_init(
	unsigned int		hashsize,
	struct cache_operations *cache_operations)
{
	struct cache *		cache;
	unsigned int		i, maxcount;

	maxcount = hashsize << 3;	/* 8 nodes per hash bucket */

	if (!(cache = malloc(sizeof(struct cache))))
		return NULL;
	if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) {
		free(cache);
		return NULL;
	}

	cache->c_count = 0;
	cache->c_max = 0;
	cache->c_hits = 0;
	cache->c_misses = 0;
	cache->c_maxcount = maxcount;
	cache->c_hashsize = hashsize;
	cache->hash = cache_operations->hash;
	cache->alloc = cache_operations->alloc;
	cache->relse = cache_operations->relse;
	cache->compare = cache_operations->compare;
	cache->bulkrelse = cache_operations->bulkrelse ?
		cache_operations->bulkrelse : cache_generic_bulkrelse;
	pthread_mutex_init(&cache->c_mutex, NULL);

	for (i = 0; i < hashsize; i++) {
		list_head_init(&cache->c_hash[i].ch_list);
		pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL);
	}
	return cache;
}

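/*
 * Example usage (an illustrative sketch only; the my_* callbacks are
 * hypothetical, not part of this file).  Each callback fills in the
 * corresponding function pointer in struct cache_operations:
 *
 *	static struct cache_operations my_cache_operations = {
 *		.hash = my_hash,	// maps a key to a hash bucket index
 *		.alloc = my_alloc,	// allocates one cache_node
 *		.relse = my_relse,	// releases one cache_node
 *		.compare = my_compare,	// nonzero if node matches key
 *		.bulkrelse = NULL,	// NULL selects the generic version
 *	};
 *
 *	struct cache *cache = cache_init(512, &my_cache_operations);
 */
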
/*
 * Walk the entire cache, applying the visit callback to every node
 * (cache_zero_check below is one example of such a callback).  Each
 * hash chain is locked while it is walked.
 */
void
cache_walk(
	struct cache *		cache,
	cache_walk_t		visit)
{
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	unsigned int		i;

	for (i = 0; i < cache->c_hashsize; i++) {
		hash = &cache->c_hash[i];
		head = &hash->ch_list;
		pthread_mutex_lock(&hash->ch_mutex);
		for (pos = head->next; pos != head; pos = pos->next)
			visit((struct cache_node *)pos);
		pthread_mutex_unlock(&hash->ch_mutex);
	}
}

#ifdef CACHE_DEBUG
/* Report any node whose reference count is not zero (a cache_walk callback). */
static void
cache_zero_check(
	struct cache_node *	node)
{
	if (node->cn_count > 0) {
		fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		/* abort(); */
	}
}
#define cache_destroy_check(c)	cache_walk((c), cache_zero_check)
#else
#define cache_destroy_check(c)	do { } while (0)
#endif

/*
 * Tear down a cache: check (in debug builds) that all reference counts
 * are zero, then destroy all per-bucket state and free the structure.
 */
void
cache_destroy(
	struct cache *		cache)
{
	unsigned int		i;

	cache_destroy_check(cache);
	for (i = 0; i < cache->c_hashsize; i++) {
		list_head_destroy(&cache->c_hash[i].ch_list);
		pthread_mutex_destroy(&cache->c_hash[i].ch_mutex);
	}
	pthread_mutex_destroy(&cache->c_mutex);
	free(cache->c_hash);
	free(cache);
}

/*
 * Release a single node if its reference count is zero.  Returns the
 * reference count seen (so nonzero means the node was busy), or -1 if
 * the node was not found on its hash chain.
 */
static int
cache_shake_node(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node *	node)
{
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_hash *	hash;
	int			count = -1;

	hash = cache->c_hash + cache->hash(key, cache->c_hashsize);
	head = &hash->ch_list;
	pthread_mutex_lock(&hash->ch_mutex);
	for (pos = head->next, n = pos->next;
	     pos != head;
	     pos = n, n = pos->next) {
		if ((struct cache_node *)pos != node)
			continue;
		pthread_mutex_lock(&node->cn_mutex);
		count = node->cn_count;
		pthread_mutex_unlock(&node->cn_mutex);
		if (count != 0)
			break;
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_list);
		cache->relse(node);
		break;
	}
	pthread_mutex_unlock(&hash->ch_mutex);
	return count;
}

/*
 * We've hit the limit on cache size, so we need to start reclaiming
 * nodes we've used.  This reclaims from the one given hash bucket
 * only.  Returns the number of freed nodes; it is left to the caller
 * to update the global count of used nodes for the cache.
 * The hash chain lock is held on entry for the hash list argument and
 * is dropped before returning.
 * We walk backwards through the hash (remembering we keep recently
 * used nodes toward the front) until we hit an in-use node.  We'll
 * stop there if it's a low-priority call, but keep going if it's not.
 */
static unsigned int
cache_shake_hash(
	struct cache *		cache,
	struct cache_hash *	hash,
	unsigned int		priority)
{
	struct list_head	temp;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_node *	node;
	unsigned int		inuse = 0;

	list_head_init(&temp);
	head = &hash->ch_list;
	for (pos = head->prev, n = pos->prev;
	     pos != head;
	     pos = n, n = pos->prev) {
		node = (struct cache_node *)pos;
		pthread_mutex_lock(&node->cn_mutex);
		if (!(inuse = (node->cn_count > 0)))
			list_move(&node->cn_list, &temp);
		pthread_mutex_unlock(&node->cn_mutex);
		if (inuse && !priority)
			break;
	}
	pthread_mutex_unlock(&hash->ch_mutex);
	return cache->bulkrelse(cache, &temp);
}

/*
 * Generic implementation of bulk release, which just iterates over
 * the list calling the single node relse routine for each node.
 */
static unsigned int
cache_generic_bulkrelse(
	struct cache *		cache,
	struct list_head *	list)
{
	struct cache_node *	node;
	unsigned int		count = 0;

	while (!list_empty(list)) {
		node = (struct cache_node *)list->next;
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_list);
		cache->relse(node);
		count++;
	}
	return count;
}

/*
 * We've hit the limit on cache size, so we need to start reclaiming
 * nodes we've used.  Start by shaking this hash chain only, unless
 * the shake priority has been increased already.
 * The hash chain lock is held on entry for the hash list argument and
 * is dropped before returning.
 * Returns the new priority at end of the call, so a caller that still
 * cannot allocate can retry with the returned, higher priority (as
 * cache_node_get below does).
 */
static unsigned int
cache_shake(
	struct cache *		cache,
	struct cache_hash *	hash,
	unsigned int		priority)
{
	unsigned int		count;
	unsigned int		i;

	if (!priority) {		/* do just one */
		count = cache_shake_hash(cache, hash, priority);
	} else {			/* use a bigger hammer */
		pthread_mutex_unlock(&hash->ch_mutex);
		for (count = 0, i = 0; i < cache->c_hashsize; i++) {
			hash = &cache->c_hash[i];
			pthread_mutex_lock(&hash->ch_mutex);
			count += cache_shake_hash(cache, hash, priority - 1);
		}
	}
	if (count) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= count;
		pthread_mutex_unlock(&cache->c_mutex);
	}
	return ++priority;
}

/*
 * Allocate a new hash node (updating the node counter under the cache
 * lock in the process), unless doing so will push us over the maximum
 * cache size.
 */
struct cache_node *
cache_node_allocate(
	struct cache *		cache,
	struct cache_hash *	hashlist)
{
	unsigned int		nodesfree;
	struct cache_node *	node;

	pthread_mutex_lock(&cache->c_mutex);
	if ((nodesfree = (cache->c_count < cache->c_maxcount))) {
		cache->c_count++;
		if (cache->c_count > cache->c_max)
			cache->c_max = cache->c_count;
	}
	cache->c_misses++;
	pthread_mutex_unlock(&cache->c_mutex);
	if (!nodesfree)
		return NULL;
	if (!(node = cache->alloc())) {	/* uh-oh */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
		return NULL;
	}
	pthread_mutex_init(&node->cn_mutex, NULL);
	list_head_init(&node->cn_list);
	node->cn_count = 1;
	return node;
}

/*
 * Lookup in the cache hash table.  With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns one if a new node had to be allocated (a miss), zero if the
 * node was found in the cache (a hit).  A node is _always_ returned,
 * however.
 */
int
cache_node_get(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node **	nodep)
{
	struct cache_node *	node = NULL;
	struct cache_hash *	hash;
	struct list_head *	head;
	struct list_head *	pos;
	int			priority = 0;
	int			allocated = 0;

	hash = cache->c_hash + cache->hash(key, cache->c_hashsize);
	head = &hash->ch_list;

 restart:
	pthread_mutex_lock(&hash->ch_mutex);
	for (pos = head->next; pos != head; pos = pos->next) {
		node = (struct cache_node *)pos;
		if (cache->compare(node, key) == 0)	/* zero: no match */
			continue;
		pthread_mutex_lock(&node->cn_mutex);
		node->cn_count++;
		pthread_mutex_unlock(&node->cn_mutex);
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_hits++;
		pthread_mutex_unlock(&cache->c_mutex);
		break;
	}
	if (pos == head) {
		node = cache_node_allocate(cache, hash);
		if (!node) {
			priority = cache_shake(cache, hash, priority);
			goto restart;
		}
		allocated = 1;
	}
	/* looked at it, move to hash list head */
	list_move(&node->cn_list, &hash->ch_list);
	pthread_mutex_unlock(&hash->ch_mutex);
	*nodep = node;
	return allocated;
}

/*
 * Release one reference to a cache node.  The node itself is not freed
 * here; unreferenced nodes are reclaimed later by cache_shake.
 */
void
cache_node_put(
	struct cache_node *	node)
{
	pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
	if (node->cn_count < 1) {
		fprintf(stderr, "%s: node put on refcount %u (node=%p)\n",
			__FUNCTION__, node->cn_count, node);
		/* abort(); */
	}
#endif
	node->cn_count--;
	pthread_mutex_unlock(&node->cn_mutex);
}
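
/*
 * Typical lookup pattern (an illustrative sketch; "key" and
 * my_node_init() are hypothetical):
 *
 *	struct cache_node *node;
 *
 *	if (cache_node_get(cache, key, &node)) {
 *		// miss: node was freshly allocated, initialise it
 *		my_node_init(node, key);
 *	}
 *	// ... use node; a reference is held until we put it ...
 *	cache_node_put(node);
 */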

/*
 * Purge a specific node from the cache.  Reference count must be zero.
 */
int
cache_node_purge(
	struct cache *		cache,
	cache_key_t		key,
	struct cache_node *	node)
{
	int			refcount;

	refcount = cache_shake_node(cache, key, node);
	if (refcount == 0) {
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
	}
#ifdef CACHE_DEBUG
	if (refcount >= 1) {
		fprintf(stderr, "%s: refcount was %u, not zero (node=%p)\n",
			__FUNCTION__, refcount, node);
		/* abort(); */
	}
	if (refcount == -1) {
		fprintf(stderr, "%s: purge node not found! (node=%p)\n",
			__FUNCTION__, node);
		/* abort(); */
	}
#endif
	return (refcount == 0);
}

/*
 * Purge all nodes from the cache.  All reference counts must be zero.
 */
void
cache_purge(
	struct cache *		cache)
{
	struct cache_hash *	hash;

	/* cache_shake expects the chain lock held on entry, so take one */
	hash = &cache->c_hash[0];
	pthread_mutex_lock(&hash->ch_mutex);
	cache_shake(cache, hash, (unsigned int)-1);
#ifdef CACHE_DEBUG
	if (cache->c_count != 0) {
		fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
			__FUNCTION__, cache, cache->c_count);
		/* abort(); */
	}
#endif
}

/*
 * Dump cache statistics to stderr.  Guard the hit-ratio division so an
 * unused cache (no hits or misses yet) does not divide by zero, and do
 * the arithmetic in floating point so the ratio is not truncated.
 */
void
cache_report(const char *name, struct cache * cache)
{
	unsigned long long	total = cache->c_hits + cache->c_misses;

	fprintf(stderr, "%s: %p\n"
			"Max supported entries = %u\n"
			"Max utilized entries = %u\n"
			"Active entries = %u\n"
			"Hash table size = %u\n"
			"Hits = %llu\n"
			"Misses = %llu\n"
			"Hit ratio = %5.2f\n",
			name, cache,
			cache->c_maxcount,
			cache->c_max,
			cache->c_count,
			cache->c_hashsize,
			cache->c_hits,
			cache->c_misses,
			total ? (double)cache->c_hits * 100.0 / total : 0.0);
}
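
/*
 * Typical teardown sequence (an illustrative sketch): drop all node
 * references first, then purge every node and destroy the cache.
 *
 *	cache_purge(cache);			// refcounts must all be zero
 *	cache_report("my_cache", cache);	// optional statistics dump
 *	cache_destroy(cache);
 */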