From: Nathan Scott Date: Thu, 18 May 2006 15:49:09 +0000 (+0000) Subject: Initial version of a generic cache, which will provide a buffer/inode cache shortly... X-Git-Tag: v2.8.0~6 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e80aa729ad5684225fd76d107ea08da80b4c8356;p=thirdparty%2Fxfsprogs-dev.git Initial version of a generic cache, which will provide a buffer/inode cache shortly. Uses pthread-based locking for mutex on shared structures. Merge of master-melb:xfs-cmds:25963a by kenmcd. --- diff --git a/include/Makefile b/include/Makefile index 9bb00219b..4aab957ea 100644 --- a/include/Makefile +++ b/include/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2000-2005 Silicon Graphics, Inc. +# Copyright (c) 2000-2006 Silicon Graphics, Inc. # All Rights Reserved. # # This program is free software; you can redistribute it and/or @@ -18,7 +18,7 @@ TOPDIR = .. include $(TOPDIR)/include/builddefs -HFILES = handle.h jdm.h libxfs.h libxlog.h swab.h xqm.h \ +HFILES = handle.h jdm.h libxfs.h libxlog.h parent.h xfs.h xqm.h \ xfs_ag.h xfs_alloc.h xfs_alloc_btree.h xfs_arch.h xfs_attr_leaf.h \ xfs_attr_sf.h xfs_bit.h xfs_bmap.h xfs_bmap_btree.h xfs_btree.h \ xfs_buf_item.h xfs_da_btree.h xfs_dfrag.h xfs_dinode.h \ @@ -29,12 +29,12 @@ HFILES = handle.h jdm.h libxfs.h libxlog.h swab.h xqm.h \ xfs_log.h xfs_log_priv.h xfs_log_recover.h xfs_mount.h xfs_quota.h \ xfs_rtalloc.h xfs_sb.h xfs_trans.h xfs_trans_space.h xfs_types.h -HFILES += parent.h $(PKG_PLATFORM).h xfs.h +HFILES += $(PKG_PLATFORM).h swab.h PHFILES = darwin.h freebsd.h irix.h linux.h DKHFILES = volume.h fstyp.h dvh.h LSRCFILES = $(shell echo $(PHFILES) | sed -e "s/$(PKG_PLATFORM).h//g") LSRCFILES += platform_defs.h.in builddefs.in buildmacros buildrules $(DKHFILES) -LSRCFILES += command.h input.h path.h project.h +LSRCFILES += cache.h command.h input.h list.h path.h project.h LDIRT = xfs disk default install : diff --git a/include/cache.h b/include/cache.h new file mode 100644 index 
000000000..85dce63a6 --- /dev/null +++ b/include/cache.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __CACHE_H__ +#define __CACHE_H__ + +/* + * Simple, generic implementation of a cache (arbitrary data). + * Provides a hash table with a capped number of cache entries. + */ + +struct cache; +struct cache_hash; +struct cache_node; + +typedef void *cache_key_t; +typedef void (*cache_walk_t)(struct cache_node *); +typedef struct cache_node * (*cache_node_alloc_t)(void); +typedef void (*cache_node_relse_t)(struct cache_node *); +typedef unsigned int (*cache_node_hash_t)(cache_key_t, unsigned int); +typedef int (*cache_node_compare_t)(struct cache_node *, cache_key_t); + +struct cache_operations { + cache_node_hash_t hash; + cache_node_alloc_t alloc; + cache_node_relse_t relse; + cache_node_compare_t compare; +}; + +struct cache { + unsigned int c_maxcount; /* max cache nodes */ + unsigned int c_count; /* count of nodes */ + pthread_mutex_t c_mutex; /* node count mutex */ + cache_node_hash_t hash; /* node hash function */ + cache_node_alloc_t alloc; /* allocation function */ + cache_node_relse_t relse; /* memory free function */ + cache_node_compare_t compare; /* comparison routine */ + unsigned int c_hashsize; /* hash bucket count */ + struct cache_hash *c_hash; /* hash 
table buckets */ +}; + +struct cache_hash { + struct list_head ch_list; /* hash chain head */ + pthread_mutex_t ch_mutex; /* hash chain mutex */ +}; + +struct cache_node { + struct list_head cn_list; /* hash chain */ + unsigned int cn_count; /* reference count */ + pthread_mutex_t cn_mutex; /* refcount mutex */ +}; + +struct cache *cache_init(unsigned int, struct cache_operations *); +void cache_destroy(struct cache *); +void cache_walk(struct cache *, cache_walk_t); +void cache_purge(struct cache *); + +int cache_node_get(struct cache *, cache_key_t, struct cache_node **); +void cache_node_put(struct cache_node *); +int cache_node_purge(struct cache *, cache_key_t, struct cache_node *); + +#endif /* __CACHE_H__ */ diff --git a/include/list.h b/include/list.h new file mode 100644 index 000000000..ad6ddfd31 --- /dev/null +++ b/include/list.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __LIST_H__ +#define __LIST_H__ + +/* + * Simple, generic doubly-linked list implementation. 
#ifndef __LIST_H__
#define __LIST_H__

/*
 * Simple, generic doubly-linked circular list, kernel-style: the
 * struct is embedded in the cached object and cast back by callers.
 */

struct list_head {
	struct list_head *next;
	struct list_head *prev;
};

/* Make "list" an empty list: both links point back at itself. */
static inline void list_head_init(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/* Poison the links of a head that is going away. */
static inline void list_head_destroy(struct list_head *list)
{
	list->next = NULL;
	list->prev = NULL;
}

/* Splice "entry" between two known-adjacent nodes. */
static inline void __list_add(struct list_head *entry,
		struct list_head *before, struct list_head *after)
{
	entry->prev = before;
	entry->next = after;
	before->next = entry;
	after->prev = entry;
}

/* Insert "entry" immediately after "head" (front of the list). */
static inline void list_add(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head, head->next);
}

/* Insert "entry" immediately before "head", i.e. at the tail. */
static inline void list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

/* Join two known-adjacent nodes, unlinking whatever sat between. */
static inline void __list_del(struct list_head *before, struct list_head *after)
{
	before->next = after;
	after->prev = before;
}

/* Unlink "entry" and reinitialise it as an empty list of its own. */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	list_head_init(entry);
}

/* Unlink "list" from wherever it is and reinsert it after "head". */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/* True when "head" points at itself, i.e. holds no entries. */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#endif	/* __LIST_H__ */
@@ -11,7 +11,7 @@ LT_REVISION = 0 LT_AGE = 0 HFILES = xfs.h init.h -CFILES = bit.c init.c logitem.c rdwr.c trans.c util.c \ +CFILES = bit.c cache.c init.c logitem.c rdwr.c trans.c util.c \ xfs_alloc.c xfs_ialloc.c xfs_rtalloc.c \ xfs_inode.c xfs_btree.c xfs_alloc_btree.c xfs_ialloc_btree.c \ xfs_bmap_btree.c xfs_da_btree.c xfs_dir.c xfs_dir_leaf.c \ diff --git a/libxfs/cache.c b/libxfs/cache.c new file mode 100644 index 000000000..93bb0e644 --- /dev/null +++ b/libxfs/cache.c @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define CACHE_DEBUG 1 + +struct cache * +cache_init( + unsigned int hashsize, + struct cache_operations *cache_operations) +{ + struct cache * cache; + unsigned int i, maxcount; + + maxcount = hashsize << 3; /* 8 nodes per hash bucket */ + + if (!(cache = malloc(sizeof(struct cache)))) + return NULL; + if (!(cache->c_hash = calloc(hashsize, sizeof(struct cache_hash)))) { + free(cache); + return NULL; + } + + cache->c_count = 0; + cache->c_maxcount = maxcount; + cache->c_hashsize = hashsize; + cache->hash = cache_operations->hash; + cache->alloc = cache_operations->alloc; + cache->relse = cache_operations->relse; + cache->compare = cache_operations->compare; + pthread_mutex_init(&cache->c_mutex, NULL); + + for (i = 0; i < hashsize; i++) { + list_head_init(&cache->c_hash[i].ch_list); + pthread_mutex_init(&cache->c_hash[i].ch_mutex, NULL); + } + return cache; +} + +void +cache_walk( + struct cache * cache, + cache_walk_t visit) +{ + struct cache_hash * hash; + struct list_head * head; + struct list_head * pos; + unsigned int i; + + for (i = 0; i < cache->c_hashsize; i++) { + hash = &cache->c_hash[i]; + head = &hash->ch_list; + pthread_mutex_lock(&hash->ch_mutex); + for (pos = head->next; pos != head; pos = pos->next) + visit((struct cache_node *)pos); + pthread_mutex_unlock(&hash->ch_mutex); + } +} + +#ifdef CACHE_DEBUG +static void +cache_zero_check( + struct cache_node * node) +{ + if (node->cn_count > 0) { + fprintf(stderr, "%s: refcount is %u, not zero (node=%p)\n", + __FUNCTION__, node->cn_count, node); + /* abort(); */ + } +} +#define cache_destroy_check(c) cache_walk((c), cache_zero_check) +#else +#define cache_destroy_check(c) do { } while (0) +#endif + +void 
+cache_destroy( + struct cache * cache) +{ + unsigned int i; + + cache_destroy_check(cache); + for (i = 0; i < cache->c_hashsize; i++) { + list_head_destroy(&cache->c_hash[i].ch_list); + pthread_mutex_destroy(&cache->c_hash[i].ch_mutex); + } + pthread_mutex_destroy(&cache->c_mutex); + free(cache->c_hash); + free(cache); +} + +static int +cache_shake_node( + struct cache * cache, + cache_key_t key, + struct cache_node * node) +{ + struct list_head * head; + struct list_head * pos; + struct list_head * n; + struct cache_hash * hash; + int count = -1; + + hash = cache->c_hash + cache->hash(key, cache->c_hashsize); + head = &hash->ch_list; + pthread_mutex_lock(&hash->ch_mutex); + for (pos = head->next, n = pos->next; + pos != head; + pos = n, n = pos->next) { + if ((struct cache_node *)pos != node) + continue; + pthread_mutex_lock(&node->cn_mutex); + count = node->cn_count; + pthread_mutex_unlock(&node->cn_mutex); + if (count != 0) + break; + pthread_mutex_destroy(&node->cn_mutex); + list_del_init(&node->cn_list); + cache->relse(node); + break; + } + pthread_mutex_unlock(&hash->ch_mutex); + return count; +} + +/* + * We've hit the limit on cache size, so we need to start reclaiming + * nodes we've used. This reclaims from the one given hash bucket + * only. Returns the number of freed up nodes, its left to the + * caller to updates the global counter of used nodes for the cache. + * The hash chain lock is held for the hash list argument, must be + * dropped before returning. + * We walk backwards through the hash (remembering we keep recently + * used nodes toward the front) until we hit an in-use node. We'll + * stop there if its a low priority call but keep going if its not. 
 */
static unsigned int
cache_shake_hash(
	struct cache *		cache,
	struct cache_hash *	hash,
	unsigned int		priority)
{
	struct list_head	temp;
	struct list_head *	head;
	struct list_head *	pos;
	struct list_head *	n;
	struct cache_node *	node;
	unsigned int		inuse = 0;
	unsigned int		count = 0;

	/* Collect reclaimable nodes on a private list first, so the
	 * client relse() callbacks run without the chain lock held. */
	list_head_init(&temp);
	head = &hash->ch_list;
	/* Walk backwards (LRU end) with "n" kept a step ahead, since
	 * list_move() below unlinks "pos" from the chain. */
	for (pos = head->prev, n = pos->prev;
	     pos != head;
	     pos = n, n = pos->prev) {
		node = (struct cache_node *)pos;
		pthread_mutex_lock(&node->cn_mutex);
		if (!(inuse = (node->cn_count > 0)))
			list_move(&node->cn_list, &temp);
		pthread_mutex_unlock(&node->cn_mutex);
		/* Low priority (0): give up at the first in-use node. */
		if (inuse && !priority)
			break;
	}
	/* Caller acquired this chain lock; contract is we drop it. */
	pthread_mutex_unlock(&hash->ch_mutex);
	while (!list_empty(&temp)) {
		node = (struct cache_node *)temp.next;
		pthread_mutex_destroy(&node->cn_mutex);
		list_del_init(&node->cn_list);
		cache->relse(node);
		count++;
	}
	return count;
}

/*
 * We've hit the limit on cache size, so we need to start reclaiming
 * nodes we've used.  Start by shaking this hash chain only, unless
 * the shake priority has been increased already.
 * The hash chain lock is held on entry for the hash list argument,
 * and is dropped before returning.
 * Returns new priority at end of the call (in case we call again).
 */
static unsigned int
cache_shake(
	struct cache *		cache,
	struct cache_hash *	hash,
	unsigned int		priority)
{
	unsigned int	count;
	unsigned int	i;

	if (!priority) {		/* do just one */
		/* cache_shake_hash drops the chain lock we hold. */
		count = cache_shake_hash(cache, hash, priority);
	} else {			/* use a bigger hammer */
		/* Drop the caller's chain lock, then shake every
		 * bucket in turn at a reduced priority. */
		pthread_mutex_unlock(&hash->ch_mutex);
		for (count = 0, i = 0; i < cache->c_hashsize; i++) {
			hash = &cache->c_hash[i];
			pthread_mutex_lock(&hash->ch_mutex);
			count += cache_shake_hash(cache, hash, priority - 1);
		}
	}
	if (count) {
		/* Fold the per-bucket frees into the global count. */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count -= count;
		pthread_mutex_unlock(&cache->c_mutex);
	}
	return ++priority;
}

/*
 * Allocate a new hash node (updating atomic counter in the process),
 * unless doing so will push us over the maximum cache size.
 * Returns NULL when at capacity or when the client alloc() fails;
 * otherwise a node with refcount 1, not yet on any hash chain.
 * NOTE(review): "hashlist" is currently unused -- presumably reserved
 * for a later change; confirm before removing.
 */
struct cache_node *
cache_node_allocate(
	struct cache *		cache,
	struct cache_hash *	hashlist)
{
	unsigned int		nodesfree;
	struct cache_node *	node;

	/* Reserve a slot under the count lock before allocating. */
	pthread_mutex_lock(&cache->c_mutex);
	if ((nodesfree = (cache->c_count < cache->c_maxcount)))
		cache->c_count++;
	pthread_mutex_unlock(&cache->c_mutex);
	if (!nodesfree)
		return NULL;
	if (!(node = cache->alloc())) {	/* uh-oh */
		/* Allocation failed: give the reserved slot back. */
		pthread_mutex_lock(&cache->c_mutex);
		cache->c_count--;
		pthread_mutex_unlock(&cache->c_mutex);
		return NULL;
	}
	pthread_mutex_init(&node->cn_mutex, NULL);
	list_head_init(&node->cn_list);
	node->cn_count = 1;
	return node;
}

/*
 * Lookup in the cache hash table.  With any luck we'll get a cache
 * hit, in which case this will all be over quickly and painlessly.
 * Otherwise, we allocate a new node, taking care not to expand the
 * cache beyond the requested maximum size (shrink it if it would).
 * Returns one if hit in cache, otherwise zero.  A node is _always_
 * returned, however.
+ */ +int +cache_node_get( + struct cache * cache, + cache_key_t key, + struct cache_node ** nodep) +{ + struct cache_node * node = NULL; + struct cache_hash * hash; + struct list_head * head; + struct list_head * pos; + int priority = 0; + int allocated = 0; + + hash = cache->c_hash + cache->hash(key, cache->c_hashsize); + head = &hash->ch_list; + + restart: + pthread_mutex_lock(&hash->ch_mutex); + for (pos = head->next; pos != head; pos = pos->next) { + node = (struct cache_node *)pos; + if (cache->compare(node, key) == 0) + continue; + pthread_mutex_lock(&node->cn_mutex); + node->cn_count++; + pthread_mutex_unlock(&node->cn_mutex); + break; + } + if (pos == head) { + node = cache_node_allocate(cache, hash); + if (!node) { + priority = cache_shake(cache, hash, priority); + goto restart; + } + allocated = 1; + } + /* looked at it, move to hash list head */ + list_move(&node->cn_list, &hash->ch_list); + pthread_mutex_unlock(&hash->ch_mutex); + *nodep = node; + return allocated; +} + +void +cache_node_put( + struct cache_node * node) +{ + pthread_mutex_lock(&node->cn_mutex); +#ifdef CACHE_DEBUG + if (node->cn_count < 1) { + fprintf(stderr, "%s: node put on refcount %u (node=%p)\n", + __FUNCTION__, node->cn_count, node); + /* abort(); */ + } +#endif + node->cn_count--; + pthread_mutex_unlock(&node->cn_mutex); +} + +/* + * Purge a specific node from the cache. Reference count must be zero. + */ +int +cache_node_purge( + struct cache * cache, + cache_key_t key, + struct cache_node * node) +{ + int refcount; + + refcount = cache_shake_node(cache, key, node); + if (refcount == 0) { + pthread_mutex_lock(&cache->c_mutex); + cache->c_count--; + pthread_mutex_unlock(&cache->c_mutex); + } +#ifdef CACHE_DEBUG + if (refcount >= 1) { + fprintf(stderr, "%s: refcount was %u, not zero (node=%p)\n", + __FUNCTION__, refcount, node); + /* abort(); */ + } + if (refcount == -1) { + fprintf(stderr, "%s: purge node not found! 
(node=%p)\n", + __FUNCTION__, node); + /* abort(); */ + } +#endif + return (refcount == 0); +} + +/* + * Purge all nodes from the cache. All reference counts must be zero. + */ +void +cache_purge( + struct cache * cache) +{ + struct cache_hash * hash; + + hash = &cache->c_hash[0]; + pthread_mutex_lock(&hash->ch_mutex); + cache_shake(cache, hash, (unsigned int)-1); +#ifdef CACHE_DEBUG + if (cache->c_count != 0) { + fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n", + __FUNCTION__, cache, cache->c_count); + /* abort(); */ + } +#endif +}