#define HASH_CACHE_RATIO 8
+/*
+ * Cache priorities range from BASE to MAX.
+ *
+ * For prefetch support, the top half of the range starts at
+ * CACHE_PREFETCH_PRIORITY and every time the buffer is fetched
+ * and is at or above this priority level, it is reduced to
+ * below this level (refer to libxfs_getbuf).
+ */
+
+#define CACHE_BASE_PRIORITY 0
+#define CACHE_PREFETCH_PRIORITY 8
#define CACHE_MAX_PRIORITY 15
/*
void cache_flush(struct cache *);
int cache_node_get(struct cache *, cache_key_t, struct cache_node **);
-void cache_node_put(struct cache_node *);
+void cache_node_put(struct cache *, struct cache_node *);
void cache_node_set_priority(struct cache *, struct cache_node *, int);
int cache_node_get_priority(struct cache_node *);
int cache_node_purge(struct cache *, cache_key_t, struct cache_node *);
continue;
hash = cache->c_hash + node->cn_hashidx;
- if (node->cn_count > 0 ||
- pthread_mutex_trylock(&hash->ch_mutex) != 0) {
+ if (pthread_mutex_trylock(&hash->ch_mutex) != 0) {
pthread_mutex_unlock(&node->cn_mutex);
continue;
}
+ ASSERT(node->cn_count == 0);
ASSERT(node->cn_priority == priority);
node->cn_priority = -1;
return NULL;
}
pthread_mutex_init(&node->cn_mutex, NULL);
+ list_head_init(&node->cn_mru);
node->cn_count = 1;
node->cn_priority = 0;
return node;
if (!cache->compare(node, key))
continue;
/*
- * node found, bump node's reference count, move it to the
- * top of its MRU list, and update stats.
- */
+ * node found, bump node's reference count, remove it
+ * from its MRU list, and update stats.
+ */
pthread_mutex_lock(&node->cn_mutex);
- node->cn_count++;
- mru = &cache->c_mrus[node->cn_priority];
- pthread_mutex_lock(&mru->cm_mutex);
- list_move(&node->cn_mru, &mru->cm_list);
- pthread_mutex_unlock(&mru->cm_mutex);
+ if (node->cn_count == 0) {
+ ASSERT(node->cn_priority >= 0);
+ ASSERT(!list_empty(&node->cn_mru));
+ mru = &cache->c_mrus[node->cn_priority];
+ pthread_mutex_lock(&mru->cm_mutex);
+ mru->cm_count--;
+ list_del_init(&node->cn_mru);
+ pthread_mutex_unlock(&mru->cm_mutex);
+ }
+ node->cn_count++;
pthread_mutex_unlock(&node->cn_mutex);
pthread_mutex_unlock(&hash->ch_mutex);
node->cn_hashidx = hashidx;
- /* add new node to appropriate hash and lowest priority MRU */
- mru = &cache->c_mrus[0];
- pthread_mutex_lock(&mru->cm_mutex);
+ /* add new node to appropriate hash */
pthread_mutex_lock(&hash->ch_mutex);
hash->ch_count++;
- mru->cm_count++;
list_add(&node->cn_hash, &hash->ch_list);
- list_add(&node->cn_mru, &mru->cm_list);
pthread_mutex_unlock(&hash->ch_mutex);
- pthread_mutex_unlock(&mru->cm_mutex);
*nodep = node;
return 1;
void
cache_node_put(
+ struct cache * cache,
struct cache_node * node)
{
+ struct cache_mru * mru;
+
pthread_mutex_lock(&node->cn_mutex);
#ifdef CACHE_DEBUG
if (node->cn_count < 1) {
__FUNCTION__, node->cn_count, node);
cache_abort();
}
+ if (!list_empty(&node->cn_mru)) {
+ fprintf(stderr, "%s: node put on node (%p) in MRU list\n",
+ __FUNCTION__, node);
+ cache_abort();
+ }
#endif
node->cn_count--;
+
+ if (node->cn_count == 0) {
+ /* add unreferenced node to appropriate MRU for shaker */
+ mru = &cache->c_mrus[node->cn_priority];
+ pthread_mutex_lock(&mru->cm_mutex);
+ mru->cm_count++;
+ list_add(&node->cn_mru, &mru->cm_list);
+ pthread_mutex_unlock(&mru->cm_mutex);
+ }
+
pthread_mutex_unlock(&node->cn_mutex);
}
struct cache_node * node,
int priority)
{
- struct cache_mru * mru;
-
if (priority < 0)
priority = 0;
else if (priority > CACHE_MAX_PRIORITY)
priority = CACHE_MAX_PRIORITY;
pthread_mutex_lock(&node->cn_mutex);
-
ASSERT(node->cn_count > 0);
- if (priority == node->cn_priority) {
- pthread_mutex_unlock(&node->cn_mutex);
- return;
- }
- mru = &cache->c_mrus[node->cn_priority];
- pthread_mutex_lock(&mru->cm_mutex);
- list_del_init(&node->cn_mru);
- mru->cm_count--;
- pthread_mutex_unlock(&mru->cm_mutex);
-
- mru = &cache->c_mrus[priority];
- pthread_mutex_lock(&mru->cm_mutex);
- list_add(&node->cn_mru, &mru->cm_list);
node->cn_priority = priority;
- mru->cm_count++;
- pthread_mutex_unlock(&mru->cm_mutex);
-
pthread_mutex_unlock(&node->cn_mutex);
}
if (use_xfs_buf_lock)
pthread_mutex_lock(&bp->b_lock);
cache_node_set_priority(libxfs_bcache, (struct cache_node *)bp,
- cache_node_get_priority((struct cache_node *)bp) - 4);
+ cache_node_get_priority((struct cache_node *)bp) -
+ CACHE_PREFETCH_PRIORITY);
#ifdef XFS_BUF_TRACING
pthread_mutex_lock(&libxfs_bcache->c_mutex);
lock_buf_count++;
#endif
if (use_xfs_buf_lock)
pthread_mutex_unlock(&bp->b_lock);
- cache_node_put((struct cache_node *)bp);
+ cache_node_put(libxfs_bcache, (struct cache_node *)bp);
}
void
void
libxfs_iput(xfs_inode_t *ip, uint lock_flags)
{
- cache_node_put((struct cache_node *)ip);
+ cache_node_put(libxfs_icache, (struct cache_node *)ip);
}
static struct cache_node *
static void pf_read_inode_dirs(prefetch_args_t *, xfs_buf_t *);
-/* buffer priorities for the libxfs cache */
-
-#define B_DIR_BMAP 15
-#define B_DIR_META_2 13 /* metadata in secondary queue */
-#define B_DIR_META_H 11 /* metadata fetched for PF_META_ONLY */
-#define B_DIR_META_S 9 /* single block of metadata */
-#define B_DIR_META 7
-#define B_DIR_INODE 6
-#define B_BMAP 5
-#define B_INODE 4
-
-#define B_IS_INODE(b) (((b) & 1) == 0)
-#define B_IS_META(b) (((b) & 1) != 0)
+/*
+ * Buffer priorities for the libxfs cache
+ *
+ * Directory metadata is ranked higher than other metadata as it's used
+ * in phases 3, 4 and 6, while other metadata is only used in phases 3 and 4.
+ */
+
+/* intermediate directory btree nodes - can't be queued */
+#define B_DIR_BMAP CACHE_PREFETCH_PRIORITY + 7
+/* directory metadata in secondary queue */
+#define B_DIR_META_2 CACHE_PREFETCH_PRIORITY + 6
+/* dir metadata that had to be fetched from the primary queue to avoid stalling */
+#define B_DIR_META_H CACHE_PREFETCH_PRIORITY + 5
+/* single block of directory metadata (can't batch read) */
+#define B_DIR_META_S CACHE_PREFETCH_PRIORITY + 4
+/* dir metadata with more than one block fetched in a single I/O */
+#define B_DIR_META CACHE_PREFETCH_PRIORITY + 3
+/* inode clusters with directory inodes */
+#define B_DIR_INODE CACHE_PREFETCH_PRIORITY + 2
+/* intermediate extent btree nodes */
+#define B_BMAP CACHE_PREFETCH_PRIORITY + 1
+/* inode clusters without any directory entries */
+#define B_INODE CACHE_PREFETCH_PRIORITY
+
+/*
+ * Test if bit 0 or 2 is set in the "priority tag" of the buffer to see if
+ * the buffer is for an inode or other metadata.
+ */
+#define B_IS_INODE(f) (((f) & 5) == 0)
+#define B_IS_META(f) (((f) & 5) != 0)
#define DEF_BATCH_BYTES 0x10000