]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
libxfs: reset dirty buffer priority on lookup
author: Dave Chinner <dchinner@redhat.com>
Thu, 11 Feb 2016 06:09:10 +0000 (17:09 +1100)
committer: Dave Chinner <david@fromorbit.com>
Thu, 11 Feb 2016 06:09:10 +0000 (17:09 +1100)
When a buffer on the dirty MRU is looked up and found, we remove the
buffer from the MRU. However, we've already set the priority of the
buffer to "dirty", so when we are done with it, it will go back on the
dirty buffer MRU regardless of whether it needs to or not.

Hence when we move a buffer to the dirty MRU, record the old
priority and restore it when we remove the buffer from the MRU on
lookup. This will prevent us from putting fixed, now writeable
buffers back on the dirty MRU and allow the cache routine to write,
shake and reclaim the buffers once they are clean.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
include/cache.h
libxfs/cache.c

index b46c2a5cedb88df8367c22c46a3916c9070e5d18..c3c1b7d509bdd0ad1a823362648a9196c02fcc32 100644 (file)
@@ -104,6 +104,7 @@ struct cache_node {
        unsigned int            cn_count;       /* reference count */
        unsigned int            cn_hashidx;     /* hash chain index */
        int                     cn_priority;    /* priority, -1 = free list */
+       int                     cn_old_priority;/* saved pre-dirty prio */
        pthread_mutex_t         cn_mutex;       /* node mutex */
 };
 
index 6f498936e26f88f785987ab262c3794c28b5bdb9..4e82e4029f569c8ee24d27e2807084688025c014 100644 (file)
@@ -195,6 +195,7 @@ cache_add_to_dirty_mru(
        struct cache_mru        *mru = &cache->c_mrus[CACHE_DIRTY_PRIORITY];
 
        pthread_mutex_lock(&mru->cm_mutex);
+       node->cn_old_priority = node->cn_priority;
        node->cn_priority = CACHE_DIRTY_PRIORITY;
        list_add(&node->cn_mru, &mru->cm_list);
        mru->cm_count++;
@@ -324,6 +325,7 @@ cache_node_allocate(
        list_head_init(&node->cn_mru);
        node->cn_count = 1;
        node->cn_priority = 0;
+       node->cn_old_priority = -1;
        return node;
 }
 
@@ -433,6 +435,12 @@ cache_node_get(
                                mru->cm_count--;
                                list_del_init(&node->cn_mru);
                                pthread_mutex_unlock(&mru->cm_mutex);
+                               if (node->cn_old_priority != -1) {
+                                       ASSERT(node->cn_priority ==
+                                                       CACHE_DIRTY_PRIORITY);
+                                       node->cn_priority = node->cn_old_priority;
+                                       node->cn_old_priority = -1;
+                               }
                        }
                        node->cn_count++;
 
@@ -533,6 +541,7 @@ cache_node_set_priority(
        pthread_mutex_lock(&node->cn_mutex);
        ASSERT(node->cn_count > 0);
        node->cn_priority = priority;
+       node->cn_old_priority = -1;
        pthread_mutex_unlock(&node->cn_mutex);
 }