btrfs: set DIRTY and WRITEBACK tags on the buffer_tree
author    Josef Bacik <josef@toxicpanda.com>
          Mon, 28 Apr 2025 14:52:56 +0000 (10:52 -0400)
committer David Sterba <dsterba@suse.com>
          Thu, 15 May 2025 12:30:50 +0000 (14:30 +0200)
In preparation for changing how we do writeout of extent buffers, start
tagging the extent buffer xarray with DIRTY and WRITEBACK to make it
easier to find extent buffers that are in either state.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
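
[Editor's note, not part of the patch] A minimal sketch of how the new tags can later be consumed: once the buffer_tree carries PAGECACHE_TAG_DIRTY, a writeback pass can walk only the dirty extent buffers with xa_for_each_marked() rather than scanning every slot. The walker below is hypothetical and assumes the btrfs internal headers and the fs_info->buffer_tree xarray shown in the diff; locking, reference counting and error handling are omitted.

/* Hypothetical sketch, assuming fs/btrfs internal headers ("ctree.h", "extent_io.h"). */
static void walk_dirty_extent_buffers(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long index;

	/*
	 * xa_for_each_marked() visits only the entries carrying the given
	 * mark.  A real walker would take a reference on each eb and
	 * recheck its DIRTY state under the appropriate lock before use.
	 */
	xa_for_each_marked(&fs_info->buffer_tree, index, eb,
			   PAGECACHE_TAG_DIRTY) {
		/* submit eb for writeback here */
	}
}
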
fs/btrfs/extent_io.c

index cf3c0318898a194c0618d013d48818232a500915..ed573a402706714f475f8f3c08914fd5f5cdd5ae 100644
@@ -1774,8 +1774,18 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e
         */
        spin_lock(&eb->refs_lock);
        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+               XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+               unsigned long flags;
+
                set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
                spin_unlock(&eb->refs_lock);
+
+               xas_lock_irqsave(&xas, flags);
+               xas_load(&xas);
+               xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+               xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+               xas_unlock_irqrestore(&xas, flags);
+
                btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
                percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
                                         -eb->len,
@@ -1861,6 +1871,30 @@ static void set_btree_ioerr(struct extent_buffer *eb)
        }
 }
 
+static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
+{
+       struct btrfs_fs_info *fs_info = eb->fs_info;
+       XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+       unsigned long flags;
+
+       xas_lock_irqsave(&xas, flags);
+       xas_load(&xas);
+       xas_set_mark(&xas, mark);
+       xas_unlock_irqrestore(&xas, flags);
+}
+
+static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
+{
+       struct btrfs_fs_info *fs_info = eb->fs_info;
+       XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+       unsigned long flags;
+
+       xas_lock_irqsave(&xas, flags);
+       xas_load(&xas);
+       xas_clear_mark(&xas, mark);
+       xas_unlock_irqrestore(&xas, flags);
+}
+
 /*
  * The endio specific version which won't touch any unsafe spinlock in endio
  * context.
@@ -1891,6 +1925,7 @@ static void end_bbio_meta_write(struct btrfs_bio *bbio)
                btrfs_meta_folio_clear_writeback(fi.folio, eb);
        }
 
+       buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK);
        clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
        smp_mb__after_atomic();
        wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
@@ -3513,6 +3548,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
        if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
                return;
 
+       buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY);
        percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
                                 fs_info->dirty_metadata_batch);
 
@@ -3561,6 +3597,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
                        folio_lock(eb->folios[0]);
                for (int i = 0; i < num_extent_folios(eb); i++)
                        btrfs_meta_folio_set_dirty(eb->folios[i], eb);
+               buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
                if (subpage)
                        folio_unlock(eb->folios[0]);
                percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,