git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
fs: Make bhs point to mapping_metadata_bhs
authorJan Kara <jack@suse.cz>
Thu, 26 Mar 2026 09:54:25 +0000 (10:54 +0100)
committerChristian Brauner <brauner@kernel.org>
Thu, 26 Mar 2026 14:03:31 +0000 (15:03 +0100)
Make buffer heads point to mapping_metadata_bhs instead of struct
address_space. This makes the code more self-contained. For the (only)
case of IO error handling, where we really need to reach struct
address_space, add a pointer to the mapping from mapping_metadata_bhs.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20260326095354.16340-73-jack@suse.cz
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/buffer.c
fs/inode.c
include/linux/buffer_head.h
include/linux/fs.h

index 294f9cd07f42715eb8f0d89dd58d9a9a84b2bbe7..67b3d4624503cd57262deb13d8d42972f22017dc 100644 (file)
@@ -497,13 +497,12 @@ static void __remove_assoc_queue(struct mapping_metadata_bhs *mmb,
 {
        lockdep_assert_held(&mmb->lock);
        list_del_init(&bh->b_assoc_buffers);
-       WARN_ON(!bh->b_assoc_map);
-       bh->b_assoc_map = NULL;
+       WARN_ON(!bh->b_mmb);
+       bh->b_mmb = NULL;
 }
 
 static void remove_assoc_queue(struct buffer_head *bh)
 {
-       struct address_space *mapping;
        struct mapping_metadata_bhs *mmb;
 
        /*
@@ -514,13 +513,12 @@ static void remove_assoc_queue(struct buffer_head *bh)
         * opportunistically acquire the lock and then recheck the bh
         * didn't move under us.
         */
-       while (bh->b_assoc_map) {
+       while (bh->b_mmb) {
                rcu_read_lock();
-               mapping = READ_ONCE(bh->b_assoc_map);
-               if (mapping) {
-                       mmb = &mapping->i_metadata_bhs;
+               mmb = READ_ONCE(bh->b_mmb);
+               if (mmb) {
                        spin_lock(&mmb->lock);
-                       if (bh->b_assoc_map == mapping)
+                       if (bh->b_mmb == mmb)
                                __remove_assoc_queue(mmb, bh);
                        spin_unlock(&mmb->lock);
                }
@@ -551,9 +549,9 @@ EXPORT_SYMBOL_GPL(inode_has_buffers);
  * Do this in two main stages: first we copy dirty buffers to a
  * temporary inode list, queueing the writes as we go. Then we clean
  * up, waiting for those writes to complete. mark_buffer_dirty_inode()
- * doesn't touch b_assoc_buffers list if b_assoc_map is not NULL so we
- * are sure the buffer stays on our list until IO completes (at which point
- * it can be reaped).
+ * doesn't touch b_assoc_buffers list if b_mmb is not NULL so we are sure the
+ * buffer stays on our list until IO completes (at which point it can be
+ * reaped).
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
@@ -571,14 +569,14 @@ int sync_mapping_buffers(struct address_space *mapping)
        spin_lock(&mmb->lock);
        while (!list_empty(&mmb->list)) {
                bh = BH_ENTRY(mmb->list.next);
-               WARN_ON_ONCE(bh->b_assoc_map != mapping);
+               WARN_ON_ONCE(bh->b_mmb != mmb);
                __remove_assoc_queue(mmb, bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
-                       bh->b_assoc_map = mapping;
+                       bh->b_mmb = mmb;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(&mmb->lock);
@@ -616,7 +614,7 @@ int sync_mapping_buffers(struct address_space *mapping)
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers, &mmb->list);
-                       bh->b_assoc_map = mapping;
+                       bh->b_mmb = mmb;
                }
                spin_unlock(&mmb->lock);
                wait_on_buffer(bh);
@@ -724,11 +722,11 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        struct address_space *mapping = inode->i_mapping;
 
        mark_buffer_dirty(bh);
-       if (!bh->b_assoc_map) {
+       if (!bh->b_mmb) {
                spin_lock(&mapping->i_metadata_bhs.lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->i_metadata_bhs.list);
-               bh->b_assoc_map = mapping;
+               bh->b_mmb = &mapping->i_metadata_bhs;
                spin_unlock(&mapping->i_metadata_bhs.lock);
        }
 }
@@ -1124,8 +1122,8 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
        /* FIXME: do we need to set this in both places? */
        if (bh->b_folio && bh->b_folio->mapping)
                mapping_set_error(bh->b_folio->mapping, -EIO);
-       if (bh->b_assoc_map)
-               mapping_set_error(bh->b_assoc_map, -EIO);
+       if (bh->b_mmb)
+               mapping_set_error(bh->b_mmb->mapping, -EIO);
 }
 EXPORT_SYMBOL(mark_buffer_write_io_error);
 
index 393f586d050adb2a9d3f6f1daf98069f1faeadaf..3874b933abdbcb539990236a9c550da4ff1e9ba0 100644 (file)
@@ -276,6 +276,7 @@ int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp
 
        mapping->a_ops = &empty_aops;
        mapping->host = inode;
+       mapping->i_metadata_bhs.mapping = mapping;
        mapping->flags = 0;
        mapping->wb_err = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
index 631bf971efc04e250434f0d793a1df75e8fbc483..20636599d8583b415000f6c6874b4c0407ece671 100644 (file)
@@ -73,8 +73,8 @@ struct buffer_head {
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
-       struct address_space *b_assoc_map;      /* mapping this buffer is
-                                                  associated with */
+       struct mapping_metadata_bhs *b_mmb; /* head of the list of metadata bhs
+                                            * this buffer is associated with */
        atomic_t b_count;               /* users using this buffer_head */
        spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
                                         * serialise IO completion of other
index 76360b0040e02f91e0788c5c2a6c9e19db8c3198..fa2a812bd718d1010aebacc138d3f022044e85a4 100644 (file)
@@ -447,6 +447,7 @@ extern const struct address_space_operations empty_aops;
 
 /* Structure for tracking metadata buffer heads associated with the mapping */
 struct mapping_metadata_bhs {
+       struct address_space *mapping;  /* Mapping bhs are associated with */
        spinlock_t lock;        /* Lock protecting bh list */
        struct list_head list;  /* The list of bhs (b_assoc_buffers) */
 };