xfs: kill XBF_UNMAPPED
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Mon, 10 Mar 2025 13:19:11 +0000 (14:19 +0100)
Commit:     Carlos Maiolino <cem@kernel.org>
CommitDate: Mon, 10 Mar 2025 13:29:44 +0000 (14:29 +0100)
Unmapped buffer access is a pain, so kill it. The switch to large
folios means we rarely pay a vmap penalty for large buffers,
so this functionality is largely unnecessary now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/scrub/inode_repair.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_inode.c
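
The "pain" the commit message refers to is that an XBF_UNMAPPED buffer has no
contiguous kernel mapping, so every access has to translate a buffer offset
into a (page, offset-in-page) pair by hand, as the xfs_buf_offset() and
xfs_buf_zero() bodies deleted below show. A minimal userspace model of that
pattern, for illustration only (toy_buf and friends are made-up names, not
kernel API; a fixed 4 KiB page size is assumed):

/*
 * Userspace model of the access pattern this commit removes.  All names
 * (toy_buf, toy_buf_offset, toy_buf_zero) are illustrative, not kernel
 * API.  Build with: cc -o toy toy.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)

struct toy_buf {
	unsigned char	**pages;	/* discontiguous backing pages */
	unsigned char	*addr;		/* contiguous mapping, NULL if "unmapped" */
};

/* Mirrors the deleted xfs_buf_offset(): unmapped access needs page math. */
static unsigned char *toy_buf_offset(struct toy_buf *bp, size_t offset)
{
	if (bp->addr)
		return bp->addr + offset;	/* mapped: plain pointer arithmetic */
	return bp->pages[offset >> TOY_PAGE_SHIFT] +
			(offset & (TOY_PAGE_SIZE - 1));
}

/* Mirrors the deleted xfs_buf_zero(): ranges spanning pages must be chunked. */
static void toy_buf_zero(struct toy_buf *bp, size_t boff, size_t bsize)
{
	size_t bend = boff + bsize;

	if (bp->addr) {
		memset(bp->addr + boff, 0, bsize);	/* mapped: one memset */
		return;
	}
	while (boff < bend) {
		size_t csize = TOY_PAGE_SIZE - (boff & (TOY_PAGE_SIZE - 1));

		if (csize > bend - boff)
			csize = bend - boff;
		memset(toy_buf_offset(bp, boff), 0, csize);
		boff += csize;
	}
}

int main(void)
{
	unsigned char *pages[2] = { malloc(TOY_PAGE_SIZE), malloc(TOY_PAGE_SIZE) };
	struct toy_buf bp = { .pages = pages, .addr = NULL };

	/* zero 16 bytes straddling the page boundary */
	toy_buf_zero(&bp, TOY_PAGE_SIZE - 8, 16);
	printf("byte at page boundary: %d\n", toy_buf_offset(&bp, TOY_PAGE_SIZE)[0]);
	free(pages[0]);
	free(pages[1]);
	return 0;
}

With the flag gone, every buffer has a valid b_addr, so both helpers collapse
to plain pointer arithmetic, which is exactly what the new inlines in
xfs_buf.h below do.
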

diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 57513ba19d6a71648bf72b1461c27c44c38e325c..0c47b5c6ca7d99c4067ceb8ba22f2b4ddc89ad0f 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -364,7 +364,7 @@ xfs_ialloc_inode_init(
                                (j * M_IGEO(mp)->blocks_per_cluster));
                error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
                                mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
-                               XBF_UNMAPPED, &fbuf);
+                               0, &fbuf);
                if (error)
                        return error;
 
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 992e6d33770940644c04f25e47554906e7456eda..aa13fc00afd707b9baa274a16711126cb67d4da9 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -137,7 +137,7 @@ xfs_imap_to_bp(
        int                     error;
 
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
-                       imap->im_len, XBF_UNMAPPED, bpp, &xfs_inode_buf_ops);
+                       imap->im_len, 0, bpp, &xfs_inode_buf_ops);
        if (xfs_metadata_is_sick(error))
                xfs_agno_mark_sick(mp, xfs_daddr_to_agno(mp, imap->im_blkno),
                                XFS_SICK_AG_INODES);
diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c
index 4299063ffe87495f15c6593ff23cfeae8d89c09d..a90a011c7e5f81ba8a38e45b04fc6f0fee1dcf2f 100644
--- a/fs/xfs/scrub/inode_repair.c
+++ b/fs/xfs/scrub/inode_repair.c
@@ -1560,8 +1560,7 @@ xrep_dinode_core(
 
        /* Read the inode cluster buffer. */
        error = xfs_trans_read_buf(sc->mp, sc->tp, sc->mp->m_ddev_targp,
-                       ri->imap.im_blkno, ri->imap.im_len, XBF_UNMAPPED, &bp,
-                       NULL);
+                       ri->imap.im_blkno, ri->imap.im_len, 0, &bp, NULL);
        if (error)
                return error;
 
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a831e8c755cb76d6acd04bf2839939ff34b9dc3b..b5ec7d83210f63407791fcfee0113f9f2e07677a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -145,7 +145,7 @@ _xfs_buf_alloc(
         * We don't want certain flags to appear in b_flags unless they are
         * specifically set by later operations on the buffer.
         */
-       flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+       flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 
        /*
         * A new buffer is held and locked by the owner.  This ensures that the
@@ -289,9 +289,7 @@ xfs_buf_alloc_kmem(
  *
  * The third type of buffer is the multi-page buffer. These are always made
 * up of single pages so that they can be fed to vm_map_ram() to return a
- * contiguous memory region we can access the data through, or mark it as
- * XBF_UNMAPPED and access the data directly through individual page_address()
- * calls.
+ * contiguous memory region we can access the data through.
  */
 static int
 xfs_buf_alloc_backing_mem(
@@ -413,8 +411,6 @@ _xfs_buf_map_pages(
        if (bp->b_page_count == 1) {
                /* A single page buffer is always mappable */
                bp->b_addr = page_address(bp->b_pages[0]);
-       } else if (flags & XBF_UNMAPPED) {
-               bp->b_addr = NULL;
        } else {
                int retried = 0;
                unsigned nofs_flag;
@@ -1345,7 +1341,7 @@ __xfs_buf_ioend(
        trace_xfs_buf_iodone(bp, _RET_IP_);
 
        if (bp->b_flags & XBF_READ) {
-               if (!bp->b_error && bp->b_addr && is_vmalloc_addr(bp->b_addr))
+               if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
                        invalidate_kernel_vmap_range(bp->b_addr,
                                        xfs_buf_vmap_len(bp));
                if (!bp->b_error && bp->b_ops)
@@ -1526,7 +1522,7 @@ xfs_buf_submit_bio(
                        __bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0);
                bio->bi_iter.bi_size = size; /* limit to the actual size used */
 
-               if (bp->b_addr && is_vmalloc_addr(bp->b_addr))
+               if (is_vmalloc_addr(bp->b_addr))
                        flush_kernel_vmap_range(bp->b_addr,
                                        xfs_buf_vmap_len(bp));
        }
@@ -1657,52 +1653,6 @@ xfs_buf_submit(
        xfs_buf_submit_bio(bp);
 }
 
-void *
-xfs_buf_offset(
-       struct xfs_buf          *bp,
-       size_t                  offset)
-{
-       struct page             *page;
-
-       if (bp->b_addr)
-               return bp->b_addr + offset;
-
-       page = bp->b_pages[offset >> PAGE_SHIFT];
-       return page_address(page) + (offset & (PAGE_SIZE-1));
-}
-
-void
-xfs_buf_zero(
-       struct xfs_buf          *bp,
-       size_t                  boff,
-       size_t                  bsize)
-{
-       size_t                  bend;
-
-       if (bp->b_addr) {
-               memset(bp->b_addr + boff, 0, bsize);
-               return;
-       }
-
-       bend = boff + bsize;
-       while (boff < bend) {
-               struct page     *page;
-               int             page_index, page_offset, csize;
-
-               page_index = boff >> PAGE_SHIFT;
-               page_offset = boff & ~PAGE_MASK;
-               page = bp->b_pages[page_index];
-               csize = min_t(size_t, PAGE_SIZE - page_offset,
-                                     BBTOB(bp->b_length) - boff);
-
-               ASSERT((csize + page_offset) <= PAGE_SIZE);
-
-               memset(page_address(page) + page_offset, 0, csize);
-
-               boff += csize;
-       }
-}
-
 /*
  * Log a message about and stale a buffer that a caller has decided is corrupt.
  *
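
What makes the always-mapped policy cheap is visible in the
_xfs_buf_map_pages() hunk above: multi-page buffers get a contiguous virtual
address from vm_map_ram() at setup time, and large folios make that fallback
rare in the first place. A condensed sketch of the mapping step, abridged from
that function (error paths and surrounding context trimmed; illustrative, not
the verbatim kernel code):

/* Condensed from _xfs_buf_map_pages(); illustrative, not verbatim. */
static int map_pages_sketch(struct xfs_buf *bp)
{
	unsigned int	nofs_flag;
	int		retried = 0;

	if (bp->b_page_count == 1) {
		/* a single page is always directly addressable */
		bp->b_addr = page_address(bp->b_pages[0]);
		return 0;
	}

	/*
	 * vm_map_ram() can fail on transient vmalloc-space shortage;
	 * flushing stale lazy mappings and retrying usually resolves it.
	 * memalloc_nofs_save() keeps memory reclaim from recursing into
	 * the filesystem while the buffer is locked.
	 */
	nofs_flag = memalloc_nofs_save();
	do {
		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, -1);
		if (bp->b_addr)
			break;
		vm_unmap_aliases();
	} while (retried++ <= 1);
	memalloc_nofs_restore(nofs_flag);

	return bp->b_addr ? 0 : -ENOMEM;
}
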
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c92a328252cc1ae6d6dd03adb88a75285806c09a..8db522f19b0c0fe29041f634479a1bec9bef073a 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -48,7 +48,6 @@ struct xfs_buf;
 #define XBF_LIVESCAN    (1u << 28)
 #define XBF_INCORE      (1u << 29)/* lookup only, return if found in cache */
 #define XBF_TRYLOCK     (1u << 30)/* lock requested, but do not wait */
-#define XBF_UNMAPPED    (1u << 31)/* do not map the buffer */
 
 
 typedef unsigned int xfs_buf_flags_t;
@@ -68,8 +67,7 @@ typedef unsigned int xfs_buf_flags_t;
        /* The following interface flags should never be set */ \
        { XBF_LIVESCAN,         "LIVESCAN" }, \
        { XBF_INCORE,           "INCORE" }, \
-       { XBF_TRYLOCK,          "TRYLOCK" }, \
-       { XBF_UNMAPPED,         "UNMAPPED" }
+       { XBF_TRYLOCK,          "TRYLOCK" }
 
 /*
  * Internal state flags.
@@ -313,12 +311,20 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
 #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
 extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
 void xfs_buf_ioend_fail(struct xfs_buf *);
-void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
 void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
 #define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
 
 /* Buffer Utility Routines */
-extern void *xfs_buf_offset(struct xfs_buf *, size_t);
+static inline void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
+{
+       return bp->b_addr + offset;
+}
+
+static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
+{
+       memset(bp->b_addr + boff, 0, bsize);
+}
+
 extern void xfs_buf_stale(struct xfs_buf *bp);
 
 /* Delayed Write Buffer Routines */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 0ee6fa9efd18c9abbd9608053b8c93c5fbd2b77a..41f0bc9aa5f4572517296f20453334e869a5eb7d 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -70,7 +70,7 @@ xfs_buf_item_straddle(
 {
        void                    *first, *last;
 
-       if (bp->b_page_count == 1 || !(bp->b_flags & XBF_UNMAPPED))
+       if (bp->b_page_count == 1)
                return false;
 
        first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index 05a2f6927c121bd4adb739ec5c1d9caad3bb031a..d4c5cef5bc4340bb09a5f26b77f0b690e89d8b42 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -1006,7 +1006,6 @@ xlog_recover_buf_commit_pass2(
        struct xfs_mount                *mp = log->l_mp;
        struct xfs_buf                  *bp;
        int                             error;
-       uint                            buf_flags;
        xfs_lsn_t                       lsn;
 
        /*
@@ -1025,13 +1024,8 @@ xlog_recover_buf_commit_pass2(
        }
 
        trace_xfs_log_recover_buf_recover(log, buf_f);
-
-       buf_flags = 0;
-       if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
-               buf_flags |= XBF_UNMAPPED;
-
        error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
-                         buf_flags, &bp, NULL);
+                         0, &bp, NULL);
        if (error)
                return error;
 
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7ded570e01917970f9852521db3b51a6613eef99..ce6b8ffbaa2cbd6406af79fe674fe7f19fdc0b8e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1721,8 +1721,7 @@ xfs_ifree_cluster(
                 * to mark all the active inodes on the buffer stale.
                 */
                error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-                               mp->m_bsize * igeo->blocks_per_cluster,
-                               XBF_UNMAPPED, &bp);
+                               mp->m_bsize * igeo->blocks_per_cluster, 0, &bp);
                if (error)
                        return error;