xfs: fold xfs_ilock_for_write_fault into xfs_write_fault
author     Christoph Hellwig <hch@lst.de>
Sun, 23 Jun 2024 05:44:31 +0000 (07:44 +0200)
committer  Chandan Babu R <chandanbabu@kernel.org>
Mon, 1 Jul 2024 04:02:29 +0000 (09:32 +0530)
Now that the page fault handler has been refactored, the only remaining
caller of xfs_ilock_for_write_fault is simple and calls it unconditionally.
Fold the locking logic into that caller and expand the comments explaining it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
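
For readability, this is the helper being removed together with its single
call site, reassembled from the removed (-) lines of the diff below. Only the
statements visible in the hunks are shown, so the call-site fragment is a
sketch rather than the complete pre-patch function:

static unsigned int
xfs_ilock_for_write_fault(
        struct xfs_inode        *ip)
{
        /* get a shared lock if no remapping in progress */
        xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
        if (!xfs_iflags_test(ip, XFS_IREMAPPING))
                return XFS_MMAPLOCK_SHARED;

        /* wait for remapping to complete */
        xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
        return XFS_MMAPLOCK_EXCL;
}

/* call site inside xfs_write_fault(), before this patch */
lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
/* ... fault handling elided, see the second hunk below ... */
xfs_iunlock(XFS_I(inode), lock_mode);
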
fs/xfs/xfs_file.c

index 904be41f3e5ec64c46dd43ee572ab2c6d4d37d15..4cdc54dc96862e2af50a5dbd5951be2588d3a282 100644 (file)
@@ -227,21 +227,6 @@ xfs_ilock_iocb_for_write(
        return 0;
 }
 
-static unsigned int
-xfs_ilock_for_write_fault(
-       struct xfs_inode        *ip)
-{
-       /* get a shared lock if no remapping in progress */
-       xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
-       if (!xfs_iflags_test(ip, XFS_IREMAPPING))
-               return XFS_MMAPLOCK_SHARED;
-
-       /* wait for remapping to complete */
-       xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
-       xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
-       return XFS_MMAPLOCK_EXCL;
-}
-
 STATIC ssize_t
 xfs_file_dio_read(
        struct kiocb            *iocb,
@@ -1294,18 +1279,30 @@ xfs_write_fault(
        unsigned int            order)
 {
        struct inode            *inode = file_inode(vmf->vma->vm_file);
-       unsigned int            lock_mode;
+       struct xfs_inode        *ip = XFS_I(inode);
+       unsigned int            lock_mode = XFS_MMAPLOCK_SHARED;
        vm_fault_t              ret;
 
        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
 
-       lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
+       /*
+        * Normally we only need the shared mmaplock, but if a reflink remap is
+        * in progress we take the exclusive lock to wait for the remap to
+        * finish before taking a write fault.
+        */
+       xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+       if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
+               xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+               xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+               lock_mode = XFS_MMAPLOCK_EXCL;
+       }
+
        if (IS_DAX(inode))
                ret = xfs_dax_fault_locked(vmf, order, true);
        else
                ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
-       xfs_iunlock(XFS_I(inode), lock_mode);
+       xfs_iunlock(ip, lock_mode);
 
        sb_end_pagefault(inode->i_sb);
        return ret;
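
For reference, this is how xfs_write_fault reads with the patch applied,
reconstructed from the context and added (+) lines of the hunk above. The
opening of the function signature and the closing brace are not part of the
hunk and are filled in from the surrounding context:

static vm_fault_t
xfs_write_fault(
        struct vm_fault         *vmf,
        unsigned int            order)
{
        struct inode            *inode = file_inode(vmf->vma->vm_file);
        struct xfs_inode        *ip = XFS_I(inode);
        unsigned int            lock_mode = XFS_MMAPLOCK_SHARED;
        vm_fault_t              ret;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);

        /*
         * Normally we only need the shared mmaplock, but if a reflink remap
         * is in progress we take the exclusive lock to wait for the remap to
         * finish before taking a write fault.
         */
        xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
        if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
                xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
                xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
                lock_mode = XFS_MMAPLOCK_EXCL;
        }

        if (IS_DAX(inode))
                ret = xfs_dax_fault_locked(vmf, order, true);
        else
                ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
        xfs_iunlock(ip, lock_mode);

        sb_end_pagefault(inode->i_sb);
        return ret;
}

The common case takes only XFS_MMAPLOCK_SHARED; the exclusive lock is taken
solely to wait for an in-progress reflink remap (XFS_IREMAPPING) to finish
before the write fault proceeds, and lock_mode records which lock to drop.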