git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
xfs: split write fault handling out of __xfs_filemap_fault
author: Christoph Hellwig <hch@lst.de>
Tue, 29 Oct 2024 15:11:58 +0000 (16:11 +0100)
committer: Carlos Maiolino <cem@kernel.org>
Tue, 5 Nov 2024 12:52:57 +0000 (13:52 +0100)
Only two of the callers of __xfs_filemap_fault ever handle read faults.
Split the write_fault handling out of __xfs_filemap_fault so that all
callers call that directly either conditionally or unconditionally and
only leave the read fault handling in __xfs_filemap_fault.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
fs/xfs/xfs_file.c

index 20f7f92b88674afb430b1ed1e6efd376d2159852..0b8e36f8703c3af7e90af5747815b3e449edd1af 100644 (file)
@@ -1434,6 +1434,16 @@ xfs_dax_read_fault(
        return ret;
 }
 
+/*
+ * Locking for serialisation of IO during page faults. This results in a lock
+ * ordering of:
+ *
+ * mmap_lock (MM)
+ *   sb_start_pagefault(vfs, freeze)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
+ *       page_lock (MM)
+ *         i_lock (XFS - extent map serialisation)
+ */
 static vm_fault_t
 xfs_write_fault(
        struct vm_fault         *vmf,
@@ -1471,26 +1481,13 @@ xfs_write_fault(
        return ret;
 }
 
-/*
- * Locking for serialisation of IO during page faults. This results in a lock
- * ordering of:
- *
- * mmap_lock (MM)
- *   sb_start_pagefault(vfs, freeze)
- *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
- *       page_lock (MM)
- *         i_lock (XFS - extent map serialisation)
- */
 static vm_fault_t
 __xfs_filemap_fault(
        struct vm_fault         *vmf,
-       unsigned int            order,
-       bool                    write_fault)
+       unsigned int            order)
 {
        struct inode            *inode = file_inode(vmf->vma->vm_file);
 
-       if (write_fault)
-               return xfs_write_fault(vmf, order);
        if (IS_DAX(inode))
                return xfs_dax_read_fault(vmf, order);
 
@@ -1511,9 +1508,9 @@ xfs_filemap_fault(
        struct vm_fault         *vmf)
 {
        /* DAX can shortcut the normal fault path on write faults! */
-       return __xfs_filemap_fault(vmf, 0,
-                       IS_DAX(file_inode(vmf->vma->vm_file)) &&
-                       xfs_is_write_fault(vmf));
+       if (IS_DAX(file_inode(vmf->vma->vm_file)) && xfs_is_write_fault(vmf))
+               return xfs_write_fault(vmf, 0);
+       return __xfs_filemap_fault(vmf, 0);
 }
 
 static vm_fault_t
@@ -1525,15 +1522,16 @@ xfs_filemap_huge_fault(
                return VM_FAULT_FALLBACK;
 
        /* DAX can shortcut the normal fault path on write faults! */
-       return __xfs_filemap_fault(vmf, order,
-                       xfs_is_write_fault(vmf));
+       if (xfs_is_write_fault(vmf))
+               return xfs_write_fault(vmf, order);
+       return __xfs_filemap_fault(vmf, order);
 }
 
 static vm_fault_t
 xfs_filemap_page_mkwrite(
        struct vm_fault         *vmf)
 {
-       return __xfs_filemap_fault(vmf, 0, true);
+       return xfs_write_fault(vmf, 0);
 }
 
 /*
@@ -1545,8 +1543,7 @@ static vm_fault_t
 xfs_filemap_pfn_mkwrite(
        struct vm_fault         *vmf)
 {
-
-       return __xfs_filemap_fault(vmf, 0, true);
+       return xfs_write_fault(vmf, 0);
 }
 
 static const struct vm_operations_struct xfs_file_vm_ops = {