git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: update all remaining mmap_prepare users to use vma_flags_t
authorLorenzo Stoakes <lorenzo.stoakes@oracle.com>
Thu, 22 Jan 2026 16:06:18 +0000 (16:06 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 12 Feb 2026 23:42:58 +0000 (15:42 -0800)
We will shortly be removing the vm_flags_t field from vm_area_desc, so we
need to update all mmap_prepare users to only use the desc->vma_flags
field.

This patch achieves that and makes all ancillary changes required to make
this possible.

This lays the groundwork for future work to eliminate the use of
vm_flags_t in vm_area_desc altogether and more broadly throughout the
kernel.

While we're here, we take the opportunity to replace VM_REMAP_FLAGS with
VMA_REMAP_FLAGS, the vma_flags_t equivalent.

No functional changes intended.

Link: https://lkml.kernel.org/r/fb1f55323799f09fe6a36865b31550c9ec67c225.1769097829.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Damien Le Moal <dlemoal@kernel.org> [zonefs]
Acked-by: "Darrick J. Wong" <djwong@kernel.org>
Acked-by: Pedro Falcato <pfalcato@suse.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Yury Norov <ynorov@nvidia.com>
Cc: Chris Mason <clm@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
16 files changed:
drivers/char/mem.c
drivers/dax/device.c
fs/aio.c
fs/erofs/data.c
fs/ext4/file.c
fs/ntfs3/file.c
fs/orangefs/file.c
fs/ramfs/file-nommu.c
fs/resctrl/pseudo_lock.c
fs/romfs/mmap-nommu.c
fs/xfs/xfs_file.c
fs/zonefs/file.c
include/linux/dax.h
include/linux/mm.h
kernel/relay.c
mm/memory.c

index 52039fae1594c18dc616267ccaeace789e4775e2..cca4529431f8a82492d3f34ff86f299e593821dc 100644 (file)
@@ -306,7 +306,7 @@ static unsigned zero_mmap_capabilities(struct file *file)
 /* can't do an in-place private mapping if there's no MMU */
 static inline int private_mapping_ok(struct vm_area_desc *desc)
 {
-       return is_nommu_shared_mapping(desc->vm_flags);
+       return is_nommu_shared_vma_flags(&desc->vma_flags);
 }
 #else
 
@@ -360,7 +360,7 @@ static int mmap_mem_prepare(struct vm_area_desc *desc)
 
        desc->vm_ops = &mmap_mem_ops;
 
-       /* Remap-pfn-range will mark the range VM_IO. */
+       /* Remap-pfn-range will mark the range with the I/O flag. */
        mmap_action_remap_full(desc, desc->pgoff);
        /* We filter remap errors to -EAGAIN. */
        desc->action.error_hook = mmap_filter_error;
@@ -520,7 +520,7 @@ static int mmap_zero_prepare(struct vm_area_desc *desc)
 #ifndef CONFIG_MMU
        return -ENOSYS;
 #endif
-       if (desc->vm_flags & VM_SHARED)
+       if (vma_desc_test_flags(desc, VMA_SHARED_BIT))
                return shmem_zero_setup_desc(desc);
 
        desc->action.success_hook = mmap_zero_private_success;
index 22999a402e029255e19026b7f42a0b864c213175..528e81240c4d451496c27cd1dc331d77c922eb3b 100644 (file)
@@ -13,7 +13,7 @@
 #include "dax-private.h"
 #include "bus.h"
 
-static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+static int __check_vma(struct dev_dax *dev_dax, vma_flags_t flags,
                       unsigned long start, unsigned long end, struct file *file,
                       const char *func)
 {
@@ -24,7 +24,7 @@ static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
                return -ENXIO;
 
        /* prevent private mappings from being established */
-       if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+       if (!vma_flags_test(&flags, VMA_MAYSHARE_BIT)) {
                dev_info_ratelimited(dev,
                                "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
@@ -53,7 +53,7 @@ static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
                     const char *func)
 {
-       return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+       return __check_vma(dev_dax, vma->flags, vma->vm_start, vma->vm_end,
                           vma->vm_file, func);
 }
 
@@ -306,14 +306,14 @@ static int dax_mmap_prepare(struct vm_area_desc *desc)
         * fault time.
         */
        id = dax_read_lock();
-       rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+       rc = __check_vma(dev_dax, desc->vma_flags, desc->start, desc->end, filp,
                         __func__);
        dax_read_unlock(id);
        if (rc)
                return rc;
 
        desc->vm_ops = &dax_vm_ops;
-       desc->vm_flags |= VM_HUGEPAGE;
+       vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 
index 0a23a8c0717ff9ce71a413baf174569ae52f345f..59b67b8da1b2ee99725a0e176fb5f101de0b76ff 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -394,7 +394,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = {
 
 static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
 {
-       desc->vm_flags |= VM_DONTEXPAND;
+       vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
        desc->vm_ops = &aio_ring_vm_ops;
        return 0;
 }
index bb13c4cb845563492a616fc000910112b92df555..e7bc29e764c6b7f19094e10bb8e127388741dd50 100644 (file)
@@ -438,11 +438,12 @@ static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
        if (!IS_DAX(file_inode(desc->file)))
                return generic_file_readonly_mmap_prepare(desc);
 
-       if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+       if (vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+           vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
                return -EINVAL;
 
        desc->vm_ops = &erofs_dax_vm_ops;
-       desc->vm_flags |= VM_HUGEPAGE;
+       vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 #else
index 7a8b3093218921f26a7f8962f94739ba49431230..dfd5f4fe16475cad3e61599b4282bdf31ea32575 100644 (file)
@@ -822,13 +822,13 @@ static int ext4_file_mmap_prepare(struct vm_area_desc *desc)
         * We don't support synchronous mappings for non-DAX files and
         * for DAX files if underneath dax_device is not synchronous.
         */
-       if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
+       if (!daxdev_mapping_supported(desc, file_inode(file), dax_dev))
                return -EOPNOTSUPP;
 
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                desc->vm_ops = &ext4_dax_vm_ops;
-               desc->vm_flags |= VM_HUGEPAGE;
+               vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        } else {
                desc->vm_ops = &ext4_file_vm_ops;
        }
index 2e7b2e566ebe18c173319c7cfd4304c22ddd2f28..2902fc6d9a85d22297d1655d59391922ad0d329f 100644 (file)
@@ -347,7 +347,7 @@ static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
        struct inode *inode = file_inode(file);
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
-       bool rw = desc->vm_flags & VM_WRITE;
+       const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT);
        int err;
 
        /* Avoid any operation if inode is bad. */
index 919f99b16834160dd8cc87faf9b8802aa02796cf..c75aa3f419b1b8d2166e6afba94def17b57e292d 100644 (file)
@@ -411,8 +411,8 @@ static int orangefs_file_mmap_prepare(struct vm_area_desc *desc)
                     "orangefs_file_mmap: called on %pD\n", file);
 
        /* set the sequential readahead hint */
-       desc->vm_flags |= VM_SEQ_READ;
-       desc->vm_flags &= ~VM_RAND_READ;
+       vma_desc_set_flags(desc, VMA_SEQ_READ_BIT);
+       vma_desc_clear_flags(desc, VMA_RAND_READ_BIT);
 
        file_accessed(file);
        desc->vm_ops = &orangefs_file_vm_ops;
index 77b8ca2757e0d698a9281b48637ade61dd2fdfff..0f8e838ece0785331dd474bda277dc2c52f2e3d6 100644 (file)
@@ -264,7 +264,7 @@ out:
  */
 static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc)
 {
-       if (!is_nommu_shared_mapping(desc->vm_flags))
+       if (!is_nommu_shared_vma_flags(&desc->vma_flags))
                return -ENOSYS;
 
        file_accessed(desc->file);
index 0bfc13c5b96d76abd177efd6e4d559a43354fab7..e81d71abfe54db5f0162e99dc7b5937c89b78afe 100644 (file)
@@ -1044,7 +1044,7 @@ static int pseudo_lock_dev_mmap_prepare(struct vm_area_desc *desc)
         * Ensure changes are carried directly to the memory being mapped,
         * do not allow copy-on-write mapping.
         */
-       if (!(desc->vm_flags & VM_SHARED)) {
+       if (!vma_desc_test_flags(desc, VMA_SHARED_BIT)) {
                mutex_unlock(&rdtgroup_mutex);
                return -EINVAL;
        }
index 4b77c6dc4418141057718ea208839e35e22eb771..7c3a1a7fecee72ece59c8b1c0170c560c25fbfec 100644 (file)
@@ -63,7 +63,7 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
  */
 static int romfs_mmap_prepare(struct vm_area_desc *desc)
 {
-       return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS;
+       return is_nommu_shared_vma_flags(&desc->vma_flags) ? 0 : -ENOSYS;
 }
 
 static unsigned romfs_mmap_capabilities(struct file *file)
index 7874cf745af372fe8d90af09c6916d4c635472e0..1238ec018bc75b42ec079812f6af10336f050a1e 100644 (file)
@@ -1974,14 +1974,14 @@ xfs_file_mmap_prepare(
         * We don't support synchronous mappings for non-DAX files and
         * for DAX files if underneath dax_device is not synchronous.
         */
-       if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
+       if (!daxdev_mapping_supported(desc, file_inode(file),
                                      target->bt_daxdev))
                return -EOPNOTSUPP;
 
        file_accessed(file);
        desc->vm_ops = &xfs_file_vm_ops;
        if (IS_DAX(inode))
-               desc->vm_flags |= VM_HUGEPAGE;
+               vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
        return 0;
 }
 
index c1e5e30e90a09743dabf1a8fd93b35922dcb74bf..8a7161fc49e5c86d2008be516092028699adbacb 100644 (file)
@@ -333,7 +333,8 @@ static int zonefs_file_mmap_prepare(struct vm_area_desc *desc)
         * ordering between msync() and page cache writeback.
         */
        if (zonefs_inode_is_seq(file_inode(file)) &&
-           (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+           vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+           vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
                return -EINVAL;
 
        file_accessed(file);
index 9d624f4d9df66d2111112770baa2a3e394f9d92d..bf103f317cac0ddb727f867125442e82dabc0291 100644 (file)
@@ -65,11 +65,11 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
 /*
  * Check if given mapping is supported by the file / underlying device.
  */
-static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
                                            const struct inode *inode,
                                            struct dax_device *dax_dev)
 {
-       if (!(vm_flags & VM_SYNC))
+       if (!vma_desc_test_flags(desc, VMA_SYNC_BIT))
                return true;
        if (!IS_DAX(inode))
                return false;
@@ -111,11 +111,11 @@ static inline void set_dax_nomc(struct dax_device *dax_dev)
 static inline void set_dax_synchronous(struct dax_device *dax_dev)
 {
 }
-static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
                                            const struct inode *inode,
                                            struct dax_device *dax_dev)
 {
-       return !(vm_flags & VM_SYNC);
+       return !vma_desc_test_flags(desc, VMA_SYNC_BIT);
 }
 static inline size_t dax_recovery_write(struct dax_device *dax_dev,
                pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
index aa99b28e7a8aebab9dae6bdd6c592defc533c9d0..05d950805701d06c87a97e3d5b7b9b82935f6728 100644 (file)
@@ -550,17 +550,18 @@ enum {
 /*
  * Physically remapped pages are special. Tell the
  * rest of the world about it:
- *   VM_IO tells people not to look at these pages
+ *   IO tells people not to look at these pages
  *     (accesses can have side effects).
- *   VM_PFNMAP tells the core MM that the base pages are just
+ *   PFNMAP tells the core MM that the base pages are just
  *     raw PFN mappings, and do not have a "struct page" associated
  *     with them.
- *   VM_DONTEXPAND
+ *   DONTEXPAND
  *      Disable vma merging and expanding with mremap().
- *   VM_DONTDUMP
+ *   DONTDUMP
  *      Omit vma from core dump, even when VM_IO turned off.
  */
-#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT,       \
+                                    VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
 
 /* This mask prevents VMA from being scanned with khugepaged */
 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
@@ -1925,6 +1926,14 @@ static inline bool is_cow_mapping(vm_flags_t flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+static inline bool vma_desc_is_cow_mapping(struct vm_area_desc *desc)
+{
+       const vma_flags_t *flags = &desc->vma_flags;
+
+       return vma_flags_test(flags, VMA_MAYWRITE_BIT) &&
+               !vma_flags_test(flags, VMA_SHARED_BIT);
+}
+
 #ifndef CONFIG_MMU
 static inline bool is_nommu_shared_mapping(vm_flags_t flags)
 {
@@ -1938,6 +1947,11 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
         */
        return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
 }
+
+static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags)
+{
+       return vma_flags_test(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
+}
 #endif
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
index e36f6b926f7f044efd36d0a1c55a106969d7cdfe..1c8e88259df01441aaf5ccfa2d8e41f27924a345 100644 (file)
@@ -92,7 +92,7 @@ static int relay_mmap_prepare_buf(struct rchan_buf *buf,
                return -EINVAL;
 
        desc->vm_ops = &relay_file_mmap_ops;
-       desc->vm_flags |= VM_DONTEXPAND;
+       vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
        desc->private_data = buf;
 
        return 0;
index 136b80ca357bb964a169552ef3658f964e690572..9ee60d87969b9023d445a84a1525e0938dd16f83 100644 (file)
@@ -2957,7 +2957,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
        return 0;
 }
 
-static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
+static int get_remap_pgoff(bool is_cow, unsigned long addr,
                unsigned long end, unsigned long vm_start, unsigned long vm_end,
                unsigned long pfn, pgoff_t *vm_pgoff_p)
 {
@@ -2967,7 +2967,7 @@ static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
-       if (is_cow_mapping(vm_flags)) {
+       if (is_cow) {
                if (addr != vm_start || end != vm_end)
                        return -EINVAL;
                *vm_pgoff_p = pfn;
@@ -2988,7 +2988,7 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long ad
        if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
                return -EINVAL;
 
-       VM_WARN_ON_ONCE((vma->vm_flags & VM_REMAP_FLAGS) != VM_REMAP_FLAGS);
+       VM_WARN_ON_ONCE(!vma_test_all_flags_mask(vma, VMA_REMAP_FLAGS));
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
@@ -3112,9 +3112,9 @@ void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
         * check it again on complete and will fail there if specified addr is
         * invalid.
         */
-       get_remap_pgoff(desc->vm_flags, desc->start, desc->end,
+       get_remap_pgoff(vma_desc_is_cow_mapping(desc), desc->start, desc->end,
                        desc->start, desc->end, pfn, &desc->pgoff);
-       desc->vm_flags |= VM_REMAP_FLAGS;
+       vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS);
 }
 
 static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr,
@@ -3123,13 +3123,12 @@ static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long
        unsigned long end = addr + PAGE_ALIGN(size);
        int err;
 
-       err = get_remap_pgoff(vma->vm_flags, addr, end,
-                             vma->vm_start, vma->vm_end,
-                             pfn, &vma->vm_pgoff);
+       err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end,
+                             vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff);
        if (err)
                return err;
 
-       vm_flags_set(vma, VM_REMAP_FLAGS);
+       vma_set_flags_mask(vma, VMA_REMAP_FLAGS);
        return 0;
 }