/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(desc->vm_flags);
+ return is_nommu_shared_vma_flags(&desc->vma_flags);
}
#else
desc->vm_ops = &mmap_mem_ops;
- /* Remap-pfn-range will mark the range VM_IO. */
+ /* Remap-pfn-range will set VMA_IO_BIT on the range. */
mmap_action_remap_full(desc, desc->pgoff);
/* We filter remap errors to -EAGAIN. */
desc->action.error_hook = mmap_filter_error;
#ifndef CONFIG_MMU
return -ENOSYS;
#endif
- if (desc->vm_flags & VM_SHARED)
+ if (vma_desc_test_flags(desc, VMA_SHARED_BIT))
return shmem_zero_setup_desc(desc);
desc->action.success_hook = mmap_zero_private_success;
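A note for reviewers: every hunk in this series funnels flag access through a small typed-accessor family. The real definitions belong to the flags rework itself; what follows is only a sketch of the assumed shape, taking vma_flags_t to be an opaque single-word wrapper and multi-bit tests to have any-set semantics (both are assumptions inferred from the call sites, not the series' actual header):

/* Sketch only: the assumed accessor shape, not the real header. */
typedef struct {
	unsigned long __bits;	/* assumed single-word storage */
} vma_flags_t;

/* Build a vma_flags_t from bit numbers (GNU C statement expression). */
#define mk_vma_flags(...)						\
({									\
	const unsigned int __bit[] = { __VA_ARGS__ };			\
	vma_flags_t __f = { 0 };					\
	unsigned int __i;						\
									\
	for (__i = 0; __i < ARRAY_SIZE(__bit); __i++)			\
		__f.__bits |= 1UL << __bit[__i];			\
	__f;								\
})

/* True if any of the named bits is set (assumed semantics). */
#define vma_flags_test(flags, ...)					\
	(((flags)->__bits & mk_vma_flags(__VA_ARGS__).__bits) != 0)

#define vma_desc_test_flags(desc, ...)					\
	vma_flags_test(&(desc)->vma_flags, __VA_ARGS__)

#define vma_desc_set_flags(desc, ...)					\
	((desc)->vma_flags.__bits |= mk_vma_flags(__VA_ARGS__).__bits)

#define vma_desc_clear_flags(desc, ...)					\
	((desc)->vma_flags.__bits &= ~mk_vma_flags(__VA_ARGS__).__bits)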
#include "dax-private.h"
#include "bus.h"
-static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+static int __check_vma(struct dev_dax *dev_dax, vma_flags_t flags,
unsigned long start, unsigned long end, struct file *file,
const char *func)
{
return -ENXIO;
/* prevent private mappings from being established */
- if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+ if (!vma_flags_test(&flags, VMA_MAYSHARE_BIT)) {
dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func);
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
const char *func)
{
- return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+ return __check_vma(dev_dax, vma->flags, vma->vm_start, vma->vm_end,
vma->vm_file, func);
}
* fault time.
*/
id = dax_read_lock();
- rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+ rc = __check_vma(dev_dax, desc->vma_flags, desc->start, desc->end, filp,
__func__);
dax_read_unlock(id);
if (rc)
return rc;
desc->vm_ops = &dax_vm_ops;
- desc->vm_flags |= VM_HUGEPAGE;
+ vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
return 0;
}
static int aio_ring_mmap_prepare(struct vm_area_desc *desc)
{
- desc->vm_flags |= VM_DONTEXPAND;
+ vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
desc->vm_ops = &aio_ring_vm_ops;
return 0;
}
if (!IS_DAX(file_inode(desc->file)))
return generic_file_readonly_mmap_prepare(desc);
- if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+ if (vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+ vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
return -EINVAL;
desc->vm_ops = &erofs_dax_vm_ops;
- desc->vm_flags |= VM_HUGEPAGE;
+ vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
return 0;
}
#else
* We don't support synchronous mappings for non-DAX files and
* for DAX files if underneath dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev))
+ if (!daxdev_mapping_supported(desc, file_inode(file), dax_dev))
return -EOPNOTSUPP;
file_accessed(file);
if (IS_DAX(file_inode(file))) {
desc->vm_ops = &ext4_dax_vm_ops;
- desc->vm_flags |= VM_HUGEPAGE;
+ vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
} else {
desc->vm_ops = &ext4_file_vm_ops;
}
struct inode *inode = file_inode(file);
struct ntfs_inode *ni = ntfs_i(inode);
u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
- bool rw = desc->vm_flags & VM_WRITE;
+ const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT);
int err;
/* Avoid any operation if inode is bad. */
"orangefs_file_mmap: called on %pD\n", file);
/* set the sequential readahead hint */
- desc->vm_flags |= VM_SEQ_READ;
- desc->vm_flags &= ~VM_RAND_READ;
+ vma_desc_set_flags(desc, VMA_SEQ_READ_BIT);
+ vma_desc_clear_flags(desc, VMA_RAND_READ_BIT);
file_accessed(file);
desc->vm_ops = &orangefs_file_vm_ops;
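The converted hooks above all follow one pattern: test or adjust flags on the descriptor, then install vm_ops. As a consolidated illustration, a hypothetical driver hook (foo_mmap_prepare, foo_vm_ops and the -EINVAL policy are invented for this example; the helpers are the ones used throughout the patch) might read:

static int foo_mmap_prepare(struct vm_area_desc *desc)
{
	/* Refuse copy-on-write mappings of the shared buffer. */
	if (!vma_desc_test_flags(desc, VMA_SHARED_BIT))
		return -EINVAL;

	/* Fix the mapping size and keep it out of core dumps. */
	vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
	vma_desc_set_flags(desc, VMA_DONTDUMP_BIT);

	file_accessed(desc->file);
	desc->vm_ops = &foo_vm_ops;
	return 0;
}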
*/
static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc)
{
- if (!is_nommu_shared_mapping(desc->vm_flags))
+ if (!is_nommu_shared_vma_flags(&desc->vma_flags))
return -ENOSYS;
file_accessed(desc->file);
* Ensure changes are carried directly to the memory being mapped,
* do not allow copy-on-write mapping.
*/
- if (!(desc->vm_flags & VM_SHARED)) {
+ if (!vma_desc_test_flags(desc, VMA_SHARED_BIT)) {
mutex_unlock(&rdtgroup_mutex);
return -EINVAL;
}
*/
static int romfs_mmap_prepare(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS;
+ return is_nommu_shared_vma_flags(&desc->vma_flags) ? 0 : -ENOSYS;
}
static unsigned romfs_mmap_capabilities(struct file *file)
* We don't support synchronous mappings for non-DAX files and
* for DAX files if underneath dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
+ if (!daxdev_mapping_supported(desc, file_inode(file),
target->bt_daxdev))
return -EOPNOTSUPP;
file_accessed(file);
desc->vm_ops = &xfs_file_vm_ops;
if (IS_DAX(inode))
- desc->vm_flags |= VM_HUGEPAGE;
+ vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
return 0;
}
* ordering between msync() and page cache writeback.
*/
if (zonefs_inode_is_seq(file_inode(file)) &&
- (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
+ vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
+ vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
return -EINVAL;
file_accessed(file);
/*
* Check if given mapping is supported by the file / underlying device.
*/
-static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
const struct inode *inode,
struct dax_device *dax_dev)
{
- if (!(vm_flags & VM_SYNC))
+ if (!vma_desc_test_flags(desc, VMA_SYNC_BIT))
return true;
if (!IS_DAX(inode))
return false;
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
-static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+static inline bool daxdev_mapping_supported(const struct vm_area_desc *desc,
const struct inode *inode,
struct dax_device *dax_dev)
{
- return !(vm_flags & VM_SYNC);
+ return !vma_desc_test_flags(desc, VMA_SYNC_BIT);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
- * VM_IO tells people not to look at these pages
+ * VMA_IO_BIT tells people not to look at these pages
* (accesses can have side effects).
- * VM_PFNMAP tells the core MM that the base pages are just
+ * VMA_PFNMAP_BIT tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
- * VM_DONTEXPAND
+ * VMA_DONTEXPAND_BIT
* Disable vma merging and expanding with mremap().
- * VM_DONTDUMP
+ * VMA_DONTDUMP_BIT
- * Omit vma from core dump, even when VM_IO turned off.
+ * Omit vma from core dump, even when VMA_IO_BIT is turned off.
*/
-#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
+ VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
/* This mask prevents VMA from being scanned with khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
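+
+/*
+ * Typed-flags counterpart of is_cow_mapping() above: a mapping is
+ * copy-on-write when it may be written but is not shared.
+ */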
+static inline bool vma_desc_is_cow_mapping(const struct vm_area_desc *desc)
+{
+ const vma_flags_t *flags = &desc->vma_flags;
+
+ return vma_flags_test(flags, VMA_MAYWRITE_BIT) &&
+ !vma_flags_test(flags, VMA_SHARED_BIT);
+}
+
#ifndef CONFIG_MMU
static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
*/
return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
+
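+/* Typed-flags counterpart of is_nommu_shared_mapping() above. */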
+static inline bool is_nommu_shared_vma_flags(const vma_flags_t *flags)
+{
+ return vma_flags_test(flags, VMA_MAYSHARE_BIT, VMA_MAYOVERLAY_BIT);
+}
#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
return -EINVAL;
desc->vm_ops = &relay_file_mmap_ops;
- desc->vm_flags |= VM_DONTEXPAND;
+ vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT);
desc->private_data = buf;
return 0;
return 0;
}
-static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
+static int get_remap_pgoff(bool is_cow, unsigned long addr,
unsigned long end, unsigned long vm_start, unsigned long vm_end,
unsigned long pfn, pgoff_t *vm_pgoff_p)
{
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
- if (is_cow_mapping(vm_flags)) {
+ if (is_cow) {
if (addr != vm_start || end != vm_end)
return -EINVAL;
*vm_pgoff_p = pfn;
if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
return -EINVAL;
- VM_WARN_ON_ONCE((vma->vm_flags & VM_REMAP_FLAGS) != VM_REMAP_FLAGS);
+ VM_WARN_ON_ONCE(!vma_test_all_flags_mask(vma, VMA_REMAP_FLAGS));
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
* check it again on complete and will fail there if specified addr is
* invalid.
*/
- get_remap_pgoff(desc->vm_flags, desc->start, desc->end,
+ get_remap_pgoff(vma_desc_is_cow_mapping(desc), desc->start, desc->end,
desc->start, desc->end, pfn, &desc->pgoff);
- desc->vm_flags |= VM_REMAP_FLAGS;
+ vma_desc_set_flags_mask(desc, VMA_REMAP_FLAGS);
}
static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr,
unsigned long end = addr + PAGE_ALIGN(size);
int err;
- err = get_remap_pgoff(vma->vm_flags, addr, end,
- vma->vm_start, vma->vm_end,
- pfn, &vma->vm_pgoff);
+ err = get_remap_pgoff(is_cow_mapping(vma->vm_flags), addr, end,
+ vma->vm_start, vma->vm_end, pfn, &vma->vm_pgoff);
if (err)
return err;
- vm_flags_set(vma, VM_REMAP_FLAGS);
+ vma_set_flags_mask(vma, VMA_REMAP_FLAGS);
return 0;
}
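The remap path additionally needs whole-mask variants. Continuing the same single-word sketch (again an assumption, not the series' real definitions; the locking note is an educated guess from what vm_flags_set() does today):

static inline void vma_desc_set_flags_mask(struct vm_area_desc *desc,
					   vma_flags_t mask)
{
	desc->vma_flags.__bits |= mask.__bits;
}

static inline void vma_set_flags_mask(struct vm_area_struct *vma,
				      vma_flags_t mask)
{
	/* The real helper presumably write-locks the VMA first. */
	vma->flags.__bits |= mask.__bits;
}

/* True only if every bit of @mask is set (matches the WARN usage above). */
static inline bool vma_test_all_flags_mask(const struct vm_area_struct *vma,
					   vma_flags_t mask)
{
	return (vma->flags.__bits & mask.__bits) == mask.__bits;
}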