git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm: remove rest usage of VM_NONLINEAR and pte_file()
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Tue, 10 Feb 2015 22:10:04 +0000 (14:10 -0800)
committer Ben Hutchings <ben@decadent.org.uk>
          Wed, 3 Oct 2018 03:09:52 +0000 (04:09 +0100)
commit 0661a33611fca12570cba48d9344ce68834ee86c upstream.

One bit in ->vm_flags is unused now!

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.16: Drop changes in mm/debug.c]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
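For context: remap_file_pages() was replaced by an emulation earlier in this
series, so a pte that is not present and not none can only encode a swap-type
entry, never a file offset, and every pte_file() test below is dead code. A
minimal before/after sketch of the pattern being removed (3.16-era names;
pte, pgoff and entry are illustrative locals):

	/* Before: a non-present, non-none pte needed disambiguation. */
	if (!pte_present(pte) && !pte_none(pte)) {
		if (pte_file(pte))
			pgoff = pte_to_pgoff(pte);	/* non-linear file pte */
		else
			entry = pte_to_swp_entry(pte);	/* swap/migration entry */
	}

	/* After: the swap-type encoding is the only case left. */
	if (!pte_present(pte) && !pte_none(pte))
		entry = pte_to_swp_entry(pte);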
12 files changed:
drivers/gpu/drm/drm_vma_manager.c
include/linux/mm.h
include/linux/swapops.h
mm/gup.c
mm/ksm.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/mincore.c
mm/mprotect.c
mm/mremap.c
mm/msync.c

index 63b471205072fb5bf2c11b2273efe1b9a7e63b5d..68c1f32fb086babba23c883cb6ec5b2f431dd3d6 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -50,8 +50,7 @@
  *
  * You must not use multiple offset managers on a single address_space.
  * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
- * no longer be linear. Please use VM_NONLINEAR in that case and implement your
- * own offset managers.
+ * no longer be linear.
  *
  * This offset manager works on page-based addresses. That is, every argument
  * and return code (with the exception of drm_vma_node_offset_addr()) is given
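The comment above states the manager's one hard rule; a hedged usage sketch
of the page-based convention it describes (3.16-era API; bo, npages and the
DRM_FILE_PAGE_OFFSET_* constants are illustrative):

	struct drm_vma_offset_manager mgr;
	__u64 offset;
	int ret;

	/* One manager per address_space; start and size are in pages. */
	drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	/* Reserve a mmap offset covering npages pages for a buffer object. */
	ret = drm_vma_offset_add(&mgr, &bo->vma_node, npages);
	if (ret)
		return ret;

	/* The one byte-based helper: the offset userspace passes to mmap(). */
	offset = drm_vma_node_offset_addr(&bo->vma_node);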
index a7d4e04d3dcb35cb398b181bff223cff2e9aa3e2..2f595d7e00d2c29f49183b2b1aa7fe59291ad9f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -125,7 +125,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ACCOUNT     0x00100000      /* Is a VM accounted object */
 #define VM_NORESERVE   0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
-#define VM_NONLINEAR   0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_ARCH_1      0x01000000      /* Architecture-specific flag */
 #define VM_DONTDUMP    0x04000000      /* Do not include in the core dump */
 
index e288d5c016a72223ac85af4007db3edbce412251..831a3168ab35a6d985896186b74df1e5322d2c4f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
 /* check whether a pte points to a swap entry */
 static inline int is_swap_pte(pte_t pte)
 {
-       return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte);
+       return !pte_none(pte) && !pte_present_nonuma(pte);
 }
 #endif
 
@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 {
        swp_entry_t arch_entry;
 
-       BUG_ON(pte_file(pte));
        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
        swp_entry_t arch_entry;
 
        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
-       BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
        return __swp_entry_to_pte(arch_entry);
 }
 
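With the pte_file() assertions gone, both converters reduce to the arch
round trip. The swp_entry_t accessors from this header compose as follows
(swap_type and page_offset are illustrative values):

	/* Pack a (type, offset) pair; the accessors invert it exactly. */
	swp_entry_t entry = swp_entry(swap_type, page_offset);

	BUG_ON(swp_type(entry) != swap_type);
	BUG_ON(swp_offset(entry) != page_offset);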
index 4d2fd27b33abb8beac7f4cb66facb381c243aec9..b4164bfca326a85d1d34712ec64ecd53654fb619 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -61,7 +61,7 @@ retry:
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
-               if (pte_none(pte) || pte_file(pte))
+               if (pte_none(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
index d8c9c689862c5f7bdf4bac0fc8ffe4868d8e075a..032ebc32c26f4bfb980fe762d5aec84a1a521a45 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1749,7 +1749,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 */
                if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
                                 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
-                                VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
+                                VM_HUGETLB | VM_MIXEDMAP))
                        return 0;               /* just ignore the advice */
 
 #ifdef VM_SAO
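From userspace this path is reached via MADV_MERGEABLE; a self-contained
sketch (KSM silently ignores the advice for the special mappings listed
above, and madvise() still returns success):

	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 64 * 4096;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		/* Ask KSM to merge identical pages in this range; for
		 * VM_HUGETLB etc. the advice is ignored but succeeds. */
		if (madvise(p, len, MADV_MERGEABLE))
			return 1;
		munmap(p, len);
		return 0;
	}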
index a0ac92a4b0c6c7592e2ea0c2a91348a1fce45a96..04061738664831a41c38225036a961a0c5704b41 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -155,7 +155,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
                pte = *(orig_pte + ((index - start) / PAGE_SIZE));
                pte_unmap_unlock(orig_pte, ptl);
 
-               if (pte_present(pte) || pte_none(pte) || pte_file(pte))
+               if (pte_present(pte) || pte_none(pte))
                        continue;
                entry = pte_to_swp_entry(pte);
                if (unlikely(non_swap_entry(entry)))
@@ -298,7 +298,7 @@ static long madvise_remove(struct vm_area_struct *vma,
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
-       if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+       if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB))
                return -EINVAL;
 
        f = vma->vm_file;
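madvise_remove() is the MADV_REMOVE backend, which punches the backing pages
out of a shared mapping; a self-contained sketch (shared anonymous memory is
shmem-backed, which supports the operation):

	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 16 * 4096;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		p[0] = 1;	/* populate the first page */
		/* Drop the backing pages; fails with EINVAL on VM_LOCKED
		 * or VM_HUGETLB VMAs, per the check above. */
		if (madvise(p, len, MADV_REMOVE))
			return 1;
		munmap(p, len);
		return 0;
	}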
index 2bfd852934ac14661cb94101369c2603525e51cd..543a3e2e11c3c88fb444792c12d8e1ae32547e58 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6580,10 +6580,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                return NULL;
 
        mapping = vma->vm_file->f_mapping;
-       if (pte_none(ptent))
-               pgoff = linear_page_index(vma, addr);
-       else /* pte_file(ptent) is true */
-               pgoff = pte_to_pgoff(ptent);
+       pgoff = linear_page_index(vma, addr);
 
        /* page is moved even if it's not RSS of this task(page-faulted). */
 #ifdef CONFIG_SWAP
@@ -6616,7 +6613,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                page = mc_handle_present_pte(vma, addr, ptent);
        else if (is_swap_pte(ptent))
                page = mc_handle_swap_pte(vma, addr, ptent, &ent);
-       else if (pte_none(ptent) || pte_file(ptent))
+       else if (pte_none(ptent))
                page = mc_handle_file_pte(vma, addr, ptent, &ent);
 
        if (!page && !ent.val)
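mc_handle_file_pte() can now assume a linear mapping, so the file offset is
plain arithmetic; hugetlb aside, linear_page_index() from <linux/mm.h> is
effectively:

	/* Page offset into the file for a linear mapping. */
	pgoff = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;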
index 02b18069bdad353163e948a1536904993f5efe46..c1246c754eeff0b30a460591c8016dd0512691f6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -810,42 +810,40 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        /* pte contains position in swap or file, so copy. */
        if (unlikely(!pte_present(pte))) {
-               if (!pte_file(pte)) {
-                       swp_entry_t entry = pte_to_swp_entry(pte);
-
-                       if (likely(!non_swap_entry(entry))) {
-                               if (swap_duplicate(entry) < 0)
-                                       return entry.val;
-
-                               /* make sure dst_mm is on swapoff's mmlist. */
-                               if (unlikely(list_empty(&dst_mm->mmlist))) {
-                                       spin_lock(&mmlist_lock);
-                                       if (list_empty(&dst_mm->mmlist))
-                                               list_add(&dst_mm->mmlist,
-                                                        &src_mm->mmlist);
-                                       spin_unlock(&mmlist_lock);
-                               }
-                               rss[MM_SWAPENTS]++;
-                       } else if (is_migration_entry(entry)) {
-                               page = migration_entry_to_page(entry);
-
-                               if (PageAnon(page))
-                                       rss[MM_ANONPAGES]++;
-                               else
-                                       rss[MM_FILEPAGES]++;
-
-                               if (is_write_migration_entry(entry) &&
-                                   is_cow_mapping(vm_flags)) {
-                                       /*
-                                        * COW mappings require pages in both
-                                        * parent and child to be set to read.
-                                        */
-                                       make_migration_entry_read(&entry);
-                                       pte = swp_entry_to_pte(entry);
-                                       if (pte_swp_soft_dirty(*src_pte))
-                                               pte = pte_swp_mksoft_dirty(pte);
-                                       set_pte_at(src_mm, addr, src_pte, pte);
-                               }
+               swp_entry_t entry = pte_to_swp_entry(pte);
+
+               if (likely(!non_swap_entry(entry))) {
+                       if (swap_duplicate(entry) < 0)
+                               return entry.val;
+
+                       /* make sure dst_mm is on swapoff's mmlist. */
+                       if (unlikely(list_empty(&dst_mm->mmlist))) {
+                               spin_lock(&mmlist_lock);
+                               if (list_empty(&dst_mm->mmlist))
+                                       list_add(&dst_mm->mmlist,
+                                                       &src_mm->mmlist);
+                               spin_unlock(&mmlist_lock);
+                       }
+                       rss[MM_SWAPENTS]++;
+               } else if (is_migration_entry(entry)) {
+                       page = migration_entry_to_page(entry);
+
+                       if (PageAnon(page))
+                               rss[MM_ANONPAGES]++;
+                       else
+                               rss[MM_FILEPAGES]++;
+
+                       if (is_write_migration_entry(entry) &&
+                                       is_cow_mapping(vm_flags)) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&entry);
+                               pte = swp_entry_to_pte(entry);
+                               if (pte_swp_soft_dirty(*src_pte))
+                                       pte = pte_swp_mksoft_dirty(pte);
+                               set_pte_at(src_mm, addr, src_pte, pte);
                        }
                }
                goto out_set_pte;
@@ -1019,11 +1017,9 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
-       if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
-                              VM_PFNMAP | VM_MIXEDMAP))) {
-               if (!vma->anon_vma)
-                       return 0;
-       }
+       if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
+                       !vma->anon_vma)
+               return 0;
 
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
index 725c809610483c6822d3ffa158820d3ff64da1a6..3c4c8f6ab57e12f0e8b84cb8143d92951a27a433 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -124,17 +124,13 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                pte_t pte = *ptep;
-               pgoff_t pgoff;
 
                next = addr + PAGE_SIZE;
                if (pte_none(pte))
                        mincore_unmapped_range(vma, addr, next, vec);
                else if (pte_present(pte))
                        *vec = 1;
-               else if (pte_file(pte)) {
-                       pgoff = pte_to_pgoff(pte);
-                       *vec = mincore_page(vma->vm_file->f_mapping, pgoff);
-               } else { /* pte is a swap entry */
+               else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
                        if (is_migration_entry(entry)) {
@@ -142,9 +138,8 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                *vec = 1;
                        } else {
 #ifdef CONFIG_SWAP
-                               pgoff = entry.val;
                                *vec = mincore_page(swap_address_space(entry),
-                                       pgoff);
+                                       entry.val);
 #else
                                WARN_ON(1);
                                *vec = 1;
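The walk above fills the caller's vector one byte per page; the userspace
side for reference, self-contained and deterministic because it queries a
fresh anonymous mapping:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		size_t len = 4 * psz;
		unsigned char vec[4];
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		p[2 * psz] = 1;		/* fault in page 2 only */
		if (mincore(p, len, vec))
			return 1;
		for (int i = 0; i < 4; i++)	/* expect only page 2 set */
			printf("page %d: %d\n", i, vec[i] & 1);
		munmap(p, len);
		return 0;
	}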
index bfc99abb627791475861ff9856c394fcdb8a9353..ac4130d265c55f1c90af7841922c970a92cc7ad7 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -110,7 +110,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        }
                        if (updated)
                                pages++;
-               } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
+               } else if (IS_ENABLED(CONFIG_MIGRATION)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
                        if (is_write_migration_entry(entry)) {
index 05f1180e9f21822e99a5f11a2a7a03af663a422c..6d49f62a48633d8288b11602aafba62b0b796241 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -81,8 +81,6 @@ static pte_t move_soft_dirty_pte(pte_t pte)
                pte = pte_mksoft_dirty(pte);
        else if (is_swap_pte(pte))
                pte = pte_swp_mksoft_dirty(pte);
-       else if (pte_file(pte))
-               pte = pte_file_mksoft_dirty(pte);
 #endif
        return pte;
 }
index 992a1673d488dbbb38aa506c15eb0533f553ed4a..bb04d53ae8529597d3b0ca6636c49d5243762875 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -86,10 +86,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
                                (vma->vm_flags & VM_SHARED)) {
                        get_file(file);
                        up_read(&mm->mmap_sem);
-                       if (vma->vm_flags & VM_NONLINEAR)
-                               error = vfs_fsync(file, 1);
-                       else
-                               error = vfs_fsync_range(file, fstart, fend, 1);
+                       error = vfs_fsync_range(file, fstart, fend, 1);
                        fput(file);
                        if (error || start >= end)
                                goto out;
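With the VM_NONLINEAR branch gone, MS_SYNC on a shared file mapping always
maps to a ranged vfs_fsync_range() over the covered span. The matching
userspace pattern ("data.bin" is an illustrative file name):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("data.bin", O_RDWR | O_CREAT, 0644);

		if (fd < 0 || ftruncate(fd, 4096))
			return 1;
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		memcpy(p, "hello", 5);
		/* Flush just this range to storage; the kernel issues a
		 * ranged fsync for the file span the mapping covers. */
		if (msync(p, 4096, MS_SYNC))
			return 1;
		munmap(p, 4096);
		close(fd);
		return 0;
	}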