proc: drop handling non-linear mappings
Author:     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
AuthorDate: Tue, 10 Feb 2015 22:09:57 +0000 (14:09 -0800)
Commit:     Ben Hutchings <ben@decadent.org.uk>
CommitDate: Wed, 3 Oct 2018 03:09:51 +0000 (04:09 +0100)
commit 1da4b35b001481df99a6dcab12d5d39a876f7056 upstream.

The code handling non-linear mappings for /proc/PID/{smaps,clear_refs}
is unused now.  Let's drop it.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.16:
 - Deleted code is slightly different
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
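
The mappings whose accounting goes away here are the non-linear ones set up
with remap_file_pages(2). A minimal userspace sketch of such a mapping
(file name and error handling illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDONLY);	/* hypothetical file, >= 4 pages */
	if (fd < 0)
		return 1;

	/* Map four file pages linearly first; MAP_SHARED is required. */
	char *addr = mmap(NULL, 4 * page, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/*
	 * Make the first window of the mapping show file page 3 instead of
	 * page 0.  page->index then no longer matches linear_page_index(),
	 * which is what the "Nonlinear:" counter removed below measured,
	 * and the VMA gains VM_NONLINEAR (the "nl" VmFlags mnemonic).
	 */
	if (remap_file_pages(addr, page, 0, 3, 0) != 0)
		return 1;

	return 0;
}
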
fs/proc/task_mmu.c

index 9380e4e291a6956269433208e59df5112aea098b..a6242e29eef141e6362904f8d1ce268531e0ba5b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -436,7 +436,6 @@ struct mem_size_stats {
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
-       unsigned long nonlinear;
        u64 pss;
 };
 
@@ -446,7 +445,6 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 {
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
-       pgoff_t pgoff = linear_page_index(vma, addr);
        struct page *page = NULL;
        int mapcount;
 
@@ -459,9 +457,6 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
                        mss->swap += ptent_size;
                else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
-       } else if (pte_file(ptent)) {
-               if (pte_to_pgoff(ptent) != pgoff)
-                       mss->nonlinear += ptent_size;
        }
 
        if (!page)
@@ -470,9 +465,6 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
        if (PageAnon(page))
                mss->anonymous += ptent_size;
 
-       if (page->index != pgoff)
-               mss->nonlinear += ptent_size;
-
        mss->resident += ptent_size;
        /* Accumulate the size in pages that have been accessed. */
        if (pte_young(ptent) || PageReferenced(page))
@@ -554,7 +546,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
                [ilog2(VM_ACCOUNT)]     = "ac",
                [ilog2(VM_NORESERVE)]   = "nr",
                [ilog2(VM_HUGETLB)]     = "ht",
-               [ilog2(VM_NONLINEAR)]   = "nl",
                [ilog2(VM_ARCH_1)]      = "ar",
                [ilog2(VM_DONTDUMP)]    = "dd",
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -628,10 +619,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
-       if (vma->vm_flags & VM_NONLINEAR)
-               seq_printf(m, "Nonlinear:      %8lu kB\n",
-                               mss.nonlinear >> 10);
-
        show_smap_vma_flags(m, vma);
 
        if (m->count < m->size)  /* vma is copied successfully */
@@ -735,8 +722,6 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
                ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
-       } else if (pte_file(ptent)) {
-               ptent = pte_file_clear_soft_dirty(ptent);
        }
 
        set_pte_at(vma->vm_mm, addr, pte, ptent);
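
On an unpatched kernel, a VMA set up with remap_file_pages(2) carries the two
user-visible items removed above in /proc/PID/smaps, roughly as follows
(values, spacing and neighbouring flags illustrative):

    Nonlinear:             8 kB
    VmFlags: rd sh mr me ms nl

With this patch applied, neither the Nonlinear: line nor the "nl" mnemonic is
emitted.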