From c6f3c5ee40c10bb65725047a220570f718507001 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Date: Fri, 5 Apr 2019 18:39:10 -0700
Subject: mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()

From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

commit c6f3c5ee40c10bb65725047a220570f718507001 upstream.

With some architectures like ppc64, set_pmd_at() cannot cope with a
situation where there is already some (different) valid entry present.

Use pmdp_set_access_flags() instead to modify the pfn, since it is built
to deal with modifying existing PMD entries.

This is similar to commit cae85cb8add3 ("mm/memory.c: fix modifying of
page protection by insert_pfn()").

We also make a similar update for insert_pfn_pud(), even though ppc64
does not support pud pfn entries at the moment.

Without this patch we also see the following message in the kernel log:
"BUG: non-zero pgtables_bytes on freeing mm:"

Link: http://lkml.kernel.org/r/20190402115125.18803-1-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reported-by: Chandan Rajendra <chandan@linux.ibm.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 mm/huge_memory.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
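
For orientation, here is a minimal sketch of the check that the first hunk
below adds to insert_pfn_pmd(). It only paraphrases the diff (no helpers
beyond those used in the hunk), and the insert_pfn_pud() hunk mirrors it
with the corresponding pud_* helpers:

	/* Sketch of the added PMD path, paraphrasing the hunk below. */
	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		/* A valid PMD already exists: never call set_pmd_at() over it. */
		if (write) {
			/*
			 * An existing entry with a different pfn should only be
			 * the huge zero page; warn otherwise and leave it alone.
			 */
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			/* Update access/dirty/write bits of the entry in place. */
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}
		goto out_unlock;
	}
	/* ... otherwise fall through and install a fresh entry as before ... */

The remaining hunks make sure the page table pre-allocated by the caller is
released with pte_free() when the new early-return path does not consume it.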

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_are
 	spinlock_t *ptl;
 
 	ptl = pmd_lock(mm, pmd);
+	if (!pmd_none(*pmd)) {
+		if (write) {
+			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+				goto out_unlock;
+			}
+			entry = pmd_mkyoung(*pmd);
+			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+				update_mmu_cache_pmd(vma, addr, pmd);
+		}
+
+		goto out_unlock;
+	}
+
 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
 	if (pfn_t_devmap(pfn))
 		entry = pmd_mkdevmap(entry);
@@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_are
 	if (pgtable) {
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 		mm_inc_nr_ptes(mm);
+		pgtable = NULL;
 	}
 
 	set_pmd_at(mm, addr, pmd, entry);
 	update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
 	spin_unlock(ptl);
+	if (pgtable)
+		pte_free(mm, pgtable);
 }
 
 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_are
 	spinlock_t *ptl;
 
 	ptl = pud_lock(mm, pud);
+	if (!pud_none(*pud)) {
+		if (write) {
+			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+				goto out_unlock;
+			}
+			entry = pud_mkyoung(*pud);
+			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+				update_mmu_cache_pud(vma, addr, pud);
+		}
+		goto out_unlock;
+	}
+
 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
 	if (pfn_t_devmap(pfn))
 		entry = pud_mkdevmap(entry);
@@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_are
 	}
 	set_pud_at(mm, addr, pud, entry);
 	update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
 	spin_unlock(ptl);
 }
 