From 96e7deb0a438672dc1ad9ef1d86e43d3ebd30126 Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Sun, 1 Nov 2020 17:07:40 -0800
Subject: mm: mempolicy: fix potential pte_unmap_unlock pte error

From: Shijie Luo <luoshijie1@huawei.com>

[ Upstream commit 3f08842098e842c51e3b97d0dcdebf810b32558e ]

When the flags in queue_pages_pte_range() have neither the MPOL_MF_MOVE
nor the MPOL_MF_MOVE_ALL bit set, the code breaks out of the pte loop
early, and passing the original pte - 1 to pte_unmap_unlock() is unsafe.

queue_pages_pte_range() can run in MPOL_MF_STRICT mode, which doesn't
migrate misplaced pages but returns with -EIO when encountering such a
page. Since commit a7f40cfe3b7a ("mm: mempolicy: make mbind() return
-EIO when MPOL_MF_STRICT is specified"), an early break on the first pte
in the range results in pte_unmap_unlock() being called on an underflowed
pte. This can lead to lockups later on, when somebody tries to take the
pte lock or the page_table_lock again.

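To make the failure mode concrete, here is a simplified sketch of the
loop (illustrative only, not the exact kernel source). If the break is
taken on the very first pte, pte has not yet been incremented, so
pte - 1 points one entry before the page table entry that was actually
mapped and locked:

	/* simplified sketch, not the exact kernel source */
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		...
		if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)))
			break;	/* can fire before pte is ever incremented */
	}
	pte_unmap_unlock(pte - 1, ptl);	/* underflows if we broke on the first pte */

Remembering the pointer returned by pte_offset_map_lock() in mapped_pte
and unlocking that instead keeps the unlock correct no matter where the
loop stops.
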
Fixes: a7f40cfe3b7a ("mm: mempolicy: make mbind() return -EIO when MPOL_MF_STRICT is specified")
Signed-off-by: Shijie Luo <luoshijie1@huawei.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Feilong Lin <linfeilong@huawei.com>
Cc: Shijie Luo <luoshijie1@huawei.com>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/20201019074853.50856-1-luoshijie1@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/mempolicy.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2be65bf5d8cc..2f443767fd1b4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int ret;
-	pte_t *pte;
+	pte_t *pte, *mapped_pte;
 	spinlock_t *ptl;
 
 	if (pmd_trans_huge(*pmd)) {
@@ -515,7 +515,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
@@ -554,7 +554,7 @@ retry:
 		} else
 			break;
 	}
-	pte_unmap_unlock(pte - 1, ptl);
+	pte_unmap_unlock(mapped_pte, ptl);
 	cond_resched();
 	return addr != end ? -EIO : 0;
 }