From: Sasha Levin
Date: Mon, 12 Sep 2022 11:10:45 +0000 (-0400)
Subject: Fixes for 5.15
X-Git-Tag: v5.19.9~20
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=a9e3a333dc42a8b28642b266d4f3ada53e4fa6ac;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 5.15

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.15/revert-arm64-kasan-revert-arm64-mte-reset-the-page-t.patch b/queue-5.15/revert-arm64-kasan-revert-arm64-mte-reset-the-page-t.patch
new file mode 100644
index 00000000000..972e176d9a7
--- /dev/null
+++ b/queue-5.15/revert-arm64-kasan-revert-arm64-mte-reset-the-page-t.patch
@@ -0,0 +1,108 @@
+From 529446ff084b82f1e73c32a293f94a19a2b01a81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 12 Sep 2022 07:07:56 -0400
+Subject: Revert "arm64: kasan: Revert "arm64: mte: reset the page tag in
+ page->flags""
+
+This reverts commit add4bc9281e8704e5ab15616b429576c84f453a2.
+
+On Mon, Sep 12, 2022 at 10:52:45AM +0100, Catalin Marinas wrote:
+>I missed this (holidays) and it looks like it's in stable already. On
+>its own it will likely break kasan_hw if used together with user-space
+>MTE as this change relies on two previous commits:
+>
+>70c248aca9e7 ("mm: kasan: Skip unpoisoning of user pages")
+>6d05141a3930 ("mm: kasan: Skip page unpoisoning only if __GFP_SKIP_KASAN_UNPOISON")
+>
+>The reason I did not cc stable is that there are other dependencies in
+>this area. The potential issues without the above commits were rather
+>theoretical, so take these patches rather as clean-ups/refactoring than
+>fixes.
+
+Signed-off-by: Sasha Levin
+---
+ arch/arm64/kernel/hibernate.c | 5 +++++
+ arch/arm64/kernel/mte.c       | 9 +++++++++
+ arch/arm64/mm/copypage.c      | 9 +++++++++
+ arch/arm64/mm/mteswap.c       | 9 +++++++++
+ 4 files changed, 32 insertions(+)
+
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index db93ce2b0113b..46a0b4d6e2519 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -326,6 +326,11 @@ static void swsusp_mte_restore_tags(void)
+ 		unsigned long pfn = xa_state.xa_index;
+ 		struct page *page = pfn_to_online_page(pfn);
+ 
++		/*
++		 * It is not required to invoke page_kasan_tag_reset(page)
++		 * at this point since the tags stored in page->flags are
++		 * already restored.
++		 */
+ 		mte_restore_page_tags(page_address(page), tags);
+ 
+ 		mte_free_tag_storage(tags);
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 10207e3e5ae20..7c1c82c8115cc 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -44,6 +44,15 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ 	if (!pte_is_tagged)
+ 		return;
+ 
++	page_kasan_tag_reset(page);
++	/*
++	 * We need smp_wmb() in between setting the flags and clearing the
++	 * tags because if another thread reads page->flags and builds a
++	 * tagged address out of it, there is an actual dependency to the
++	 * memory access, but on the current thread we do not guarantee that
++	 * the new page->flags are visible before the tags were updated.
++	 */
++	smp_wmb();
+ 	mte_clear_page_tags(page_address(page));
+ }
+ 
+diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
+index 24913271e898c..0dea80bf6de46 100644
+--- a/arch/arm64/mm/copypage.c
++++ b/arch/arm64/mm/copypage.c
+@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
+ 
+ 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
+ 		set_bit(PG_mte_tagged, &to->flags);
++		page_kasan_tag_reset(to);
++		/*
++		 * We need smp_wmb() in between setting the flags and clearing the
++		 * tags because if another thread reads page->flags and builds a
++		 * tagged address out of it, there is an actual dependency to the
++		 * memory access, but on the current thread we do not guarantee that
++		 * the new page->flags are visible before the tags were updated.
++		 */
++		smp_wmb();
+ 		mte_copy_page_tags(kto, kfrom);
+ 	}
+ }
+diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
+index c52c1847079c1..7c4ef56265ee1 100644
+--- a/arch/arm64/mm/mteswap.c
++++ b/arch/arm64/mm/mteswap.c
+@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
+ 	if (!tags)
+ 		return false;
+ 
++	page_kasan_tag_reset(page);
++	/*
++	 * We need smp_wmb() in between setting the flags and clearing the
++	 * tags because if another thread reads page->flags and builds a
++	 * tagged address out of it, there is an actual dependency to the
++	 * memory access, but on the current thread we do not guarantee that
++	 * the new page->flags are visible before the tags were updated.
++	 */
++	smp_wmb();
+ 	mte_restore_page_tags(page_address(page), tags);
+ 
+ 	return true;
+-- 
+2.35.1
+
diff --git a/queue-5.15/series b/queue-5.15/series
index d3b140993ad..5c3fd1da54f 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -112,3 +112,4 @@ hwmon-mr75203-update-pvt-v_num-and-vm_num-to-the-act.patch
 hwmon-mr75203-fix-voltage-equation-for-negative-sour.patch
 hwmon-mr75203-fix-multi-channel-voltage-reading.patch
 hwmon-mr75203-enable-polling-for-all-vm-channels.patch
+revert-arm64-kasan-revert-arm64-mte-reset-the-page-t.patch