From 65f6855ae015c5e6923ef4e3f901d4a6b92b3076 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 17 Apr 2024 10:02:15 +0200
Subject: [PATCH] drop queue-6.6/arm64-tlb-fix-tlbi-range-operand.patch

---
 .../arm64-tlb-fix-tlbi-range-operand.patch | 91 -------------------
 queue-6.6/series                           |  1 -
 2 files changed, 92 deletions(-)
 delete mode 100644 queue-6.6/arm64-tlb-fix-tlbi-range-operand.patch

diff --git a/queue-6.6/arm64-tlb-fix-tlbi-range-operand.patch b/queue-6.6/arm64-tlb-fix-tlbi-range-operand.patch
deleted file mode 100644
index 8ea6e13f54f..00000000000
--- a/queue-6.6/arm64-tlb-fix-tlbi-range-operand.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From e3ba51ab24fddef79fc212f9840de54db8fd1685 Mon Sep 17 00:00:00 2001
-From: Gavin Shan
-Date: Fri, 5 Apr 2024 13:58:50 +1000
-Subject: arm64: tlb: Fix TLBI RANGE operand
-
-From: Gavin Shan
-
-commit e3ba51ab24fddef79fc212f9840de54db8fd1685 upstream.
-
-KVM/arm64 relies on the TLBI RANGE feature to flush TLBs when the
-dirty pages are collected by the VMM and the page table entries become
-write protected during live migration. Unfortunately, the operand
-passed to the TLBI RANGE instruction isn't correctly computed due to
-commit 117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()").
-This leads to a crash on the destination VM after live migration
-because TLBs aren't flushed completely and some dirty pages are missed.
-
-For example, I have a VM where 8GB memory is assigned, starting from
-0x40000000 (1GB). Note that the host has 4KB as the base page size.
-In the middle of migration, kvm_tlb_flush_vmid_range() is executed
-to flush TLBs. It passes MAX_TLBI_RANGE_PAGES as the argument to
-__kvm_tlb_flush_vmid_range() and __flush_s2_tlb_range_op(). SCALE#3
-and NUM#31, corresponding to MAX_TLBI_RANGE_PAGES, aren't supported
-by __TLBI_RANGE_NUM(). In this specific case, -1 has been returned
-from __TLBI_RANGE_NUM() for SCALE#3/2/1/0 and rejected by the loop
-in __flush_tlb_range_op() until the variable @scale underflows
-to -9, at which point 0xffff708000040000 is set as the operand.
-The operand is wrong since it's computed by __TLBI_VADDR_RANGE()
-from the invalid @scale and @num.
-
-Fix it by extending __TLBI_RANGE_NUM() to support the combination of
-SCALE#3 and NUM#31. With the changes, [-1 31] instead of [-1 30] can
-be returned from the macro, meaning the TLBs for 0x200000 pages in the
-above example can be flushed in one shot with SCALE#3 and NUM#31. The
-macro TLBI_RANGE_MASK is dropped since no one uses it any more. The
-comments are also adjusted accordingly.
-
-Fixes: 117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()")
-Cc: stable@kernel.org # v6.6+
-Reported-by: Yihuang Yu
-Suggested-by: Marc Zyngier
-Signed-off-by: Gavin Shan
-Reviewed-by: Catalin Marinas
-Reviewed-by: Ryan Roberts
-Reviewed-by: Anshuman Khandual
-Reviewed-by: Shaoqin Huang
-Link: https://lore.kernel.org/r/20240405035852.1532010-2-gshan@redhat.com
-Signed-off-by: Catalin Marinas
-Signed-off-by: Greg Kroah-Hartman
----
- arch/arm64/include/asm/tlbflush.h |   22 ++++++++++++----------
- 1 file changed, 12 insertions(+), 10 deletions(-)
-
---- a/arch/arm64/include/asm/tlbflush.h
-+++ b/arch/arm64/include/asm/tlbflush.h
-@@ -152,12 +152,18 @@ static inline unsigned long get_trans_gr
- #define MAX_TLBI_RANGE_PAGES	__TLBI_RANGE_PAGES(31, 3)
- 
- /*
-- * Generate 'num' values from -1 to 30 with -1 rejected by the
-- * __flush_tlb_range() loop below.
-- */
--#define TLBI_RANGE_MASK		GENMASK_ULL(4, 0)
--#define __TLBI_RANGE_NUM(pages, scale)	\
--	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
-+ * Generate 'num' values from -1 to 31 with -1 rejected by the
-+ * __flush_tlb_range() loop below. Its return value is only
-+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
-+ * 'pages' is more than that, you must iterate over the overall
-+ * range.
-+ */
-+#define __TLBI_RANGE_NUM(pages, scale)					\
-+	({								\
-+		int __pages = min((pages),				\
-+				  __TLBI_RANGE_PAGES(31, (scale)));	\
-+		(__pages >> (5 * (scale) + 1)) - 1;			\
-+	})
- 
- /*
-  * TLB Invalidation
-@@ -359,10 +365,6 @@ static inline void arch_tlbbatch_flush(s
-  * Start from scale = 0, flush the corresponding number of pages
-  * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
-  * until no pages left.
-- *
-- * Note that certain ranges can be represented by either num = 31 and
-- * scale or num = 0 and scale + 1. The loop below favours the latter
-- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
-  */
- #define __flush_tlb_range_op(op, start, pages, stride,			\
- 				asid, tlb_level, tlbi_user)		\
diff --git a/queue-6.6/series b/queue-6.6/series
index 9425a86942a..9003530e9c4 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -2,7 +2,6 @@ smb3-fix-open-files-on-server-counter-going-negative.patch
 ata-libata-core-allow-command-duration-limits-detection-for-acs-4-drives.patch
 ata-libata-scsi-fix-ata_scsi_dev_rescan-error-path.patch
 batman-adv-avoid-infinite-loop-trying-to-resize-local-tt.patch
-arm64-tlb-fix-tlbi-range-operand.patch
 arm-dts-imx7s-warp-pass-ov2680-link-frequencies.patch
 raid1-fix-use-after-free-for-original-bio-in-raid1_write_request.patch
 ring-buffer-only-update-pages_touched-when-a-new-page-is-touched.patch
-- 
2.47.2
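
For reference, the macro arithmetic at the heart of the dropped fix is
easy to check outside the kernel. The sketch below is not part of the
patch: it re-implements the old and new __TLBI_RANGE_NUM() as plain C
functions with hypothetical old_/new_ names, expands GENMASK_ULL(4, 0)
to 0x1f, and open-codes min() so it builds in userspace. Run against
the commit message's example (pages = MAX_TLBI_RANGE_PAGES, i.e. 8GB
of 4KB pages), the old arithmetic yields num = -1 at every scale,
which is why @scale underflowed in __flush_tlb_range_op(), while the
new arithmetic yields num = 31 at SCALE#3, covering the whole range in
a single TLBI RANGE operation.

#include <stdio.h>

/* Number of pages covered by a TLBI RANGE op with a given num and scale. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES	__TLBI_RANGE_PAGES(31, 3)	/* 0x200000 */

/* Old macro: masks with GENMASK_ULL(4, 0) == 0x1f, so NUM#31 wraps to -1. */
static int old_tlbi_range_num(unsigned long pages, int scale)
{
	return (int)((pages >> (5 * scale + 1)) & 0x1f) - 1;
}

/* New macro: clamps pages to the scale's maximum instead of masking. */
static int new_tlbi_range_num(unsigned long pages, int scale)
{
	unsigned long max = __TLBI_RANGE_PAGES(31, scale);

	if (pages > max)
		pages = max;
	return (int)(pages >> (5 * scale + 1)) - 1;
}

int main(void)
{
	/* The commit message's example: 8GB of memory in 4KB pages. */
	unsigned long pages = MAX_TLBI_RANGE_PAGES;
	int scale;

	/* Per the commit message, the kernel loop tries SCALE#3 down to #0. */
	for (scale = 3; scale >= 0; scale--)
		printf("scale %d: old num = %2d, new num = %2d\n", scale,
		       old_tlbi_range_num(pages, scale),
		       new_tlbi_range_num(pages, scale));
	return 0;
}

With the old arithmetic, no scale ever produces a usable num for this
range, so the loop keeps decrementing @scale past zero; with the new
arithmetic, SCALE#3/NUM#31 flushes all 0x200000 pages in one shot.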