--- /dev/null
+From e2768b798a197318736f00c506633cb78ff77012 Mon Sep 17 00:00:00 2001
+From: Ryan Roberts <ryan.roberts@arm.com>
+Date: Mon, 27 Nov 2023 11:17:26 +0000
+Subject: arm64/mm: Modify range-based tlbi to decrement scale
+
+From: Ryan Roberts <ryan.roberts@arm.com>
+
+commit e2768b798a197318736f00c506633cb78ff77012 upstream.
+
+In preparation for adding support for LPA2 to the tlb invalidation
+routines, modify the algorithm used by range-based tlbi to start at the
+highest 'scale' and decrement instead of starting at the lowest 'scale'
+and incrementing. This new approach makes it possible to maintain 64K
+alignment as we work through the range, until the last op (at scale=0).
+This is required when LPA2 is enabled. (This part will be added in a
+subsequent commit).
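+
+As a rough illustration (not part of this patch; the helper names are
+made up), here is a minimal user-space sketch of the decrementing-scale
+walk. It reuses the (num + 1) << (5 * scale + 1) page count from
+__TLBI_RANGE_PAGES() and only prints which (num, scale) ops would be
+emitted; the real code issues TLBI range instructions instead:
+
+	#include <stdio.h>
+
+	/* Pages covered by one range op: (num + 1) * 2^(5*scale + 1). */
+	static unsigned long range_pages(int num, int scale)
+	{
+		return (unsigned long)(num + 1) << (5 * scale + 1);
+	}
+
+	/* Illustrative only: mirrors the arithmetic, not the kernel macro. */
+	static void walk(unsigned long pages)
+	{
+		int scale = 3;	/* start at the highest scale and decrement */
+
+		while (pages > 0) {
+			if (pages == 1) {
+				/* Last odd page goes via a non-range op. */
+				printf("non-range op: 1 page\n");
+				pages -= 1;
+				continue;
+			}
+
+			/* Largest num (0..31) whose range fits in 'pages'. */
+			int num = (int)(pages >> (5 * scale + 1)) - 1;
+
+			if (num >= 0) {
+				if (num > 31)
+					num = 31;
+				printf("range op: scale=%d num=%d (%lu pages)\n",
+				       scale, num, range_pages(num, scale));
+				pages -= range_pages(num, scale);
+			}
+			scale--;
+		}
+	}
+
+	int main(void)
+	{
+		walk(65538);	/* emits one 65536-page op, then a 2-page op */
+		return 0;
+	}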
+
+This change is separated into its own patch because it will also impact
+non-LPA2 systems, and I want to make it easy to bisect in case it leads
+to performance regression (see below for benchmarks that suggest this
+should not be a problem).
+
+The original commit (d1d3aa98 "arm64: tlb: Use the TLBI RANGE feature in
+arm64") stated this as the reason for _incrementing_ scale:
+
+ However, in most scenarios, the pages = 1 when flush_tlb_range() is
+ called. Start from scale = 3 or other proper value (such as scale
+ =ilog2(pages)), will incur extra overhead. So increase 'scale' from 0
+ to maximum.
+
+But pages=1 is already special-cased by the non-range invalidation path,
+which will take care of it the first time through the loop (both in the
+original commit and in my change), so I don't think switching to
+decrementing scale should have any extra performance impact after all.
+
+Indeed benchmarking kernel compilation, a TLBI-heavy workload, suggests
+that this new approach actually _improves_ performance slightly (using a
+virtual machine on Apple M2):
+
+The table shows the time to execute a kernel compilation workload with
+8 jobs, relative to the baseline without this patch (a more negative
+number means a bigger speedup). Repeated 9 times across 3 system reboots:
+
+| counter   |   mean |  stdev |
+|:----------|-------:|-------:|
+| real-time |  -0.6% |   0.0% |
+| kern-time |  -1.6% |   0.5% |
+| user-time |  -0.4% |   0.1% |
+
+Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20231127111737.1897081-2-ryan.roberts@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/tlbflush.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -351,14 +351,14 @@ static inline void arch_tlbbatch_flush(s
+ * entries one by one at the granularity of 'stride'. If the TLB
+ * range ops are supported, then:
+ *
+- * 1. If 'pages' is odd, flush the first page through non-range
+- * operations;
++ * 1. The minimum range granularity is decided by 'scale', so multiple range
++ * TLBI operations may be required. Start from scale = 3, flush the largest
++ * possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
++ * requested range, then decrement scale and continue until one or zero pages
++ * are left.
+ *
+- * 2. For remaining pages: the minimum range granularity is decided
+- * by 'scale', so multiple range TLBI operations may be required.
+- * Start from scale = 0, flush the corresponding number of pages
+- * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+- * until no pages left.
++ * 2. If there is 1 page remaining, flush it through non-range operations. Range
++ * operations can only span an even number of pages.
+ *
+ * Note that certain ranges can be represented by either num = 31 and
+ * scale or num = 0 and scale + 1. The loop below favours the latter
+@@ -368,12 +368,12 @@ static inline void arch_tlbbatch_flush(s
+ asid, tlb_level, tlbi_user) \
+ do { \
+ int num = 0; \
+- int scale = 0; \
++ int scale = 3; \
+ unsigned long addr; \
+ \
+ while (pages > 0) { \
+ if (!system_supports_tlb_range() || \
+- pages % 2 == 1) { \
++ pages == 1) { \
+ addr = __TLBI_VADDR(start, asid); \
+ __tlbi_level(op, addr, tlb_level); \
+ if (tlbi_user) \
+@@ -393,7 +393,7 @@ do { \
+ start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+ pages -= __TLBI_RANGE_PAGES(num, scale); \
+ } \
+- scale++; \
++ scale--; \
+ } \
+ } while (0)
+
--- /dev/null
+From e3ba51ab24fddef79fc212f9840de54db8fd1685 Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gshan@redhat.com>
+Date: Fri, 5 Apr 2024 13:58:50 +1000
+Subject: arm64: tlb: Fix TLBI RANGE operand
+
+From: Gavin Shan <gshan@redhat.com>
+
+commit e3ba51ab24fddef79fc212f9840de54db8fd1685 upstream.
+
+KVM/arm64 relies on the TLBI RANGE feature to flush the TLBs when
+dirty pages are collected by the VMM and the page table entries become
+write-protected during live migration. Unfortunately, since commit
+117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()"), the
+operand passed to the TLBI RANGE instruction isn't computed correctly.
+This leads to a crash on the destination VM after live migration
+because the TLBs aren't flushed completely and some dirty pages are
+missed.
+
+For example, I have a VM with 8GB of memory assigned, starting at
+0x40000000 (1GB); note that the host uses a 4KB base page size. In the
+middle of migration, kvm_tlb_flush_vmid_range() is executed to flush
+the TLBs. It passes MAX_TLBI_RANGE_PAGES as the argument to
+__kvm_tlb_flush_vmid_range() and __flush_s2_tlb_range_op(). SCALE#3
+and NUM#31, corresponding to MAX_TLBI_RANGE_PAGES, aren't supported
+by __TLBI_RANGE_NUM(). In this specific case, -1 is returned from
+__TLBI_RANGE_NUM() for SCALE#3/2/1/0 and rejected by the loop in
+__flush_tlb_range_op() until the variable @scale underflows to -9,
+at which point 0xffff708000040000 is set as the operand. The operand
+is wrong because it is computed by __TLBI_VADDR_RANGE() from the
+invalid @scale and @num.
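+
+Working through the macros makes this concrete: MAX_TLBI_RANGE_PAGES is
+__TLBI_RANGE_PAGES(31, 3) = (31 + 1) << (5 * 3 + 1) = 0x200000 pages
+(8GB with 4KB pages). The old __TLBI_RANGE_NUM() then computes
+(0x200000 >> 16) & 0x1f = 32 & 0x1f = 0, and 0 - 1 = -1 for SCALE#3;
+the shifted value is likewise a multiple of 32 for SCALE#2/1/0, so
+every scale yields -1 and the loop makes no progress.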
+
+Fix it by extending __TLBI_RANGE_NUM() to support the combination of
+SCALE#3 and NUM#31. With the changes, [-1 31] instead of [-1 30] can
+be returned from the macro, meaning the TLBs for 0x200000 pages in the
+above example can be flushed in one shot with SCALE#3 and NUM#31. The
+macro TLBI_RANGE_MASK is dropped since no one uses it any more. The
+comments are also adjusted accordingly.
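+
+As a quick self-contained check (not part of the patch; the helper
+names below are made up for illustration), the old and new macro
+behaviour for this case can be reproduced in plain C:
+
+	#include <stdio.h>
+
+	#define RANGE_PAGES(num, scale) \
+		((unsigned long)((num) + 1) << (5 * (scale) + 1))
+
+	/* Old behaviour: mask to 5 bits, so 32 wraps to 0 and num becomes -1. */
+	static int range_num_old(unsigned long pages, int scale)
+	{
+		return (int)((pages >> (5 * scale + 1)) & 0x1f) - 1;
+	}
+
+	/* New behaviour: clamp to the largest range this scale can cover. */
+	static int range_num_new(unsigned long pages, int scale)
+	{
+		unsigned long max = RANGE_PAGES(31, scale);
+		unsigned long p = pages < max ? pages : max;
+
+		return (int)(p >> (5 * scale + 1)) - 1;
+	}
+
+	int main(void)
+	{
+		unsigned long pages = RANGE_PAGES(31, 3); /* MAX_TLBI_RANGE_PAGES */
+
+		/* Old: -1 at every scale; new: 31, so one op covers it all. */
+		for (int scale = 3; scale >= 0; scale--)
+			printf("scale=%d old=%d new=%d\n", scale,
+			       range_num_old(pages, scale),
+			       range_num_new(pages, scale));
+		return 0;
+	}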
+
+Fixes: 117940aa6e5f ("KVM: arm64: Define kvm_tlb_flush_vmid_range()")
+Cc: stable@kernel.org # v6.6+
+Reported-by: Yihuang Yu <yihyu@redhat.com>
+Suggested-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Gavin Shan <gshan@redhat.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
+Link: https://lore.kernel.org/r/20240405035852.1532010-2-gshan@redhat.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/tlbflush.h | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -152,12 +152,18 @@ static inline unsigned long get_trans_gr
+ #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
+
+ /*
+- * Generate 'num' values from -1 to 30 with -1 rejected by the
+- * __flush_tlb_range() loop below.
+- */
+-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
+-#define __TLBI_RANGE_NUM(pages, scale) \
+- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
++ * Generate 'num' values from -1 to 31 with -1 rejected by the
++ * __flush_tlb_range() loop below. Its return value is only
++ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
++ * 'pages' is more than that, you must iterate over the overall
++ * range.
++ */
++#define __TLBI_RANGE_NUM(pages, scale) \
++ ({ \
++ int __pages = min((pages), \
++ __TLBI_RANGE_PAGES(31, (scale))); \
++ (__pages >> (5 * (scale) + 1)) - 1; \
++ })
+
+ /*
+ * TLB Invalidation
+@@ -359,10 +365,6 @@ static inline void arch_tlbbatch_flush(s
+ *
+ * 2. If there is 1 page remaining, flush it through non-range operations. Range
+ * operations can only span an even number of pages.
+- *
+- * Note that certain ranges can be represented by either num = 31 and
+- * scale or num = 0 and scale + 1. The loop below favours the latter
+- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+ #define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user) \
random-handle-creditable-entropy-from-atomic-process-context.patch
scsi-core-fix-handling-of-scmd_fail_if_recovering.patch
net-usb-ax88179_178a-avoid-writing-the-mac-address-before-first-reading.patch
+arm64-mm-modify-range-based-tlbi-to-decrement-scale.patch
+arm64-tlb-fix-tlbi-range-operand.patch