git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Ensure target address is granule-aligned for range TLBI
author: Will Deacon <will@kernel.org>
Wed, 27 Mar 2024 12:48:53 +0000 (12:48 +0000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Apr 2024 14:35:51 +0000 (16:35 +0200)
commit 4c36a156738887c1edd78589fe192d757989bcde upstream.

When zapping a table entry in stage2_try_break_pte(), we issue range
TLB invalidation for the region that was mapped by the table. However,
we neglect to align the base address down to the granule size and so
if we ended up reaching the table entry via a misaligned address then
we will accidentally skip invalidation for some prefix of the affected
address range.

Align 'ctx->addr' down to the granule size when performing TLB
invalidation for an unmapped table in stage2_try_break_pte().

Cc: Raghavendra Rao Ananta <rananta@google.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Shaoqin Huang <shahuang@redhat.com>
Cc: Quentin Perret <qperret@google.com>
Fixes: defc8cc7abf0 ("KVM: arm64: Invalidate the table entries upon a range")
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240327124853.11206-5-will@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/kvm/hyp/pgtable.c

index f155b8c9e98c7fbf1298f4ecf64c6826c76fdb23..15aa9bad1c280ba1bcc74f26c52297d7bac1138f 100644 (file)
@@ -805,12 +805,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
                 * Perform the appropriate TLB invalidation based on the
                 * evicted pte value (if any).
                 */
-               if (kvm_pte_table(ctx->old, ctx->level))
-                       kvm_tlb_flush_vmid_range(mmu, ctx->addr,
-                                               kvm_granule_size(ctx->level));
-               else if (kvm_pte_valid(ctx->old))
+               if (kvm_pte_table(ctx->old, ctx->level)) {
+                       u64 size = kvm_granule_size(ctx->level);
+                       u64 addr = ALIGN_DOWN(ctx->addr, size);
+
+                       kvm_tlb_flush_vmid_range(mmu, addr, size);
+               } else if (kvm_pte_valid(ctx->old)) {
                        kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
                                     ctx->addr, ctx->level);
+               }
        }
 
        if (stage2_pte_is_counted(ctx->old))