From 42a6842dc9a363e44385a2e1e7ec779af32f8b57 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 23 May 2017 19:45:29 +0200 Subject: [PATCH] 4.11-stable patches added patches: kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch --- ...-use-after-free-of-stage2-page-table.patch | 74 +++++++++++++++++++ ...64-force-reading-uncached-stage2-pgd.patch | 33 +++++++++ queue-4.11/series | 2 + 3 files changed, 109 insertions(+) create mode 100644 queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch create mode 100644 queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch diff --git a/queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch b/queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch new file mode 100644 index 00000000000..4376c6adb83 --- /dev/null +++ b/queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch @@ -0,0 +1,74 @@ +From 0c428a6a9256fcd66817e12db32a50b405ed2e5c Mon Sep 17 00:00:00 2001 +From: Suzuki K Poulose +Date: Tue, 16 May 2017 10:34:55 +0100 +Subject: kvm: arm/arm64: Fix use after free of stage2 page table +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Suzuki K Poulose + +commit 0c428a6a9256fcd66817e12db32a50b405ed2e5c upstream. + +We yield the kvm->mmu_lock occasionally while performing an operation +(e.g, unmap or permission changes) on a large area of stage2 mappings. +However this could possibly cause another thread to clear and free up +the stage2 page tables while we were waiting for regaining the lock and +thus the original thread could end up in accessing memory that was +freed. This patch fixes the problem by making sure that the stage2 +pagetable is still valid after we regain the lock. 
The fact that +mmu_notifier->release() could be called twice (via __mmu_notifier_release +and mmu_notifier_unregister) enhances the possibility of hitting +this race where there are two threads trying to unmap the entire guest +shadow pages. + +While at it, cleanup the redundant checks around cond_resched_lock in +stage2_wp_range(), as cond_resched_lock already does the same checks. + +Cc: Mark Rutland +Cc: Radim Krčmář +Cc: andreyknvl@google.com +Cc: Paolo Bonzini +Acked-by: Marc Zyngier +Signed-off-by: Suzuki K Poulose +Reviewed-by: Christoffer Dall +Signed-off-by: Christoffer Dall +Signed-off-by: Greg Kroah-Hartman +--- + arch/arm/kvm/mmu.c | 17 +++++++++++++---- + 1 file changed, 13 insertions(+), 4 deletions(-) + +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kv + assert_spin_locked(&kvm->mmu_lock); + pgd = kvm->arch.pgd + stage2_pgd_index(addr); + do { ++ /* ++ * Make sure the page table is still active, as another thread ++ * could have possibly freed the page table, while we released ++ * the lock. ++ */ ++ if (!READ_ONCE(kvm->arch.pgd)) ++ break; + next = stage2_pgd_addr_end(addr, end); + if (!stage2_pgd_none(*pgd)) + unmap_stage2_puds(kvm, pgd, addr, next); +@@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm * + * large. Otherwise, we may see kernel panics with + * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, + * CONFIG_LOCKDEP. Additionally, holding the lock too long +- * will also starve other vCPUs. ++ * will also starve other vCPUs. We have to also make sure ++ * that the page tables are not freed while we released ++ * the lock. 
+ */ +- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) +- cond_resched_lock(&kvm->mmu_lock); +- ++ cond_resched_lock(&kvm->mmu_lock); ++ if (!READ_ONCE(kvm->arch.pgd)) ++ break; + next = stage2_pgd_addr_end(addr, end); + if (stage2_pgd_present(*pgd)) + stage2_wp_puds(pgd, addr, next); diff --git a/queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch b/queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch new file mode 100644 index 00000000000..5093c6b2ede --- /dev/null +++ b/queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch @@ -0,0 +1,33 @@ +From 2952a6070e07ebdd5896f1f5b861acad677caded Mon Sep 17 00:00:00 2001 +From: Suzuki K Poulose +Date: Tue, 16 May 2017 10:34:54 +0100 +Subject: kvm: arm/arm64: Force reading uncached stage2 PGD + +From: Suzuki K Poulose + +commit 2952a6070e07ebdd5896f1f5b861acad677caded upstream. + +Make sure we don't use a cached value of the KVM stage2 PGD while +resetting the PGD. + +Cc: Marc Zyngier +Signed-off-by: Suzuki K Poulose +Reviewed-by: Christoffer Dall +Signed-off-by: Christoffer Dall +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm/kvm/mmu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -844,7 +844,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); +- pgd = kvm->arch.pgd; ++ pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + } + spin_unlock(&kvm->mmu_lock); diff --git a/queue-4.11/series b/queue-4.11/series index 784ec67d02f..04038673a67 100644 --- a/queue-4.11/series +++ b/queue-4.11/series @@ -164,3 +164,5 @@ genirq-fix-chained-interrupt-data-ordering.patch nvme-unmap-cmb-and-remove-sysfs-file-in-reset-path.patch mips-loongson-3-select-mips_l1_cache_shift_6.patch kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch +kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch 
+kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch -- 2.47.3