]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.11-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 May 2017 17:45:29 +0000 (19:45 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 May 2017 17:45:29 +0000 (19:45 +0200)
added patches:
kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch
kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch

queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch [new file with mode: 0644]
queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch [new file with mode: 0644]
queue-4.11/series

diff --git a/queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch b/queue-4.11/kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch
new file mode 100644 (file)
index 0000000..4376c6a
--- /dev/null
@@ -0,0 +1,74 @@
+From 0c428a6a9256fcd66817e12db32a50b405ed2e5c Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 16 May 2017 10:34:55 +0100
+Subject: kvm: arm/arm64: Fix use after free of stage2 page table
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 0c428a6a9256fcd66817e12db32a50b405ed2e5c upstream.
+
+We yield the kvm->mmu_lock occasionally while performing an operation
+(e.g, unmap or permission changes) on a large area of stage2 mappings.
+However this could possibly cause another thread to clear and free up
+the stage2 page tables while we were waiting for regaining the lock and
+thus the original thread could end up in accessing memory that was
+freed. This patch fixes the problem by making sure that the stage2
+pagetable is still valid after we regain the lock. The fact that
+mmu_notifier->release() could be called twice (via __mmu_notifier_release
+and mmu_notifier_unregister) enhances the possibility of hitting
+this race where there are two threads trying to unmap the entire guest
+shadow pages.
+
+While at it, cleanup the redundant checks around cond_resched_lock in
+stage2_wp_range(), as cond_resched_lock already does the same checks.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: andreyknvl@google.com
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kvm/mmu.c |   17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kv
+       assert_spin_locked(&kvm->mmu_lock);
+       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+       do {
++              /*
++               * Make sure the page table is still active, as another thread
++               * could have possibly freed the page table, while we released
++               * the lock.
++               */
++              if (!READ_ONCE(kvm->arch.pgd))
++                      break;
+               next = stage2_pgd_addr_end(addr, end);
+               if (!stage2_pgd_none(*pgd))
+                       unmap_stage2_puds(kvm, pgd, addr, next);
+@@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm *
+                * large. Otherwise, we may see kernel panics with
+                * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
+                * CONFIG_LOCKDEP. Additionally, holding the lock too long
+-               * will also starve other vCPUs.
++               * will also starve other vCPUs. We have to also make sure
++               * that the page tables are not freed while we released
++               * the lock.
+                */
+-              if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+-                      cond_resched_lock(&kvm->mmu_lock);
+-
++              cond_resched_lock(&kvm->mmu_lock);
++              if (!READ_ONCE(kvm->arch.pgd))
++                      break;
+               next = stage2_pgd_addr_end(addr, end);
+               if (stage2_pgd_present(*pgd))
+                       stage2_wp_puds(pgd, addr, next);
diff --git a/queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch b/queue-4.11/kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch
new file mode 100644 (file)
index 0000000..5093c6b
--- /dev/null
@@ -0,0 +1,33 @@
+From 2952a6070e07ebdd5896f1f5b861acad677caded Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 16 May 2017 10:34:54 +0100
+Subject: kvm: arm/arm64: Force reading uncached stage2 PGD
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 2952a6070e07ebdd5896f1f5b861acad677caded upstream.
+
+Make sure we don't use a cached value of the KVM stage2 PGD while
+resetting the PGD.
+
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -844,7 +844,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm
+       spin_lock(&kvm->mmu_lock);
+       if (kvm->arch.pgd) {
+               unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+-              pgd = kvm->arch.pgd;
++              pgd = READ_ONCE(kvm->arch.pgd);
+               kvm->arch.pgd = NULL;
+       }
+       spin_unlock(&kvm->mmu_lock);
index 784ec67d02f29609f8764e2bca14494dfd0d0745..04038673a67f28909d3ef605a4c451021c14929d 100644 (file)
@@ -164,3 +164,5 @@ genirq-fix-chained-interrupt-data-ordering.patch
 nvme-unmap-cmb-and-remove-sysfs-file-in-reset-path.patch
 mips-loongson-3-select-mips_l1_cache_shift_6.patch
 kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch
+kvm-arm-arm64-fix-use-after-free-of-stage2-page-table.patch
+kvm-arm-arm64-force-reading-uncached-stage2-pgd.patch