--- /dev/null
+From 86658b819cd0a9aa584cd84453ed268a6f013770 Mon Sep 17 00:00:00 2001
+From: Punit Agrawal <punit.agrawal@arm.com>
+Date: Mon, 13 Aug 2018 11:43:50 +0100
+Subject: KVM: arm/arm64: Skip updating PMD entry if no change
+
+From: Punit Agrawal <punit.agrawal@arm.com>
+
+commit 86658b819cd0a9aa584cd84453ed268a6f013770 upstream.
+
+Contention on updating a PMD entry by a large number of vcpus can lead
+to duplicate work when handling stage 2 page faults. As the page table
+update follows the break-before-make requirement of the architecture,
+this duplicate work causes repeated refaults, since each update clears
+the entry and flushes the TLBs before installing the new one.
+
+This problem is more likely when:
+
+* there are a large number of vcpus
+* the mapping is a large block mapping
+
+such as when using PMD hugepages (512MB) with 64k pages.
+
+Fix this by skipping the page table update if there is no change in
+the entry being updated.
+
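+For illustration, a minimal standalone sketch of the pattern above
+(hypothetical names, with a plain integer standing in for the PMD value
+and a counter standing in for the IPA-based TLB flush; not the kernel
+code itself):
+
+  #include <stdint.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static uint64_t stage2_pmd;       /* stand-in for the stage 2 PMD entry */
+  static unsigned long tlb_flushes; /* counts break-before-make flushes   */
+
+  static void fake_tlb_flush_ipa(void)
+  {
+          tlb_flushes++;
+  }
+
+  /* Install a huge mapping; skip break-before-make if nothing changes. */
+  static bool set_pmd_huge(uint64_t new_pmd)
+  {
+          uint64_t old_pmd = stage2_pmd;
+
+          if (old_pmd != 0) {
+                  /* Skip the update if the entry is unchanged. */
+                  if (old_pmd == new_pmd)
+                          return false;
+
+                  /* Break-before-make: clear the entry, then flush. */
+                  stage2_pmd = 0;
+                  fake_tlb_flush_ipa();
+          }
+          stage2_pmd = new_pmd;
+          return true;
+  }
+
+  int main(void)
+  {
+          set_pmd_huge(0x200000); /* first fault installs the mapping  */
+          set_pmd_huge(0x200000); /* racing vcpu: same value, no flush */
+          printf("flushes: %lu\n", tlb_flushes); /* prints "flushes: 0" */
+          return 0;
+  }
+
+Without the early return, the second caller would clear the entry and
+flush the TLBs again, forcing other vcpus to refault on a translation
+that was already correct.
+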
+Cc: stable@vger.kernel.org
+Fixes: ad361f093c1e ("KVM: ARM: Support hugetlbfs backed huge pages")
+Reviewed-by: Suzuki Poulose <suzuki.poulose@arm.com>
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 38 +++++++++++++++++++++++++++-----------
+ 1 file changed, 27 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -894,19 +894,35 @@ static int stage2_set_pmd_huge(struct kv
+ pmd = stage2_get_pmd(kvm, cache, addr);
+ VM_BUG_ON(!pmd);
+
+- /*
+- * Mapping in huge pages should only happen through a fault. If a
+- * page is merged into a transparent huge page, the individual
+- * subpages of that huge page should be unmapped through MMU
+- * notifiers before we get here.
+- *
+- * Merging of CompoundPages is not supported; they should become
+- * splitting first, unmapped, merged, and mapped back in on-demand.
+- */
+- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+-
+ old_pmd = *pmd;
+ if (pmd_present(old_pmd)) {
++ /*
++ * Multiple vcpus faulting on the same PMD entry, can
++ * lead to them sequentially updating the PMD with the
++ * same value. Following the break-before-make
++ * (pmd_clear() followed by tlb_flush()) process can
++ * hinder forward progress due to refaults generated
++ * on missing translations.
++ *
++ * Skip updating the page table if the entry is
++ * unchanged.
++ */
++ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
++ return 0;
++
++ /*
++ * Mapping in huge pages should only happen through a
++ * fault. If a page is merged into a transparent huge
++ * page, the individual subpages of that huge page
++ * should be unmapped through MMU notifiers before we
++ * get here.
++ *
++ * Merging of CompoundPages is not supported; they
++ * should become splitting first, unmapped, merged,
++ * and mapped back in on-demand.
++ */
++ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
++
+ pmd_clear(pmd);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
--- /dev/null
+From 976d34e2dab10ece5ea8fe7090b7692913f89084 Mon Sep 17 00:00:00 2001
+From: Punit Agrawal <punit.agrawal@arm.com>
+Date: Mon, 13 Aug 2018 11:43:51 +0100
+Subject: KVM: arm/arm64: Skip updating PTE entry if no change
+
+From: Punit Agrawal <punit.agrawal@arm.com>
+
+commit 976d34e2dab10ece5ea8fe7090b7692913f89084 upstream.
+
+When there is contention on faulting in a particular page table entry
+at stage 2, the break-before-make requirement of the architecture can
+lead to additional refaulting due to TLB invalidation.
+
+Avoid this by skipping the page table update if the new value of the
+PTE matches the previous value.
+
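+For illustration, the same guard at the last level (PTE), as a minimal
+hedged sketch with hypothetical names and a plain integer standing in
+for the PTE value; the real code flushes by IPA via
+kvm_tlb_flush_vmid_ipa():
+
+  #include <stdint.h>
+
+  static uint64_t stage2_pte; /* stand-in for the level 3 entry */
+
+  static void set_stage2_pte(uint64_t new_pte)
+  {
+          uint64_t old_pte = stage2_pte;
+
+          if (old_pte != 0) {
+                  /* Skip the page table update if there is no change. */
+                  if (old_pte == new_pte)
+                          return;
+
+                  /* Break-before-make: clear the entry, then flush the
+                   * stale TLB entry (flush elided in this sketch). */
+                  stage2_pte = 0;
+          }
+          stage2_pte = new_pte;
+  }
+
+  int main(void)
+  {
+          set_stage2_pte(0x1000); /* first fault maps the page        */
+          set_stage2_pte(0x1000); /* contending vcpu: no change, skip */
+          return 0;
+  }
+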
+Cc: stable@vger.kernel.org
+Fixes: d5d8184d35c9 ("KVM: ARM: Memory virtualization setup")
+Reviewed-by: Suzuki Poulose <suzuki.poulose@arm.com>
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -962,6 +962,10 @@ static int stage2_set_pte(struct kvm *kv
+ /* Create 2nd stage page table mapping - Level 3 */
+ old_pte = *pte;
+ if (pte_present(old_pte)) {
++ /* Skip page table update if there is no change */
++ if (pte_val(old_pte) == pte_val(*new_pte))
++ return 0;
++
+ kvm_set_pte(pte, __pte(0));
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {