--- /dev/null
+From 250a41e0ecc433cdd553a364d0fc74c766425209 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 17 Aug 2012 09:27:35 -0400
+Subject: xen/p2m: Reuse existing P2M leafs if they are filled with 1:1 PFNs or INVALID.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 250a41e0ecc433cdd553a364d0fc74c766425209 upstream.
+
+If P2M leaf is completely packed with INVALID_P2M_ENTRY or with
+1:1 PFNs (so IDENTITY_FRAME type PFNs), we can swap the P2M leaf
+with either a p2m_missing or p2m_identity respectively. The old
+page (which was created via extend_brk or was grafted on from the
+mfn_list) can be re-used for setting new PFNs.
+
+This also means we can remove git commit:
+5bc6f9888db5739abfa0cae279b4b442e4db8049
+xen/p2m: Reserve 8MB of _brk space for P2M leafs when populating back
+which tried to fix this.
+
+and make the amount that is required to be reserved much smaller.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/p2m.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 92 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE
+
+ /* When we populate back during bootup, the amount of pages can vary. The
+ * max we have is seen is 395979, but that does not mean it can't be more.
+- * But some machines can have 3GB I/O holes even. So lets reserve enough
+- * for 4GB of I/O and E820 holes. */
+-RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
++ * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
++ * it can re-use Xen provided mfn_list array, so we only need to allocate at
++ * most three P2M top nodes. */
++RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
++
+ static inline unsigned p2m_top_index(unsigned long pfn)
+ {
+ BUG_ON(pfn >= MAX_P2M_PFN);
+@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsig
+ }
+ return true;
+ }
++
++/*
++ * Skim over the P2M tree looking at pages that are either filled with
++ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
++ * replace the P2M leaf with a p2m_missing or p2m_identity.
++ * Stick the old page in the new P2M tree location.
++ */
++bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
++{
++ unsigned topidx;
++ unsigned mididx;
++ unsigned ident_pfns;
++ unsigned inv_pfns;
++ unsigned long *p2m;
++ unsigned long *mid_mfn_p;
++ unsigned idx;
++ unsigned long pfn;
++
++ /* We only look when this entails a P2M middle layer */
++ if (p2m_index(set_pfn))
++ return false;
++
++ for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
++ topidx = p2m_top_index(pfn);
++
++ if (!p2m_top[topidx])
++ continue;
++
++ if (p2m_top[topidx] == p2m_mid_missing)
++ continue;
++
++ mididx = p2m_mid_index(pfn);
++ p2m = p2m_top[topidx][mididx];
++ if (!p2m)
++ continue;
++
++ if ((p2m == p2m_missing) || (p2m == p2m_identity))
++ continue;
++
++ if ((unsigned long)p2m == INVALID_P2M_ENTRY)
++ continue;
++
++ ident_pfns = 0;
++ inv_pfns = 0;
++ for (idx = 0; idx < P2M_PER_PAGE; idx++) {
++ /* IDENTITY_PFNs are 1:1 */
++ if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
++ ident_pfns++;
++ else if (p2m[idx] == INVALID_P2M_ENTRY)
++ inv_pfns++;
++ else
++ break;
++ }
++ if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
++ goto found;
++ }
++ return false;
++found:
++ /* Found one, replace old with p2m_identity or p2m_missing */
++ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
++ /* And the other for save/restore.. */
++ mid_mfn_p = p2m_top_mfn_p[topidx];
++ /* NOTE: Even if it is a p2m_identity it should still be point to
++ * a page filled with INVALID_P2M_ENTRY entries. */
++ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
++
++ /* Reset where we want to stick the old page in. */
++ topidx = p2m_top_index(set_pfn);
++ mididx = p2m_mid_index(set_pfn);
++
++ /* This shouldn't happen */
++ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
++ early_alloc_p2m(set_pfn);
++
++ if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
++ return false;
++
++ p2m_init(p2m);
++ p2m_top[topidx][mididx] = p2m;
++ mid_mfn_p = p2m_top_mfn_p[topidx];
++ mid_mfn_p[mididx] = virt_to_mfn(p2m);
++
++ return true;
++}
+ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ {
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (!early_alloc_p2m(pfn))
+ return false;
+
++ if (early_can_reuse_p2m_middle(pfn, mfn))
++ return __set_phys_to_machine(pfn, mfn);
++
+ if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+ return false;
+
--- /dev/null
+From c96aae1f7f393387d160211f60398d58463a7e65 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 17 Aug 2012 16:43:28 -0400
+Subject: xen/setup: Fix one-off error when adding for-balloon PFNs to the P2M.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit c96aae1f7f393387d160211f60398d58463a7e65 upstream.
+
+When we are finished returning PFNs to the hypervisor, then
+populate them back, and also mark the E820 MMIO and E820 gaps
+as IDENTITY_FRAMEs, we then call P2M to set areas that can
+be used for ballooning. We were off by one, and ended up
+over-writing a P2M entry that most likely was an IDENTITY_FRAME.
+For example:
+
+1-1 mapping on 40000->40200
+1-1 mapping on bc558->bc5ac
+1-1 mapping on bc5b4->bc8c5
+1-1 mapping on bc8c6->bcb7c
+1-1 mapping on bcd00->100000
+Released 614 pages of unused memory
+Set 277889 page(s) to 1-1 mapping
+Populating 40200-40466 pfn range: 614 pages added
+
+=> here we set from 40466 up to bc559 P2M tree to be
+INVALID_P2M_ENTRY. We should have done it up to bc558.
+
+The end result is that if anybody is trying to construct
+a PTE for PFN bc558 they end up with ~PAGE_PRESENT.
+
+Reported-by-and-Tested-by: Andre Przywara <andre.przywara@amd.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/setup.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64
+ memblock_reserve(start, size);
+
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
++ for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
++ unsigned long mfn = pfn_to_mfn(pfn);
++
++ if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
++ continue;
++ WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
++ pfn, mfn);
+
+- for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ }
+ }
+
+ static unsigned long __init xen_do_chunk(unsigned long start,