fix-4-port-and-add-support-for-8-port-unknown-pci-serial-port-cards.patch
8250-16-50-add-support-for-broadcom-trumanage-redirected-serial-port.patch
tty-serial-add-support-for-altera-serial-port.patch
-xen-p2m-move-code-around-to-allow-for-better-re-usage.patch
-xen-p2m-allow-alloc_p2m_middle-to-call-reserve_brk-depending-on-argument.patch
-xen-p2m-collapse-early_alloc_p2m_middle-redundant-checks.patch
-xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch
-xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch
-xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch
-xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch
-xen-balloon-subtract-from-xen_released_pages-the-count-that-is-populated.patch
-xen-populate-correct-number-of-pages-when-across-mem-boundary-v2.patch
-xen-p2m-reserve-8mb-of-_brk-space-for-p2m-leafs-when-populating-back.patch
-xen-p2m-reuse-existing-p2m-leafs-if-they-are-filled-with-1-1-pfns-or-invalid.patch
+++ /dev/null
-From 58b7b53a36b0be8081fbfc91aeea24b83c20ca1b Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Tue, 29 May 2012 12:36:43 -0400
-Subject: xen/balloon: Subtract from xen_released_pages the count that is populated.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 58b7b53a36b0be8081fbfc91aeea24b83c20ca1b upstream.
-
-We did not take into account that xen_released_pages would be
-used outside the initial E820 parsing code. As such we did not
-subtract from xen_released_pages the count of pages that we had
-populated back (instead we just did a simple
-extra_pages = released - populated).
-
-The balloon driver uses xen_released_pages to set the initial
-current_pages count. If this is wrong (too low) then when a new
-(higher) target is set, the balloon driver will request too many pages
-from Xen.
-
-This fixes errors such as:
-
-(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (51 of 512)
-during bootup and
-free_memory : 0
-
-where the free_memory should be 128.
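-
-To illustrate the accounting, a minimal userspace sketch (not kernel
-code; the numbers are picked to line up with the logs above):
-
- #include <stdio.h>
-
- int main(void)
- {
-         unsigned long released = 512, populated = 384;
-
-         /* Old code: extra_pages came out right, but
-          * xen_released_pages (here 'released') was left untouched,
-          * so the balloon driver later saw 512 instead of 128. */
-         unsigned long extra_old = released - populated;
-
-         /* New code: subtract first, so every later consumer of
-          * xen_released_pages sees the corrected count. */
-         released -= populated;
-         unsigned long extra_new = released;
-
-         printf("extra_pages: %lu vs %lu, balloon sees %lu free\n",
-                extra_old, extra_new, released);
-         return 0;
- }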
-
-Acked-by: David Vrabel <david.vrabel@citrix.com>
-[v1: Per David's review, made the git commit message better]
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/setup.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -399,7 +399,8 @@ char * __init xen_memory_setup(void)
- populated = xen_populate_chunk(map, memmap.nr_entries,
- max_pfn, &last_pfn, xen_released_pages);
-
-- extra_pages += (xen_released_pages - populated);
-+ xen_released_pages -= populated;
-+ extra_pages += xen_released_pages;
-
- if (last_pfn > max_pfn) {
- max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+++ /dev/null
-From cef4cca551d652b7f69c9d76337c5fae24e069dc Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 30 Mar 2012 14:15:14 -0400
-Subject: xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit cef4cca551d652b7f69c9d76337c5fae24e069dc upstream.
-
-For identity cases we want to call reserve_brk only on the boundary
-conditions of the middle P2M (so P2M[x][y][0] = extend_brk). This is
-to work around identity regions (PCI spaces, gaps in E820) which are not
-aligned on 2MB boundaries.
-
-However, for the case where we want to allocate P2M middle leafs at the
-early bootup stage regardless of this alignment check, we need some
-means of doing that. For that we provide the new argument.
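-
-For reference, a sketch of how a PFN splits into the three P2M indexes
-(assuming the 64-bit values P2M_PER_PAGE = P2M_MID_PER_PAGE = 512, so
-one leaf covers 512 * 4k = 2MB):
-
- #include <stdio.h>
-
- #define P2M_PER_PAGE     512     /* entries in a leaf page */
- #define P2M_MID_PER_PAGE 512     /* leaves per middle page */
-
- int main(void)
- {
-         unsigned long pfn = 0x20100;    /* an example PFN */
-         unsigned topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
-         unsigned mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
-         unsigned idx    = pfn % P2M_PER_PAGE;
-
-         /* idx == 0 means the PFN sits exactly on a 2MB leaf
-          * boundary; the new check_boundary argument lets early
-          * callers allocate a leaf even in that case. */
-         printf("top=%u mid=%u idx=%u (%s)\n", topidx, mididx, idx,
-                idx ? "crosses into a leaf" : "on a leaf boundary");
-         return 0;
- }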
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/p2m.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -499,7 +499,7 @@ static bool alloc_p2m(unsigned long pfn)
- return true;
- }
-
--static bool __init early_alloc_p2m_middle(unsigned long pfn)
-+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
- {
- unsigned topidx, mididx, idx;
-
-@@ -508,7 +508,7 @@ static bool __init early_alloc_p2m_middl
- idx = p2m_index(pfn);
-
- /* Pfff.. No boundary cross-over, lets get out. */
-- if (!idx)
-+ if (!idx && check_boundary)
- return false;
-
- WARN(p2m_top[topidx][mididx] == p2m_identity,
-@@ -531,7 +531,7 @@ static bool __init early_alloc_p2m_middl
- p2m_top[topidx][mididx] = p2m;
-
- /* For save/restore we need to MFN of the P2M saved */
--
-+
- mid_mfn_p = p2m_top_mfn_p[topidx];
- WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
- "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-@@ -592,8 +592,8 @@ unsigned long __init set_phys_range_iden
- WARN_ON(!early_alloc_p2m(pfn));
- }
-
-- early_alloc_p2m_middle(pfn_s);
-- early_alloc_p2m_middle(pfn_e);
-+ early_alloc_p2m_middle(pfn_s, true);
-+ early_alloc_p2m_middle(pfn_e, true);
-
- for (pfn = pfn_s; pfn < pfn_e; pfn++)
- if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
+++ /dev/null
-From 940713bb2ce3033f468a220094a07250a2f69bdd Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 30 Mar 2012 14:33:14 -0400
-Subject: xen/p2m: An early bootup variant of set_phys_to_machine
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 940713bb2ce3033f468a220094a07250a2f69bdd upstream.
-
-During early bootup we can't use alloc_page, so to allocate
-leaf pages in the P2M we need to use extend_brk. For that
-we are utilizing the early_alloc_p2m and early_alloc_p2m_middle
-functions to do the job for us. The new function follows the
-same logic as set_phys_to_machine.
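-
-extend_brk is essentially a bump allocator over a region set aside at
-link time; a minimal userspace model of it (sizes illustrative):
-
- #include <stdint.h>
- #include <stdio.h>
- #include <stdlib.h>
-
- #define PAGE_SIZE 4096UL
-
- static char brk_area[16 * PAGE_SIZE] __attribute__((aligned(4096)));
- static char *brk_end = brk_area;
-
- static void *extend_brk_model(unsigned long size, unsigned long align)
- {
-         uintptr_t p = ((uintptr_t)brk_end + align - 1) & ~(align - 1);
-
-         /* the real extend_brk BUG_ON()s when the reservation runs out */
-         if (p + size > (uintptr_t)brk_area + sizeof(brk_area)) {
-                 fprintf(stderr, "brk exhausted\n");
-                 abort();
-         }
-         brk_end = (char *)(p + size);
-         return (void *)p;
- }
-
- int main(void)
- {
-         void *leaf = extend_brk_model(PAGE_SIZE, PAGE_SIZE);
-
-         printf("new P2M leaf at %p\n", leaf);
-         return 0;
- }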
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/include/asm/xen/page.h | 1 +
- arch/x86/xen/p2m.c | 15 +++++++++++++++
- 2 files changed, 16 insertions(+)
-
---- a/arch/x86/include/asm/xen/page.h
-+++ b/arch/x86/include/asm/xen/page.h
-@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr
-
- extern unsigned long get_phys_to_machine(unsigned long pfn);
- extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
- extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
- extern unsigned long set_phys_range_identity(unsigned long pfn_s,
- unsigned long pfn_e);
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -570,6 +570,21 @@ static bool __init early_alloc_p2m(unsig
- }
- return true;
- }
-+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
-+ if (!early_alloc_p2m(pfn))
-+ return false;
-+
-+ if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
-+ return false;
-+
-+ if (!__set_phys_to_machine(pfn, mfn))
-+ return false;
-+ }
-+
-+ return true;
-+}
- unsigned long __init set_phys_range_identity(unsigned long pfn_s,
- unsigned long pfn_e)
- {
+++ /dev/null
-From d5096850b47424fb0f1c6a75b8f7184f7169319a Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 30 Mar 2012 14:16:49 -0400
-Subject: xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit d5096850b47424fb0f1c6a75b8f7184f7169319a upstream.
-
-At the start of the function we were checking idx and bailing
-out if it was zero, and later calling extend_brk only if idx != 0.
-
-That second check is redundant, so remove it.
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/p2m.c | 25 ++++++++++++-------------
- 1 file changed, 12 insertions(+), 13 deletions(-)
-
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -502,6 +502,8 @@ static bool alloc_p2m(unsigned long pfn)
- static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
- {
- unsigned topidx, mididx, idx;
-+ unsigned long *p2m;
-+ unsigned long *mid_mfn_p;
-
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
-@@ -522,24 +524,21 @@ static bool __init early_alloc_p2m_middl
- return false;
-
- /* Boundary cross-over for the edges: */
-- if (idx) {
-- unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-- unsigned long *mid_mfn_p;
-+ p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-- p2m_init(p2m);
-+ p2m_init(p2m);
-
-- p2m_top[topidx][mididx] = p2m;
-+ p2m_top[topidx][mididx] = p2m;
-
-- /* For save/restore we need to MFN of the P2M saved */
-+ /* For save/restore we need to MFN of the P2M saved */
-
-- mid_mfn_p = p2m_top_mfn_p[topidx];
-- WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-- "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-- topidx, mididx);
-- mid_mfn_p[mididx] = virt_to_mfn(p2m);
-+ mid_mfn_p = p2m_top_mfn_p[topidx];
-+ WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-+ "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-+ topidx, mididx);
-+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
-
-- }
-- return idx != 0;
-+ return true;
- }
-
- static bool __init early_alloc_p2m(unsigned long pfn)
+++ /dev/null
-From 3f3aaea29ff7ee2d43b430338427f30ba7f60ff9 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 30 Mar 2012 11:45:01 -0400
-Subject: xen/p2m: Move code around to allow for better re-usage.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 3f3aaea29ff7ee2d43b430338427f30ba7f60ff9 upstream.
-
-We are going to be using the early_alloc_p2m (and
-early_alloc_p2m_middle) code in follow-up patches which
-are not related to setting identity pages.
-
-Hence let's move the code out into its own functions and
-rename them as appropriate.
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/p2m.c | 62 +++++++++++++++++++++++++++++------------------------
- 1 file changed, 34 insertions(+), 28 deletions(-)
-
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -499,7 +499,7 @@ static bool alloc_p2m(unsigned long pfn)
- return true;
- }
-
--static bool __init __early_alloc_p2m(unsigned long pfn)
-+static bool __init early_alloc_p2m_middle(unsigned long pfn)
- {
- unsigned topidx, mididx, idx;
-
-@@ -541,6 +541,36 @@ static bool __init __early_alloc_p2m(uns
- }
- return idx != 0;
- }
-+
-+static bool __init early_alloc_p2m(unsigned long pfn)
-+{
-+ unsigned topidx = p2m_top_index(pfn);
-+ unsigned long *mid_mfn_p;
-+ unsigned long **mid;
-+
-+ mid = p2m_top[topidx];
-+ mid_mfn_p = p2m_top_mfn_p[topidx];
-+ if (mid == p2m_mid_missing) {
-+ mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-+
-+ p2m_mid_init(mid);
-+
-+ p2m_top[topidx] = mid;
-+
-+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-+ }
-+ /* And the save/restore P2M tables.. */
-+ if (mid_mfn_p == p2m_mid_missing_mfn) {
-+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-+ p2m_mid_mfn_init(mid_mfn_p);
-+
-+ p2m_top_mfn_p[topidx] = mid_mfn_p;
-+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-+ /* Note: we don't set mid_mfn_p[midix] here,
-+ * look in early_alloc_p2m_middle */
-+ }
-+ return true;
-+}
- unsigned long __init set_phys_range_identity(unsigned long pfn_s,
- unsigned long pfn_e)
- {
-@@ -559,35 +589,11 @@ unsigned long __init set_phys_range_iden
- pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
- pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
- {
-- unsigned topidx = p2m_top_index(pfn);
-- unsigned long *mid_mfn_p;
-- unsigned long **mid;
--
-- mid = p2m_top[topidx];
-- mid_mfn_p = p2m_top_mfn_p[topidx];
-- if (mid == p2m_mid_missing) {
-- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
--
-- p2m_mid_init(mid);
--
-- p2m_top[topidx] = mid;
--
-- BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-- }
-- /* And the save/restore P2M tables.. */
-- if (mid_mfn_p == p2m_mid_missing_mfn) {
-- mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-- p2m_mid_mfn_init(mid_mfn_p);
--
-- p2m_top_mfn_p[topidx] = mid_mfn_p;
-- p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-- /* Note: we don't set mid_mfn_p[midix] here,
-- * look in __early_alloc_p2m */
-- }
-+ WARN_ON(!early_alloc_p2m(pfn));
- }
-
-- __early_alloc_p2m(pfn_s);
-- __early_alloc_p2m(pfn_e);
-+ early_alloc_p2m_middle(pfn_s);
-+ early_alloc_p2m_middle(pfn_e);
-
- for (pfn = pfn_s; pfn < pfn_e; pfn++)
- if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
+++ /dev/null
-From 5bc6f9888db5739abfa0cae279b4b442e4db8049 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Mon, 30 Jul 2012 10:18:05 -0400
-Subject: xen/p2m: Reserve 8MB of _brk space for P2M leafs when populating back.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 5bc6f9888db5739abfa0cae279b4b442e4db8049 upstream.
-
-When we release pages back during bootup:
-
-Freeing 9d-100 pfn range: 99 pages freed
-Freeing 9cf36-9d0d2 pfn range: 412 pages freed
-Freeing 9f6bd-9f6bf pfn range: 2 pages freed
-Freeing 9f714-9f7bf pfn range: 171 pages freed
-Freeing 9f7e0-9f7ff pfn range: 31 pages freed
-Freeing 9f800-100000 pfn range: 395264 pages freed
-Released 395979 pages of unused memory
-
-We then try to populate those pages back. In the P2M tree, however,
-the space for those leafs must be reserved - as such we use extend_brk.
-We reserve 8MB of _brk space, which means we can fit
-1048576 PFNs - more than we should ever need.
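-
-The arithmetic behind those numbers, as a standalone sketch (assuming
-4k pages and 512 eight-byte entries per leaf page, i.e. 64-bit):
-
- #include <stdio.h>
-
- int main(void)
- {
-         unsigned long pmd_size = 2UL << 20;          /* PMD_SIZE = 2MB */
-         unsigned long brk_bytes = 4 * pmd_size;      /* the 8MB reserved */
-         unsigned long leaves = brk_bytes / 4096;     /* one leaf per page */
-         unsigned long pfns = leaves * 512;           /* 512 PFNs per leaf */
-
-         /* 2048 leaves * 512 PFNs = 1048576 PFNs, enough to populate
-          * back 4GB worth of I/O and E820 holes. */
-         printf("%lu leaves cover %lu PFNs (%lu MB)\n",
-                leaves, pfns, pfns * 4096 / (1UL << 20));
-         return 0;
- }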
-
-Without this, on certain compilations of the kernel we would hit:
-
-(XEN) domain_crash_sync called from entry.S
-(XEN) CPU: 0
-(XEN) RIP: e033:[<ffffffff818aad3b>]
-(XEN) RFLAGS: 0000000000000206 EM: 1 CONTEXT: pv guest
-(XEN) rax: ffffffff81a7c000 rbx: 000000000000003d rcx: 0000000000001000
-(XEN) rdx: ffffffff81a7b000 rsi: 0000000000001000 rdi: 0000000000001000
-(XEN) rbp: ffffffff81801cd8 rsp: ffffffff81801c98 r8: 0000000000100000
-(XEN) r9: ffffffff81a7a000 r10: 0000000000000001 r11: 0000000000000003
-(XEN) r12: 0000000000000004 r13: 0000000000000004 r14: 000000000000003d
-(XEN) r15: 00000000000001e8 cr0: 000000008005003b cr4: 00000000000006f0
-(XEN) cr3: 0000000125803000 cr2: 0000000000000000
-(XEN) ds: 0000 es: 0000 fs: 0000 gs: 0000 ss: e02b cs: e033
-(XEN) Guest stack trace from rsp=ffffffff81801c98:
-
-.. which is extend_brk hitting a BUG_ON.
-
-Interestingly enough, most of the time we are not going to hit this
-b/c the _brk space is quite large (v3.5):
- ffffffff81a25000 B __brk_base
- ffffffff81e43000 B __brk_limit
-= ~4MB.
-
-vs earlier kernels (with this back-ported), the space is smaller:
- ffffffff81a25000 B __brk_base
- ffffffff81a7b000 B __brk_limit
-= 344 kBytes.
-
-where we would certainly hit the BUG_ON in extend_brk.
-
-Note that git commit c3d93f880197953f86ab90d9da4744e926b38e33
-(xen: populate correct number of pages when across mem boundary (v2))
-exposed this bug.
-
-[v1: Made it 8MB of _brk space instead of 4MB per Jan's suggestion]
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/p2m.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -194,6 +194,11 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MA
- * boundary violation will require three middle nodes. */
- RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
-
-+/* When we populate back during bootup, the amount of pages can vary. The
-+ * max we have is seen is 395979, but that does not mean it can't be more.
-+ * But some machines can have 3GB I/O holes even. So lets reserve enough
-+ * for 4GB of I/O and E820 holes. */
-+RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
- static inline unsigned p2m_top_index(unsigned long pfn)
- {
- BUG_ON(pfn >= MAX_P2M_PFN);
+++ /dev/null
-From 250a41e0ecc433cdd553a364d0fc74c766425209 Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 17 Aug 2012 09:27:35 -0400
-Subject: xen/p2m: Reuse existing P2M leafs if they are filled with 1:1 PFNs or INVALID.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 250a41e0ecc433cdd553a364d0fc74c766425209 upstream.
-
-If a P2M leaf is completely packed with INVALID_P2M_ENTRY or with
-1:1 PFNs (that is, IDENTITY_FRAME-type PFNs), we can swap the P2M leaf
-with either a p2m_missing or p2m_identity respectively. The old
-page (which was created via extend_brk or was grafted on from the
-mfn_list) can be re-used for setting new PFNs.
-
-This also means we can remove git commit:
-5bc6f9888db5739abfa0cae279b4b442e4db8049
-xen/p2m: Reserve 8MB of _brk space for P2M leafs when populating back
-which tried to fix this, and make the amount that is required to be
-reserved much smaller.
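-
-The scan boils down to classifying each leaf page; a userspace model
-(IDENTITY_BIT is a stand-in for the kernel's IDENTITY_FRAME encoding):
-
- #include <stdio.h>
-
- #define P2M_PER_PAGE  512
- #define INVALID_ENTRY (~0UL)
- #define IDENTITY_BIT  (1UL << (8 * sizeof(unsigned long) - 2))
- #define IDENTITY(pfn) ((pfn) | IDENTITY_BIT)
-
- /* 1: every entry 1:1, -1: every entry invalid, 0: mixed (keep it) */
- static int classify_leaf(const unsigned long *p2m, unsigned long base)
- {
-         unsigned int ident = 0, inv = 0, i;
-
-         for (i = 0; i < P2M_PER_PAGE; i++) {
-                 if (p2m[i] == IDENTITY(base + i))
-                         ident++;
-                 else if (p2m[i] == INVALID_ENTRY)
-                         inv++;
-                 else
-                         break;  /* a real MFN - leaf cannot be reused */
-         }
-         if (ident == P2M_PER_PAGE)
-                 return 1;
-         if (inv == P2M_PER_PAGE)
-                 return -1;
-         return 0;
- }
-
- int main(void)
- {
-         unsigned long leaf[P2M_PER_PAGE], base = 0x20000;
-         unsigned int i;
-
-         for (i = 0; i < P2M_PER_PAGE; i++)
-                 leaf[i] = IDENTITY(base + i);
-         printf("classify: %d\n", classify_leaf(leaf, base));
-         return 0;
- }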
-
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/p2m.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 92 insertions(+), 3 deletions(-)
-
---- a/arch/x86/xen/p2m.c
-+++ b/arch/x86/xen/p2m.c
-@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE
-
- /* When we populate back during bootup, the amount of pages can vary. The
- * max we have is seen is 395979, but that does not mean it can't be more.
-- * But some machines can have 3GB I/O holes even. So lets reserve enough
-- * for 4GB of I/O and E820 holes. */
--RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
-+ * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
-+ * it can re-use Xen provided mfn_list array, so we only need to allocate at
-+ * most three P2M top nodes. */
-+RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
-+
- static inline unsigned p2m_top_index(unsigned long pfn)
- {
- BUG_ON(pfn >= MAX_P2M_PFN);
-@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsig
- }
- return true;
- }
-+
-+/*
-+ * Skim over the P2M tree looking at pages that are either filled with
-+ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
-+ * replace the P2M leaf with a p2m_missing or p2m_identity.
-+ * Stick the old page in the new P2M tree location.
-+ */
-+bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
-+{
-+ unsigned topidx;
-+ unsigned mididx;
-+ unsigned ident_pfns;
-+ unsigned inv_pfns;
-+ unsigned long *p2m;
-+ unsigned long *mid_mfn_p;
-+ unsigned idx;
-+ unsigned long pfn;
-+
-+ /* We only look when this entails a P2M middle layer */
-+ if (p2m_index(set_pfn))
-+ return false;
-+
-+ for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
-+ topidx = p2m_top_index(pfn);
-+
-+ if (!p2m_top[topidx])
-+ continue;
-+
-+ if (p2m_top[topidx] == p2m_mid_missing)
-+ continue;
-+
-+ mididx = p2m_mid_index(pfn);
-+ p2m = p2m_top[topidx][mididx];
-+ if (!p2m)
-+ continue;
-+
-+ if ((p2m == p2m_missing) || (p2m == p2m_identity))
-+ continue;
-+
-+ if ((unsigned long)p2m == INVALID_P2M_ENTRY)
-+ continue;
-+
-+ ident_pfns = 0;
-+ inv_pfns = 0;
-+ for (idx = 0; idx < P2M_PER_PAGE; idx++) {
-+ /* IDENTITY_PFNs are 1:1 */
-+ if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
-+ ident_pfns++;
-+ else if (p2m[idx] == INVALID_P2M_ENTRY)
-+ inv_pfns++;
-+ else
-+ break;
-+ }
-+ if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
-+ goto found;
-+ }
-+ return false;
-+found:
-+ /* Found one, replace old with p2m_identity or p2m_missing */
-+ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
-+ /* And the other for save/restore.. */
-+ mid_mfn_p = p2m_top_mfn_p[topidx];
-+ /* NOTE: Even if it is a p2m_identity it should still be point to
-+ * a page filled with INVALID_P2M_ENTRY entries. */
-+ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
-+
-+ /* Reset where we want to stick the old page in. */
-+ topidx = p2m_top_index(set_pfn);
-+ mididx = p2m_mid_index(set_pfn);
-+
-+ /* This shouldn't happen */
-+ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
-+ early_alloc_p2m(set_pfn);
-+
-+ if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
-+ return false;
-+
-+ p2m_init(p2m);
-+ p2m_top[topidx][mididx] = p2m;
-+ mid_mfn_p = p2m_top_mfn_p[topidx];
-+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
-+
-+ return true;
-+}
- bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
- {
- if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- if (!early_alloc_p2m(pfn))
- return false;
-
-+ if (early_can_reuse_p2m_middle(pfn, mfn))
-+ return __set_phys_to_machine(pfn, mfn);
-+
- if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
- return false;
-
+++ /dev/null
-From c3d93f880197953f86ab90d9da4744e926b38e33 Mon Sep 17 00:00:00 2001
-From: "zhenzhong.duan" <zhenzhong.duan@oracle.com>
-Date: Wed, 18 Jul 2012 13:06:39 +0800
-Subject: xen: populate correct number of pages when across mem boundary (v2)
-
-From: "zhenzhong.duan" <zhenzhong.duan@oracle.com>
-
-commit c3d93f880197953f86ab90d9da4744e926b38e33 upstream.
-
-When populating pages across a memory boundary at bootup, the populated
-page count isn't correct, because pages populated into a non-RAM
-region are ignored.
-
-The PFN range is also wrongly aligned when the memory boundary isn't
-page aligned.
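-
-Only whole pages inside an E820 entry are usable RAM, which is why the
-rounding flips direction; a sketch using the unaligned boundary from
-the map below:
-
- #include <stdio.h>
-
- #define PAGE_SHIFT  12
- #define PAGE_SIZE   (1UL << PAGE_SHIFT)
- #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
- #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-
- int main(void)
- {
-         /* the usable entry below ends at 0xcd9ffc00, not page aligned */
-         unsigned long addr = 0x100000;
-         unsigned long size = 0xcd9ffc00 - 0x100000;
-
-         /* start rounds up, end rounds down: */
-         printf("s_pfn=%lx e_pfn=%lx\n",
-                PFN_UP(addr), PFN_DOWN(addr + size));
-
-         /* the old rounding produced e_pfn=cda00, one page past RAM */
-         printf("old e_pfn=%lx\n", PFN_UP(addr + size));
-         return 0;
- }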
-
-For a dom0 booted with dom_mem=3368952K (0xcd9ff000-4k) the dmesg diff is:
- [ 0.000000] Freeing 9e-100 pfn range: 98 pages freed
- [ 0.000000] 1-1 mapping on 9e->100
- [ 0.000000] 1-1 mapping on cd9ff->100000
- [ 0.000000] Released 98 pages of unused memory
- [ 0.000000] Set 206435 page(s) to 1-1 mapping
--[ 0.000000] Populating cd9fe-cda00 pfn range: 1 pages added
-+[ 0.000000] Populating cd9fe-cd9ff pfn range: 1 pages added
-+[ 0.000000] Populating 100000-100061 pfn range: 97 pages added
- [ 0.000000] BIOS-provided physical RAM map:
- [ 0.000000] Xen: 0000000000000000 - 000000000009e000 (usable)
- [ 0.000000] Xen: 00000000000a0000 - 0000000000100000 (reserved)
- [ 0.000000] Xen: 0000000000100000 - 00000000cd9ff000 (usable)
- [ 0.000000] Xen: 00000000cd9ffc00 - 00000000cda53c00 (ACPI NVS)
-...
- [ 0.000000] Xen: 0000000100000000 - 0000000100061000 (usable)
- [ 0.000000] Xen: 0000000100061000 - 000000012c000000 (unusable)
-...
- [ 0.000000] MEMBLOCK configuration:
-...
--[ 0.000000] reserved[0x4] [0x000000cd9ff000-0x000000cd9ffbff], 0xc00 bytes
--[ 0.000000] reserved[0x5] [0x00000100000000-0x00000100060fff], 0x61000 bytes
-
-Related xen memory layout:
-(XEN) Xen-e820 RAM map:
-(XEN) 0000000000000000 - 000000000009ec00 (usable)
-(XEN) 00000000000f0000 - 0000000000100000 (reserved)
-(XEN) 0000000000100000 - 00000000cd9ffc00 (usable)
-
-Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
-[v2: If xen_do_chunk fails to populate, abort this chunk and any others]
-Suggested by David, thanks.
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/setup.c | 23 +++++++++--------------
- 1 file changed, 9 insertions(+), 14 deletions(-)
-
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -165,25 +165,24 @@ static unsigned long __init xen_populate
- unsigned long dest_pfn;
-
- for (i = 0, entry = list; i < map_size; i++, entry++) {
-- unsigned long credits = credits_left;
- unsigned long s_pfn;
- unsigned long e_pfn;
- unsigned long pfns;
- long capacity;
-
-- if (credits <= 0)
-+ if (credits_left <= 0)
- break;
-
- if (entry->type != E820_RAM)
- continue;
-
-- e_pfn = PFN_UP(entry->addr + entry->size);
-+ e_pfn = PFN_DOWN(entry->addr + entry->size);
-
- /* We only care about E820 after the xen_start_info->nr_pages */
- if (e_pfn <= max_pfn)
- continue;
-
-- s_pfn = PFN_DOWN(entry->addr);
-+ s_pfn = PFN_UP(entry->addr);
- /* If the E820 falls within the nr_pages, we want to start
- * at the nr_pages PFN.
- * If that would mean going past the E820 entry, skip it
-@@ -192,23 +191,19 @@ static unsigned long __init xen_populate
- capacity = e_pfn - max_pfn;
- dest_pfn = max_pfn;
- } else {
-- /* last_pfn MUST be within E820_RAM regions */
-- if (*last_pfn && e_pfn >= *last_pfn)
-- s_pfn = *last_pfn;
- capacity = e_pfn - s_pfn;
- dest_pfn = s_pfn;
- }
-- /* If we had filled this E820_RAM entry, go to the next one. */
-- if (capacity <= 0)
-- continue;
-
-- if (credits > capacity)
-- credits = capacity;
-+ if (credits_left < capacity)
-+ capacity = credits_left;
-
-- pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
-+ pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
- done += pfns;
-- credits_left -= pfns;
- *last_pfn = (dest_pfn + pfns);
-+ if (pfns < capacity)
-+ break;
-+ credits_left -= pfns;
- }
- return done;
- }
+++ /dev/null
-From 96dc08b35c4af8cb5810450602590706f2593a5f Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 6 Apr 2012 16:10:20 -0400
-Subject: xen/setup: Combine the two hypercall functions - since they are quite similar.
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 96dc08b35c4af8cb5810450602590706f2593a5f upstream.
-
-They use the same set of arguments, so it is just a matter
-of using the proper hypercall.
-
-Acked-by: David Vrabel <david.vrabel@citrix.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Tested-by: Daniel Kiper <daniel.kiper@oracle.com>
-Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
-
----
- arch/x86/xen/setup.c | 80 +++++++++++++++++++--------------------------------
- 1 file changed, 30 insertions(+), 50 deletions(-)
-
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -91,8 +91,8 @@ static void __init xen_add_extra_mem(u64
- }
- }
-
--static unsigned long __init xen_release_chunk(unsigned long start,
-- unsigned long end)
-+static unsigned long __init xen_do_chunk(unsigned long start,
-+ unsigned long end, bool release)
- {
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
-@@ -103,59 +103,36 @@ static unsigned long __init xen_release_
- unsigned long pfn;
- int ret;
-
-- for(pfn = start; pfn < end; pfn++) {
-- unsigned long mfn = pfn_to_mfn(pfn);
--
-- /* Make sure pfn exists to start with */
-- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-- continue;
--
-- set_xen_guest_handle(reservation.extent_start, &mfn);
-- reservation.nr_extents = 1;
--
-- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-- &reservation);
-- WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
-- if (ret == 1) {
-- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-- len++;
-- }
-- }
-- printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
-- start, end, len);
--
-- return len;
--}
--static unsigned long __init xen_populate_physmap(unsigned long start,
-- unsigned long end)
--{
-- struct xen_memory_reservation reservation = {
-- .address_bits = 0,
-- .extent_order = 0,
-- .domid = DOMID_SELF
-- };
-- unsigned long len = 0;
-- int ret;
--
- for (pfn = start; pfn < end; pfn++) {
- unsigned long frame;
-+ unsigned long mfn = pfn_to_mfn(pfn);
-
-- /* Make sure pfn does not exists to start with */
-- if (pfn_to_mfn(pfn) != INVALID_P2M_ENTRY)
-- continue;
--
-- frame = pfn;
-+ if (release) {
-+ /* Make sure pfn exists to start with */
-+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-+ continue;
-+ frame = mfn;
-+ } else {
-+ if (mfn != INVALID_P2M_ENTRY)
-+ continue;
-+ frame = pfn;
-+ }
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
-
-- ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
-- WARN(ret != 1, "Failed to populate pfn %lx err=%d\n", pfn, ret);
-+ ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
-+ &reservation);
-+ WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
-+ release ? "release" : "populate", pfn, ret);
-+
- if (ret == 1) {
-- if (!early_set_phys_to_machine(pfn, frame)) {
-+ if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
-+ if (release)
-+ break;
- set_xen_guest_handle(reservation.extent_start, &frame);
- reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-- &reservation);
-+ &reservation);
- break;
- }
- len++;
-@@ -163,8 +140,11 @@ static unsigned long __init xen_populate
- break;
- }
- if (len)
-- printk(KERN_INFO "Populated %lx-%lx pfn range: %lu pages added\n",
-- start, end, len);
-+ printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
-+ release ? "Freeing" : "Populating",
-+ start, end, len,
-+ release ? "freed" : "added");
-+
- return len;
- }
- static unsigned long __init xen_populate_chunk(
-@@ -218,7 +198,7 @@ static unsigned long __init xen_populate
- if (credits > capacity)
- credits = capacity;
-
-- pfns = xen_populate_physmap(dest_pfn, dest_pfn + credits);
-+ pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
- done += pfns;
- credits_left -= pfns;
- *last_pfn = (dest_pfn + pfns);
-@@ -256,8 +236,8 @@ static unsigned long __init xen_set_iden
-
- if (start_pfn < end_pfn) {
- if (start_pfn < nr_pages)
-- released += xen_release_chunk(
-- start_pfn, min(end_pfn, nr_pages));
-+ released += xen_do_chunk(
-+ start_pfn, min(end_pfn, nr_pages), true);
-
- identity += set_phys_range_identity(
- start_pfn, end_pfn);
+++ /dev/null
-From 2e2fb75475c2fc74c98100f1468c8195fee49f3b Mon Sep 17 00:00:00 2001
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Date: Fri, 6 Apr 2012 10:07:11 -0400
-Subject: xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
-
-From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
-commit 2e2fb75475c2fc74c98100f1468c8195fee49f3b upstream.
-
-When the Xen hypervisor boots a PV kernel it hands it two pieces
-of information: nr_pages and a made up E820 entry.
-
-The nr_pages value defines the range from zero to nr_pages of PFNs
-which have a valid Machine Frame Number (MFN) underneath them. The
-E820 mirrors that (with the VGA hole):
-BIOS-provided physical RAM map:
- Xen: 0000000000000000 - 00000000000a0000 (usable)
- Xen: 00000000000a0000 - 0000000000100000 (reserved)
- Xen: 0000000000100000 - 0000000080800000 (usable)
-
-The fun comes when a PV guest is run with a machine E820 - that
-can either be the initial domain or a PCI PV guest - where the E820
-looks like the normal thing:
-
-BIOS-provided physical RAM map:
- Xen: 0000000000000000 - 000000000009e000 (usable)
- Xen: 000000000009ec00 - 0000000000100000 (reserved)
- Xen: 0000000000100000 - 0000000020000000 (usable)
- Xen: 0000000020000000 - 0000000020200000 (reserved)
- Xen: 0000000020200000 - 0000000040000000 (usable)
- Xen: 0000000040000000 - 0000000040200000 (reserved)
- Xen: 0000000040200000 - 00000000bad80000 (usable)
- Xen: 00000000bad80000 - 00000000badc9000 (ACPI NVS)
-..
-With that, overlaying the nr_pages directly on the E820 does not
-work, as there are gaps and non-RAM regions that won't be used
-by the memory allocator.
-by punching holes in the P2M (PFN to MFN lookup tree) for those
-regions and tells us that:
-
-Freeing 20000-20200 pfn range: 512 pages freed
-Freeing 40000-40200 pfn range: 512 pages freed
-Freeing bad80-badf4 pfn range: 116 pages freed
-Freeing badf6-bae7f pfn range: 137 pages freed
-Freeing bb000-100000 pfn range: 282624 pages freed
-Released 283999 pages of unused memory
-
-Those 283999 pages are subtracted from the nr_pages and are returned
-to the hypervisor. The end result is that the initial domain
-boots with 1GB less memory, as the nr_pages has been reduced by
-the number of pages residing within the PCI hole. It can balloon up
-to that if desired using 'xl mem-set 0 8092', but the balloon driver
-is not always compiled in for the initial domain.
-
-This patch implements the populate hypercall (XENMEM_populate_physmap),
-which grows the domain by the same number of pages that
-were released.
-
-The other solution (that did not work) was to transplant the MFNs in
-the P2M tree - the ones that were going to be freed were put in
-the E820_RAM regions past the nr_pages. But the modifications to the
-M2P array (the other side of creating PTEs) were not carried over,
-as the hypervisor is the only one capable of modifying that, and the
-only two hypercalls that would do it are update_va_mapping
-(which won't work, as during initial bootup only PFNs up to nr_pages
-are mapped in the guest) and the populate hypercall.
-
-The end result is that the kernel can now boot with the
-nr_pages without having to subtract the 283999 pages.
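-
-A sketch of that accounting, with a cut-down map and a made-up
-nr_pages (in reality both come from the hypervisor):
-
- #include <stdio.h>
-
- struct e820 { unsigned long start_pfn, end_pfn; int ram; };
-
- int main(void)
- {
-         const struct e820 map[] = {
-                 { 0x00000, 0x0009e, 1 },
-                 { 0x0009e, 0x00100, 0 },        /* hole / reserved */
-                 { 0x00100, 0x20000, 1 },
-                 { 0x20000, 0x20200, 0 },        /* PCI hole */
-                 { 0x20200, 0x40000, 1 },
-         };
-         unsigned long nr_pages = 0x30000, released = 0;
-         unsigned int i;
-
-         for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
-                 if (map[i].ram || map[i].start_pfn >= nr_pages)
-                         continue;
-                 released += (map[i].end_pfn < nr_pages ?
-                              map[i].end_pfn : nr_pages) - map[i].start_pfn;
-         }
-         /* the populate hypercall re-adds this many pages past
-          * nr_pages instead of giving them up for good */
-         printf("released = to repopulate: %lu pages\n", released);
-         return 0;
- }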
-
-On a 8GB machine, with various dom0_mem= parameters this is what we get:
-
-no dom0_mem
--Memory: 6485264k/9435136k available (5817k kernel code, 1136060k absent, 1813812k reserved, 2899k data, 696k init)
-+Memory: 7619036k/9435136k available (5817k kernel code, 1136060k absent, 680040k reserved, 2899k data, 696k init)
-
-dom0_mem=3G
--Memory: 2616536k/9435136k available (5817k kernel code, 1136060k absent, 5682540k reserved, 2899k data, 696k init)
-+Memory: 2703776k/9435136k available (5817k kernel code, 1136060k absent, 5595300k reserved, 2899k data, 696k init)
-
-dom0_mem=max:3G
--Memory: 2696732k/4281724k available (5817k kernel code, 1136060k absent, 448932k reserved, 2899k data, 696k init)
-+Memory: 2702204k/4281724k available (5817k kernel code, 1136060k absent, 443460k reserved, 2899k data, 696k init)
-
-And the 'xm list' or 'xl list' now reflect what the dom0_mem=
-argument is.
-
-Acked-by: David Vrabel <david.vrabel@citrix.com>
-[v2: Use populate hypercall]
-[v3: Remove debug printks]
-[v4: Simplify code]
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Tested-by: Daniel Kiper <daniel.kiper@oracle.com>
-Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/x86/xen/setup.c | 116 +++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 112 insertions(+), 4 deletions(-)
-
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -27,7 +27,6 @@
- #include <xen/interface/memory.h>
- #include <xen/interface/physdev.h>
- #include <xen/features.h>
--
- #include "xen-ops.h"
- #include "vdso.h"
-
-@@ -127,7 +126,105 @@ static unsigned long __init xen_release_
-
- return len;
- }
-+static unsigned long __init xen_populate_physmap(unsigned long start,
-+ unsigned long end)
-+{
-+ struct xen_memory_reservation reservation = {
-+ .address_bits = 0,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ unsigned long len = 0;
-+ int ret;
-+
-+ for (pfn = start; pfn < end; pfn++) {
-+ unsigned long frame;
-+
-+ /* Make sure pfn does not exists to start with */
-+ if (pfn_to_mfn(pfn) != INVALID_P2M_ENTRY)
-+ continue;
-+
-+ frame = pfn;
-+ set_xen_guest_handle(reservation.extent_start, &frame);
-+ reservation.nr_extents = 1;
-+
-+ ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
-+ WARN(ret != 1, "Failed to populate pfn %lx err=%d\n", pfn, ret);
-+ if (ret == 1) {
-+ if (!early_set_phys_to_machine(pfn, frame)) {
-+ set_xen_guest_handle(reservation.extent_start, &frame);
-+ reservation.nr_extents = 1;
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation);
-+ break;
-+ }
-+ len++;
-+ } else
-+ break;
-+ }
-+ if (len)
-+ printk(KERN_INFO "Populated %lx-%lx pfn range: %lu pages added\n",
-+ start, end, len);
-+ return len;
-+}
-+static unsigned long __init xen_populate_chunk(
-+ const struct e820entry *list, size_t map_size,
-+ unsigned long max_pfn, unsigned long *last_pfn,
-+ unsigned long credits_left)
-+{
-+ const struct e820entry *entry;
-+ unsigned int i;
-+ unsigned long done = 0;
-+ unsigned long dest_pfn;
-+
-+ for (i = 0, entry = list; i < map_size; i++, entry++) {
-+ unsigned long credits = credits_left;
-+ unsigned long s_pfn;
-+ unsigned long e_pfn;
-+ unsigned long pfns;
-+ long capacity;
-+
-+ if (credits <= 0)
-+ break;
-+
-+ if (entry->type != E820_RAM)
-+ continue;
-+
-+ e_pfn = PFN_UP(entry->addr + entry->size);
-+
-+ /* We only care about E820 after the xen_start_info->nr_pages */
-+ if (e_pfn <= max_pfn)
-+ continue;
-+
-+ s_pfn = PFN_DOWN(entry->addr);
-+ /* If the E820 falls within the nr_pages, we want to start
-+ * at the nr_pages PFN.
-+ * If that would mean going past the E820 entry, skip it
-+ */
-+ if (s_pfn <= max_pfn) {
-+ capacity = e_pfn - max_pfn;
-+ dest_pfn = max_pfn;
-+ } else {
-+ /* last_pfn MUST be within E820_RAM regions */
-+ if (*last_pfn && e_pfn >= *last_pfn)
-+ s_pfn = *last_pfn;
-+ capacity = e_pfn - s_pfn;
-+ dest_pfn = s_pfn;
-+ }
-+ /* If we had filled this E820_RAM entry, go to the next one. */
-+ if (capacity <= 0)
-+ continue;
-+
-+ if (credits > capacity)
-+ credits = capacity;
-
-+ pfns = xen_populate_physmap(dest_pfn, dest_pfn + credits);
-+ done += pfns;
-+ credits_left -= pfns;
-+ *last_pfn = (dest_pfn + pfns);
-+ }
-+ return done;
-+}
- static unsigned long __init xen_set_identity_and_release(
- const struct e820entry *list, size_t map_size, unsigned long nr_pages)
- {
-@@ -150,7 +247,6 @@ static unsigned long __init xen_set_iden
- */
- for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t end = entry->addr + entry->size;
--
- if (entry->type == E820_RAM || i == map_size - 1) {
- unsigned long start_pfn = PFN_DOWN(start);
- unsigned long end_pfn = PFN_UP(end);
-@@ -236,7 +332,9 @@ char * __init xen_memory_setup(void)
- int rc;
- struct xen_memory_map memmap;
- unsigned long max_pages;
-+ unsigned long last_pfn = 0;
- unsigned long extra_pages = 0;
-+ unsigned long populated;
- int i;
- int op;
-
-@@ -287,9 +385,20 @@ char * __init xen_memory_setup(void)
- */
- xen_released_pages = xen_set_identity_and_release(
- map, memmap.nr_entries, max_pfn);
-- extra_pages += xen_released_pages;
-
- /*
-+ * Populate back the non-RAM pages and E820 gaps that had been
-+ * released. */
-+ populated = xen_populate_chunk(map, memmap.nr_entries,
-+ max_pfn, &last_pfn, xen_released_pages);
-+
-+ extra_pages += (xen_released_pages - populated);
-+
-+ if (last_pfn > max_pfn) {
-+ max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
-+ mem_end = PFN_PHYS(max_pfn);
-+ }
-+ /*
- * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
- * factor the base size. On non-highmem systems, the base
- * size is the full initial memory allocation; on highmem it
-@@ -302,7 +411,6 @@ char * __init xen_memory_setup(void)
- */
- extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- extra_pages);
--
- i = 0;
- while (i < memmap.nr_entries) {
- u64 addr = map[i].addr;
+++ /dev/null
-From 83d51ab473dddde7df858015070ed22b84ebe9a9 Mon Sep 17 00:00:00 2001
-From: David Vrabel <dvrabel@cantab.net>
-Date: Thu, 3 May 2012 16:15:42 +0100
-Subject: xen/setup: update VA mapping when releasing memory during setup
-
-From: David Vrabel <dvrabel@cantab.net>
-
-commit 83d51ab473dddde7df858015070ed22b84ebe9a9 upstream.
-
-In xen_memory_setup(), if a page that is being released has a VA
-mapping, this must also be updated. Otherwise, the page will not be
-released completely -- it will still be referenced in Xen and won't be
-freed until the mapping is removed, and this prevents it from being
-reallocated at a different PFN.
-
-This was already being done for the ISA memory region in
-xen_ident_map_ISA(), but this omitted a few pages, since many
-systems mark a few pages below the ISA memory region as
-reserved in the e820 map.
-
-This fixes errors such as:
-
-(XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152
-(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17)
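-
-A toy model of the ordering this patch enforces (purely illustrative;
-Xen's real reference counting is more involved):
-
- #include <stdio.h>
-
- struct page { int va_mapped; int freed; };
-
- /* Xen only truly frees an MFN once nothing references it. */
- static void release(struct page *pg)
- {
-         if (pg->va_mapped) {
-                 /* the PFN is gone from the P2M, but the MFN stays
-                  * pinned -> later "Over-allocation" errors */
-                 printf("released, but MFN still pinned by VA mapping\n");
-                 return;
-         }
-         pg->freed = 1;
-         printf("MFN fully freed, reusable at another PFN\n");
- }
-
- int main(void)
- {
-         struct page pg = { .va_mapped = 1, .freed = 0 };
-
-         release(&pg);           /* old behaviour */
-         pg.va_mapped = 0;       /* update_va_mapping to 1:1 first */
-         release(&pg);           /* with this patch */
-         return 0;
- }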
-
-Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/xen/enlighten.c | 1 -
- arch/x86/xen/mmu.c | 23 -----------------------
- arch/x86/xen/setup.c | 43 +++++++++++++++++++++++++++++++++++--------
- arch/x86/xen/xen-ops.h | 1 -
- 4 files changed, 35 insertions(+), 33 deletions(-)
-
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -1390,7 +1390,6 @@ asmlinkage void __init xen_start_kernel(
-
- xen_raw_console_write("mapping kernel into physical memory\n");
- pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-- xen_ident_map_ISA();
-
- /* Allocate and initialize top and mid mfn levels for p2m structure */
- xen_build_mfn_list_list();
---- a/arch/x86/xen/mmu.c
-+++ b/arch/x86/xen/mmu.c
-@@ -1953,29 +1953,6 @@ static void xen_set_fixmap(unsigned idx,
- #endif
- }
-
--void __init xen_ident_map_ISA(void)
--{
-- unsigned long pa;
--
-- /*
-- * If we're dom0, then linear map the ISA machine addresses into
-- * the kernel's address space.
-- */
-- if (!xen_initial_domain())
-- return;
--
-- xen_raw_printk("Xen: setup ISA identity maps\n");
--
-- for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-- pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
--
-- if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-- BUG();
-- }
--
-- xen_flush_tlb();
--}
--
- static void __init xen_post_allocator_init(void)
- {
- pv_mmu_ops.set_pte = xen_set_pte;
---- a/arch/x86/xen/setup.c
-+++ b/arch/x86/xen/setup.c
-@@ -147,6 +147,13 @@ static unsigned long __init xen_do_chunk
-
- return len;
- }
-+
-+static unsigned long __init xen_release_chunk(unsigned long start,
-+ unsigned long end)
-+{
-+ return xen_do_chunk(start, end, true);
-+}
-+
- static unsigned long __init xen_populate_chunk(
- const struct e820entry *list, size_t map_size,
- unsigned long max_pfn, unsigned long *last_pfn,
-@@ -205,6 +212,29 @@ static unsigned long __init xen_populate
- }
- return done;
- }
-+
-+static void __init xen_set_identity_and_release_chunk(
-+ unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-+ unsigned long *released, unsigned long *identity)
-+{
-+ unsigned long pfn;
-+
-+ /*
-+ * If the PFNs are currently mapped, the VA mapping also needs
-+ * to be updated to be 1:1.
-+ */
-+ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
-+ (void)HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ mfn_pte(pfn, PAGE_KERNEL_IO), 0);
-+
-+ if (start_pfn < nr_pages)
-+ *released += xen_release_chunk(
-+ start_pfn, min(end_pfn, nr_pages));
-+
-+ *identity += set_phys_range_identity(start_pfn, end_pfn);
-+}
-+
- static unsigned long __init xen_set_identity_and_release(
- const struct e820entry *list, size_t map_size, unsigned long nr_pages)
- {
-@@ -234,14 +264,11 @@ static unsigned long __init xen_set_iden
- if (entry->type == E820_RAM)
- end_pfn = PFN_UP(entry->addr);
-
-- if (start_pfn < end_pfn) {
-- if (start_pfn < nr_pages)
-- released += xen_do_chunk(
-- start_pfn, min(end_pfn, nr_pages), true);
--
-- identity += set_phys_range_identity(
-- start_pfn, end_pfn);
-- }
-+ if (start_pfn < end_pfn)
-+ xen_set_identity_and_release_chunk(
-+ start_pfn, end_pfn, nr_pages,
-+ &released, &identity);
-+
- start = end;
- }
- }
---- a/arch/x86/xen/xen-ops.h
-+++ b/arch/x86/xen/xen-ops.h
-@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
- void xen_build_mfn_list_list(void);
- void xen_setup_machphys_mapping(void);
- pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
--void xen_ident_map_ISA(void);
- void xen_reserve_top(void);
- extern unsigned long xen_max_p2m_pfn;
-