git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 Jun 2014 00:08:31 +0000 (17:08 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 10 Jun 2014 00:08:31 +0000 (17:08 -0700)
added patches:
xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch
xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch
xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch
xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch

queue-3.4/series
queue-3.4/xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch [new file with mode: 0644]
queue-3.4/xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch [new file with mode: 0644]
queue-3.4/xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch [new file with mode: 0644]
queue-3.4/xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch [new file with mode: 0644]

diff --git a/queue-3.4/series b/queue-3.4/series
index 0225198955f8326d01dfe222ade76138f5a0b612..a7dcc1d6c8d5797a7c3f38f4d2ae00989f80c60e 100644
@@ -78,3 +78,7 @@ tty-serial-add-support-for-altera-serial-port.patch
 xen-p2m-move-code-around-to-allow-for-better-re-usage.patch
 xen-p2m-allow-alloc_p2m_middle-to-call-reserve_brk-depending-on-argument.patch
 xen-p2m-collapse-early_alloc_p2m_middle-redundant-checks.patch
+xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch
+xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch
+xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch
+xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch
diff --git a/queue-3.4/xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch b/queue-3.4/xen-p2m-an-early-bootup-variant-of-set_phys_to_machine.patch
new file mode 100644
index 0000000..7f8fe0e
--- /dev/null
@@ -0,0 +1,58 @@
+From 940713bb2ce3033f468a220094a07250a2f69bdd Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 30 Mar 2012 14:33:14 -0400
+Subject: xen/p2m: An early bootup variant of set_phys_to_machine
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 940713bb2ce3033f468a220094a07250a2f69bdd upstream.
+
+During early bootup we can't use alloc_page, so to allocate
+leaf pages in the P2M we need to use extend_brk. For that
+we use the early_alloc_p2m and early_alloc_p2m_middle
+functions to do the job for us. The new function follows the
+same logic as set_phys_to_machine.
+
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/xen/page.h |    1 +
+ arch/x86/xen/p2m.c              |   15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -44,6 +44,7 @@ extern unsigned long  machine_to_phys_nr
+ extern unsigned long get_phys_to_machine(unsigned long pfn);
+ extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
++extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+                                            unsigned long pfn_e);
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -570,6 +570,21 @@ static bool __init early_alloc_p2m(unsig
+       }
+       return true;
+ }
++bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++      if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
++              if (!early_alloc_p2m(pfn))
++                      return false;
++
++              if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
++                      return false;
++
++              if (!__set_phys_to_machine(pfn, mfn))
++                      return false;
++      }
++
++      return true;
++}
+ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+                                     unsigned long pfn_e)
+ {
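For readers following the P2M change outside the kernel tree, here is a
minimal, self-contained C model of the try/allocate/retry pattern the
patch adds. The two-level table, the static middle array, and
early_brk_alloc() are simplifications invented for this sketch: the real
P2M is a three-level tree and the allocator is extend_brk().

/*
 * User-space model of early_set_phys_to_machine(): try the fast
 * path, and if the backing leaf page is missing, allocate it from
 * an early (brk-style) allocator and retry. Not the kernel's real
 * data structures.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define INVALID_P2M_ENTRY (~0UL)
#define ENTRIES_PER_LEAF  512

/* One "middle" level: an array of pointers to leaf pages. */
static unsigned long *p2m_middle[64];

/* Fake early allocator standing in for extend_brk(). */
static unsigned long *early_brk_alloc(void)
{
    unsigned long *leaf = malloc(sizeof(unsigned long) * ENTRIES_PER_LEAF);
    if (leaf)
        for (int i = 0; i < ENTRIES_PER_LEAF; i++)
            leaf[i] = INVALID_P2M_ENTRY;
    return leaf;
}

/* Fast path: fails if no leaf page backs this pfn yet. */
static bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
    unsigned long *leaf = p2m_middle[pfn / ENTRIES_PER_LEAF];
    if (!leaf)
        return false;
    leaf[pfn % ENTRIES_PER_LEAF] = mfn;
    return true;
}

/* Slow path: allocate the missing leaf from "brk", then retry. */
static bool early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
    if (!__set_phys_to_machine(pfn, mfn)) {
        unsigned long *leaf = early_brk_alloc();
        if (!leaf)
            return false;
        p2m_middle[pfn / ENTRIES_PER_LEAF] = leaf;
        return __set_phys_to_machine(pfn, mfn);
    }
    return true;
}

int main(void)
{
    /* First store triggers the allocation, second hits the fast path. */
    printf("%d\n", early_set_phys_to_machine(1000, 0xabcd)); /* 1 */
    printf("%d\n", early_set_phys_to_machine(1001, 0xabce)); /* 1 */
    return 0;
}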
diff --git a/queue-3.4/xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch b/queue-3.4/xen-setup-combine-the-two-hypercall-functions-since-they-are-quite-similar.patch
new file mode 100644
index 0000000..98c14bd
--- /dev/null
@@ -0,0 +1,151 @@
+From 96dc08b35c4af8cb5810450602590706f2593a5f Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 6 Apr 2012 16:10:20 -0400
+Subject: xen/setup: Combine the two hypercall functions - since they are quite similar.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 96dc08b35c4af8cb5810450602590706f2593a5f upstream.
+
+Both functions take the same set of arguments, so it is just a
+matter of issuing the proper hypercall.
+
+Acked-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Tested-by: Daniel Kiper <daniel.kiper@oracle.com>
+Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/xen/setup.c |   80 +++++++++++++++++++--------------------------------
+ 1 file changed, 30 insertions(+), 50 deletions(-)
+
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -91,8 +91,8 @@ static void __init xen_add_extra_mem(u64
+       }
+ }
+-static unsigned long __init xen_release_chunk(unsigned long start,
+-                                            unsigned long end)
++static unsigned long __init xen_do_chunk(unsigned long start,
++                                       unsigned long end, bool release)
+ {
+       struct xen_memory_reservation reservation = {
+               .address_bits = 0,
+@@ -103,59 +103,36 @@ static unsigned long __init xen_release_
+       unsigned long pfn;
+       int ret;
+-      for(pfn = start; pfn < end; pfn++) {
+-              unsigned long mfn = pfn_to_mfn(pfn);
+-
+-              /* Make sure pfn exists to start with */
+-              if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+-                      continue;
+-
+-              set_xen_guest_handle(reservation.extent_start, &mfn);
+-              reservation.nr_extents = 1;
+-
+-              ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+-                                         &reservation);
+-              WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+-              if (ret == 1) {
+-                      __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+-                      len++;
+-              }
+-      }
+-      printk(KERN_INFO "Freeing  %lx-%lx pfn range: %lu pages freed\n",
+-             start, end, len);
+-
+-      return len;
+-}
+-static unsigned long __init xen_populate_physmap(unsigned long start,
+-                                               unsigned long end)
+-{
+-      struct xen_memory_reservation reservation = {
+-              .address_bits = 0,
+-              .extent_order = 0,
+-              .domid        = DOMID_SELF
+-      };
+-      unsigned long len = 0;
+-      int ret;
+-
+       for (pfn = start; pfn < end; pfn++) {
+               unsigned long frame;
++              unsigned long mfn = pfn_to_mfn(pfn);
+-              /* Make sure pfn does not exists to start with */
+-              if (pfn_to_mfn(pfn) != INVALID_P2M_ENTRY)
+-                      continue;
+-
+-              frame = pfn;
++              if (release) {
++                      /* Make sure pfn exists to start with */
++                      if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
++                              continue;
++                      frame = mfn;
++              } else {
++                      if (mfn != INVALID_P2M_ENTRY)
++                              continue;
++                      frame = pfn;
++              }
+               set_xen_guest_handle(reservation.extent_start, &frame);
+               reservation.nr_extents = 1;
+-              ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+-              WARN(ret != 1, "Failed to populate pfn %lx err=%d\n", pfn, ret);
++              ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
++                                         &reservation);
++              WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
++                   release ? "release" : "populate", pfn, ret);
++
+               if (ret == 1) {
+-                      if (!early_set_phys_to_machine(pfn, frame)) {
++                      if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
++                              if (release)
++                                      break;
+                               set_xen_guest_handle(reservation.extent_start, &frame);
+                               reservation.nr_extents = 1;
+                               ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+-                                                      &reservation);
++                                                         &reservation);
+                               break;
+                       }
+                       len++;
+@@ -163,8 +140,11 @@ static unsigned long __init xen_populate
+                       break;
+       }
+       if (len)
+-              printk(KERN_INFO "Populated %lx-%lx pfn range: %lu pages added\n",
+-                     start, end, len);
++              printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
++                     release ? "Freeing" : "Populating",
++                     start, end, len,
++                     release ? "freed" : "added");
++
+       return len;
+ }
+ static unsigned long __init xen_populate_chunk(
+@@ -218,7 +198,7 @@ static unsigned long __init xen_populate
+               if (credits > capacity)
+                       credits = capacity;
+-              pfns = xen_populate_physmap(dest_pfn, dest_pfn + credits);
++              pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+               done += pfns;
+               credits_left -= pfns;
+               *last_pfn = (dest_pfn + pfns);
+@@ -256,8 +236,8 @@ static unsigned long __init xen_set_iden
+                       if (start_pfn < end_pfn) {
+                               if (start_pfn < nr_pages)
+-                                      released += xen_release_chunk(
+-                                              start_pfn, min(end_pfn, nr_pages));
++                                      released += xen_do_chunk(
++                                              start_pfn, min(end_pfn, nr_pages), true);
+                               identity += set_phys_range_identity(
+                                       start_pfn, end_pfn);
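A stand-alone sketch of the merge this patch performs: one loop with a
boolean selecting between the release and populate operations, replacing
two nearly identical functions. The hypercall is stubbed and the P2M
checks are dropped, so this models the control flow only; the enum
values and the memory_op() stub are invented for the sketch.

/*
 * Simplified xen_do_chunk(): one loop, one flag choosing between
 * the "release" and "populate" hypercalls.
 */
#include <stdbool.h>
#include <stdio.h>

enum memory_op { XENMEM_decrease_reservation, XENMEM_populate_physmap };

/* Stub: pretend every single-extent hypercall succeeds. */
static int memory_op(enum memory_op op, unsigned long frame)
{
    (void)op; (void)frame;
    return 1;
}

static unsigned long do_chunk(unsigned long start, unsigned long end,
                              bool release)
{
    unsigned long len = 0;

    for (unsigned long pfn = start; pfn < end; pfn++) {
        /* Releasing hands the hypervisor an MFN; populating, a PFN.
         * Both are modeled here by the pfn itself. */
        unsigned long frame = pfn;
        int ret = memory_op(release ? XENMEM_decrease_reservation
                                    : XENMEM_populate_physmap, frame);
        if (ret != 1)
            break;
        len++;
    }
    printf("%s %lx-%lx pfn range: %lu pages %s\n",
           release ? "Freeing" : "Populating", start, end, len,
           release ? "freed" : "added");
    return len;
}

int main(void)
{
    do_chunk(0x20000, 0x20200, true);   /* release */
    do_chunk(0x20000, 0x20200, false);  /* populate */
    return 0;
}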
diff --git a/queue-3.4/xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch b/queue-3.4/xen-setup-populate-freed-mfns-from-non-ram-e820-entries-and-gaps-to-e820-ram.patch
new file mode 100644
index 0000000..d728678
--- /dev/null
@@ -0,0 +1,264 @@
+From 2e2fb75475c2fc74c98100f1468c8195fee49f3b Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 6 Apr 2012 10:07:11 -0400
+Subject: xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 2e2fb75475c2fc74c98100f1468c8195fee49f3b upstream.
+
+When the Xen hypervisor boots a PV kernel it hands it two pieces
+of information: nr_pages and a made-up E820 entry.
+
+The nr_pages value defines the range of PFNs, from zero to nr_pages,
+which have a valid Machine Frame Number (MFN) underneath them. The
+E820 mirrors that (with the VGA hole):
+BIOS-provided physical RAM map:
+ Xen: 0000000000000000 - 00000000000a0000 (usable)
+ Xen: 00000000000a0000 - 0000000000100000 (reserved)
+ Xen: 0000000000100000 - 0000000080800000 (usable)
+
+The fun comes with a PV guest that is run with a machine E820 - that
+can be either the initial domain or a PCI PV guest - where the E820
+looks like the real thing:
+
+BIOS-provided physical RAM map:
+ Xen: 0000000000000000 - 000000000009e000 (usable)
+ Xen: 000000000009ec00 - 0000000000100000 (reserved)
+ Xen: 0000000000100000 - 0000000020000000 (usable)
+ Xen: 0000000020000000 - 0000000020200000 (reserved)
+ Xen: 0000000020200000 - 0000000040000000 (usable)
+ Xen: 0000000040000000 - 0000000040200000 (reserved)
+ Xen: 0000000040200000 - 00000000bad80000 (usable)
+ Xen: 00000000bad80000 - 00000000badc9000 (ACPI NVS)
+..
+With that layout, overlaying the nr_pages directly on the E820 does
+not work, as there are gaps and non-RAM regions that won't be used
+by the memory allocator. The 'xen_release_chunk' helper deals with
+that by punching holes in the P2M (the PFN-to-MFN lookup tree) for
+those regions and tells us that:
+
+Freeing  20000-20200 pfn range: 512 pages freed
+Freeing  40000-40200 pfn range: 512 pages freed
+Freeing  bad80-badf4 pfn range: 116 pages freed
+Freeing  badf6-bae7f pfn range: 137 pages freed
+Freeing  bb000-100000 pfn range: 282624 pages freed
+Released 283999 pages of unused memory
+
+Those 283999 pages are subtracted from nr_pages and are returned
+to the hypervisor. The end result is that the initial domain
+boots with 1GB less memory, as nr_pages has been reduced by
+the number of pages residing within the PCI hole. It can balloon up
+to that amount if desired using 'xl mem-set 0 8092', but the balloon
+driver is not always compiled in for the initial domain.
+
+This patch implements the populate hypercall (XENMEM_populate_physmap),
+which grows the domain by the same number of pages that
+were released.
+
+The other solution (which did not work) was to transplant the MFNs
+in the P2M tree - the ones that were going to be freed were put in
+the E820_RAM regions past nr_pages. But the matching modifications to
+the M2P array (the other side of creating PTEs) were not carried over.
+The hypervisor is the only one capable of modifying that array, and
+the only two hypercalls that could do so are update_va_mapping
+(which won't work, as during initial bootup only PFNs up to nr_pages
+are mapped in the guest) and the populate hypercall.
+
+The end result is that the kernel can now boot with the full
+nr_pages, without having to subtract the 283999 pages.
+
+On an 8GB machine, with various dom0_mem= parameters, this is what we get:
+
+no dom0_mem
+-Memory: 6485264k/9435136k available (5817k kernel code, 1136060k absent, 1813812k reserved, 2899k data, 696k init)
++Memory: 7619036k/9435136k available (5817k kernel code, 1136060k absent, 680040k reserved, 2899k data, 696k init)
+
+dom0_mem=3G
+-Memory: 2616536k/9435136k available (5817k kernel code, 1136060k absent, 5682540k reserved, 2899k data, 696k init)
++Memory: 2703776k/9435136k available (5817k kernel code, 1136060k absent, 5595300k reserved, 2899k data, 696k init)
+
+dom0_mem=max:3G
+-Memory: 2696732k/4281724k available (5817k kernel code, 1136060k absent, 448932k reserved, 2899k data, 696k init)
++Memory: 2702204k/4281724k available (5817k kernel code, 1136060k absent, 443460k reserved, 2899k data, 696k init)
+
+And 'xm list' or 'xl list' now reflects what the dom0_mem=
+argument is.
+
+Acked-by: David Vrabel <david.vrabel@citrix.com>
+[v2: Use populate hypercall]
+[v3: Remove debug printks]
+[v4: Simplify code]
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Tested-by: Daniel Kiper <daniel.kiper@oracle.com>
+Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/setup.c |  116 +++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 112 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -27,7 +27,6 @@
+ #include <xen/interface/memory.h>
+ #include <xen/interface/physdev.h>
+ #include <xen/features.h>
+-
+ #include "xen-ops.h"
+ #include "vdso.h"
+@@ -127,7 +126,105 @@ static unsigned long __init xen_release_
+       return len;
+ }
++static unsigned long __init xen_populate_physmap(unsigned long start,
++                                               unsigned long end)
++{
++      struct xen_memory_reservation reservation = {
++              .address_bits = 0,
++              .extent_order = 0,
++              .domid        = DOMID_SELF
++      };
++      unsigned long len = 0;
++      int ret;
++
++      for (pfn = start; pfn < end; pfn++) {
++              unsigned long frame;
++
++              /* Make sure pfn does not exists to start with */
++              if (pfn_to_mfn(pfn) != INVALID_P2M_ENTRY)
++                      continue;
++
++              frame = pfn;
++              set_xen_guest_handle(reservation.extent_start, &frame);
++              reservation.nr_extents = 1;
++
++              ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
++              WARN(ret != 1, "Failed to populate pfn %lx err=%d\n", pfn, ret);
++              if (ret == 1) {
++                      if (!early_set_phys_to_machine(pfn, frame)) {
++                              set_xen_guest_handle(reservation.extent_start, &frame);
++                              reservation.nr_extents = 1;
++                              ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++                                                      &reservation);
++                              break;
++                      }
++                      len++;
++              } else
++                      break;
++      }
++      if (len)
++              printk(KERN_INFO "Populated %lx-%lx pfn range: %lu pages added\n",
++                     start, end, len);
++      return len;
++}
++static unsigned long __init xen_populate_chunk(
++      const struct e820entry *list, size_t map_size,
++      unsigned long max_pfn, unsigned long *last_pfn,
++      unsigned long credits_left)
++{
++      const struct e820entry *entry;
++      unsigned int i;
++      unsigned long done = 0;
++      unsigned long dest_pfn;
++
++      for (i = 0, entry = list; i < map_size; i++, entry++) {
++              unsigned long credits = credits_left;
++              unsigned long s_pfn;
++              unsigned long e_pfn;
++              unsigned long pfns;
++              long capacity;
++
++              if (credits <= 0)
++                      break;
++
++              if (entry->type != E820_RAM)
++                      continue;
++
++              e_pfn = PFN_UP(entry->addr + entry->size);
++
++              /* We only care about E820 after the xen_start_info->nr_pages */
++              if (e_pfn <= max_pfn)
++                      continue;
++
++              s_pfn = PFN_DOWN(entry->addr);
++              /* If the E820 falls within the nr_pages, we want to start
++               * at the nr_pages PFN.
++               * If that would mean going past the E820 entry, skip it
++               */
++              if (s_pfn <= max_pfn) {
++                      capacity = e_pfn - max_pfn;
++                      dest_pfn = max_pfn;
++              } else {
++                      /* last_pfn MUST be within E820_RAM regions */
++                      if (*last_pfn && e_pfn >= *last_pfn)
++                              s_pfn = *last_pfn;
++                      capacity = e_pfn - s_pfn;
++                      dest_pfn = s_pfn;
++              }
++              /* If we had filled this E820_RAM entry, go to the next one. */
++              if (capacity <= 0)
++                      continue;
++
++              if (credits > capacity)
++                      credits = capacity;
++              pfns = xen_populate_physmap(dest_pfn, dest_pfn + credits);
++              done += pfns;
++              credits_left -= pfns;
++              *last_pfn = (dest_pfn + pfns);
++      }
++      return done;
++}
+ static unsigned long __init xen_set_identity_and_release(
+       const struct e820entry *list, size_t map_size, unsigned long nr_pages)
+ {
+@@ -150,7 +247,6 @@ static unsigned long __init xen_set_iden
+        */
+       for (i = 0, entry = list; i < map_size; i++, entry++) {
+               phys_addr_t end = entry->addr + entry->size;
+-
+               if (entry->type == E820_RAM || i == map_size - 1) {
+                       unsigned long start_pfn = PFN_DOWN(start);
+                       unsigned long end_pfn = PFN_UP(end);
+@@ -236,7 +332,9 @@ char * __init xen_memory_setup(void)
+       int rc;
+       struct xen_memory_map memmap;
+       unsigned long max_pages;
++      unsigned long last_pfn = 0;
+       unsigned long extra_pages = 0;
++      unsigned long populated;
+       int i;
+       int op;
+@@ -287,9 +385,20 @@ char * __init xen_memory_setup(void)
+        */
+       xen_released_pages = xen_set_identity_and_release(
+               map, memmap.nr_entries, max_pfn);
+-      extra_pages += xen_released_pages;
+       /*
++       * Populate back the non-RAM pages and E820 gaps that had been
++       * released. */
++      populated = xen_populate_chunk(map, memmap.nr_entries,
++                      max_pfn, &last_pfn, xen_released_pages);
++
++      extra_pages += (xen_released_pages - populated);
++
++      if (last_pfn > max_pfn) {
++              max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
++              mem_end = PFN_PHYS(max_pfn);
++      }
++      /*
+        * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+        * factor the base size.  On non-highmem systems, the base
+        * size is the full initial memory allocation; on highmem it
+@@ -302,7 +411,6 @@ char * __init xen_memory_setup(void)
+        */
+       extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+                         extra_pages);
+-
+       i = 0;
+       while (i < memmap.nr_entries) {
+               u64 addr = map[i].addr;
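The heart of this patch is the walk in xen_populate_chunk() that finds
room past nr_pages for the released pages. A simplified user-space model
follows, assuming a flattened e820-style struct of PFN ranges; the
struct layout, the populate_chunk() signature, and the printed
"populate" step stand in for the real e820entry and
xen_populate_physmap().

/*
 * Model of the populate-back walk: only RAM entries past max_pfn
 * (nr_pages) are candidates; a straddling entry starts filling at
 * max_pfn, and later entries resume after the last filled pfn.
 */
#include <stdio.h>

struct e820entry { unsigned long start_pfn, end_pfn; int is_ram; };

static unsigned long populate_chunk(const struct e820entry *map, int n,
                                    unsigned long max_pfn,
                                    unsigned long credits_left)
{
    unsigned long done = 0, last_pfn = 0;

    for (int i = 0; i < n && credits_left; i++) {
        unsigned long s = map[i].start_pfn, e = map[i].end_pfn, dest, cap;

        if (!map[i].is_ram || e <= max_pfn)     /* only RAM past nr_pages */
            continue;
        if (s <= max_pfn) {                     /* straddles nr_pages */
            dest = max_pfn;
        } else {
            if (last_pfn && e >= last_pfn)      /* resume after last fill */
                s = last_pfn;
            dest = s;
        }
        cap = e - dest;
        if (!cap)                               /* entry already filled */
            continue;
        if (credits_left < cap)
            cap = credits_left;
        /* The real code calls xen_populate_physmap(dest, dest + cap). */
        printf("populate %lx-%lx (%lu pages)\n", dest, dest + cap, cap);
        done += cap;
        credits_left -= cap;
        last_pfn = dest + cap;
    }
    return done;
}

int main(void)
{
    struct e820entry map[] = {
        { 0x000, 0x100, 1 },   /* RAM below nr_pages: skipped */
        { 0x100, 0x120, 0 },   /* reserved: skipped */
        { 0x120, 0x400, 1 },   /* RAM past nr_pages: filled */
    };
    printf("done=%lu\n", populate_chunk(map, 3, 0x200, 0x80));
    return 0;
}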
diff --git a/queue-3.4/xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch b/queue-3.4/xen-setup-update-va-mapping-when-releasing-memory-during-setup.patch
new file mode 100644
index 0000000..0efe4c2
--- /dev/null
@@ -0,0 +1,155 @@
+From 83d51ab473dddde7df858015070ed22b84ebe9a9 Mon Sep 17 00:00:00 2001
+From: David Vrabel <dvrabel@cantab.net>
+Date: Thu, 3 May 2012 16:15:42 +0100
+Subject: xen/setup: update VA mapping when releasing memory during setup
+
+From: David Vrabel <dvrabel@cantab.net>
+
+commit 83d51ab473dddde7df858015070ed22b84ebe9a9 upstream.
+
+In xen_memory_setup(), if a page that is being released has a VA
+mapping, this mapping must also be updated.  Otherwise the page will
+not be released completely -- it will still be referenced in Xen and
+won't be freed until the mapping is removed, and this prevents it from
+being reallocated at a different PFN.
+
+This was already being done for the ISA memory region in
+xen_ident_map_ISA(), but it omitted a few pages on the many systems
+that mark a few pages below the ISA memory region as reserved in the
+e820 map.
+
+This fixes errors such as:
+
+(XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152
+(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17)
+
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/enlighten.c |    1 -
+ arch/x86/xen/mmu.c       |   23 -----------------------
+ arch/x86/xen/setup.c     |   43 +++++++++++++++++++++++++++++++++++--------
+ arch/x86/xen/xen-ops.h   |    1 -
+ 4 files changed, 35 insertions(+), 33 deletions(-)
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1390,7 +1390,6 @@ asmlinkage void __init xen_start_kernel(
+       xen_raw_console_write("mapping kernel into physical memory\n");
+       pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+-      xen_ident_map_ISA();
+       /* Allocate and initialize top and mid mfn levels for p2m structure */
+       xen_build_mfn_list_list();
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1953,29 +1953,6 @@ static void xen_set_fixmap(unsigned idx,
+ #endif
+ }
+-void __init xen_ident_map_ISA(void)
+-{
+-      unsigned long pa;
+-
+-      /*
+-       * If we're dom0, then linear map the ISA machine addresses into
+-       * the kernel's address space.
+-       */
+-      if (!xen_initial_domain())
+-              return;
+-
+-      xen_raw_printk("Xen: setup ISA identity maps\n");
+-
+-      for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
+-              pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
+-
+-              if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
+-                      BUG();
+-      }
+-
+-      xen_flush_tlb();
+-}
+-
+ static void __init xen_post_allocator_init(void)
+ {
+       pv_mmu_ops.set_pte = xen_set_pte;
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -147,6 +147,13 @@ static unsigned long __init xen_do_chunk
+       return len;
+ }
++
++static unsigned long __init xen_release_chunk(unsigned long start,
++                                            unsigned long end)
++{
++      return xen_do_chunk(start, end, true);
++}
++
+ static unsigned long __init xen_populate_chunk(
+       const struct e820entry *list, size_t map_size,
+       unsigned long max_pfn, unsigned long *last_pfn,
+@@ -205,6 +212,29 @@ static unsigned long __init xen_populate
+       }
+       return done;
+ }
++
++static void __init xen_set_identity_and_release_chunk(
++      unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
++      unsigned long *released, unsigned long *identity)
++{
++      unsigned long pfn;
++
++      /*
++       * If the PFNs are currently mapped, the VA mapping also needs
++       * to be updated to be 1:1.
++       */
++      for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
++              (void)HYPERVISOR_update_va_mapping(
++                      (unsigned long)__va(pfn << PAGE_SHIFT),
++                      mfn_pte(pfn, PAGE_KERNEL_IO), 0);
++
++      if (start_pfn < nr_pages)
++              *released += xen_release_chunk(
++                      start_pfn, min(end_pfn, nr_pages));
++
++      *identity += set_phys_range_identity(start_pfn, end_pfn);
++}
++
+ static unsigned long __init xen_set_identity_and_release(
+       const struct e820entry *list, size_t map_size, unsigned long nr_pages)
+ {
+@@ -234,14 +264,11 @@ static unsigned long __init xen_set_iden
+                       if (entry->type == E820_RAM)
+                               end_pfn = PFN_UP(entry->addr);
+-                      if (start_pfn < end_pfn) {
+-                              if (start_pfn < nr_pages)
+-                                      released += xen_do_chunk(
+-                                              start_pfn, min(end_pfn, nr_pages), true);
+-
+-                              identity += set_phys_range_identity(
+-                                      start_pfn, end_pfn);
+-                      }
++                      if (start_pfn < end_pfn)
++                              xen_set_identity_and_release_chunk(
++                                      start_pfn, end_pfn, nr_pages,
++                                      &released, &identity);
++
+                       start = end;
+               }
+       }
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
+ void xen_build_mfn_list_list(void);
+ void xen_setup_machphys_mapping(void);
+ pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+-void xen_ident_map_ISA(void);
+ void xen_reserve_top(void);
+ extern unsigned long xen_max_p2m_pfn;
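To see the new ordering in isolation - remap the VA to its 1:1 PTE
first, then release the frames, then mark the range identity - here is a
compact C model of xen_set_identity_and_release_chunk(). The stubs, the
direct-map base constant, and the sample PFN numbers are all assumptions
made for this sketch, not kernel APIs.

/*
 * Model of the patch's helper: any pfn that is still mapped gets
 * its kernel VA rewritten to the identity PTE before the frame is
 * handed back, so the hypervisor can actually free it.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stubs standing in for the hypercall and the release/identity steps. */
static void update_va_mapping(unsigned long va, unsigned long identity_pfn)
{
    printf("remap va %lx -> 1:1 pfn %lx\n", va, identity_pfn);
}

static unsigned long release_chunk(unsigned long s, unsigned long e)
{
    printf("release pfns %lx-%lx\n", s, e);
    return e - s;
}

static unsigned long set_identity(unsigned long s, unsigned long e)
{
    return e - s;
}

static void set_identity_and_release_chunk(unsigned long start_pfn,
                                           unsigned long end_pfn,
                                           unsigned long nr_pages,
                                           unsigned long max_pfn_mapped,
                                           unsigned long *released,
                                           unsigned long *identity)
{
    /* Fix the VA mapping for every pfn that is currently mapped.
     * 0xffff880000000000 models __va()'s direct-map base (assumed). */
    for (unsigned long pfn = start_pfn;
         pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
        update_va_mapping(0xffff880000000000UL + (pfn << PAGE_SHIFT), pfn);

    if (start_pfn < nr_pages)
        *released += release_chunk(start_pfn,
                                   end_pfn < nr_pages ? end_pfn : nr_pages);

    *identity += set_identity(start_pfn, end_pfn);
}

int main(void)
{
    unsigned long released = 0, identity = 0;
    set_identity_and_release_chunk(0x9e, 0xa0, 0x200, 0x9f,
                                   &released, &identity);
    printf("released=%lu identity=%lu\n", released, identity);
    return 0;
}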