git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blobdiff - src/patches/suse-2.6.27.31/patches.xen/xen-x86-dcr-fallback
Revert "Move xen patchset to new version's subdir."
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.xen / xen-x86-dcr-fallback
diff --git a/src/patches/suse-2.6.27.31/patches.xen/xen-x86-dcr-fallback b/src/patches/suse-2.6.27.31/patches.xen/xen-x86-dcr-fallback
deleted file mode 100644 (file)
index 162f256..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
-From: jbeulich@novell.com
-Patch-mainline: obsolete
-References: 181869
-
-This avoids losing precious special memory in places where any memory can be
-used.
-
---- sle11-2009-05-14.orig/arch/x86/mm/hypervisor.c     2009-03-16 16:17:45.000000000 +0100
-+++ sle11-2009-05-14/arch/x86/mm/hypervisor.c  2009-03-30 12:18:24.000000000 +0200
-@@ -42,6 +42,7 @@
- #include <xen/interface/memory.h>
- #include <linux/module.h>
- #include <linux/percpu.h>
-+#include <linux/highmem.h>
- #include <asm/tlbflush.h>
- #include <linux/highmem.h>
-@@ -712,6 +713,83 @@ void xen_destroy_contiguous_region(unsig
-               BUG();
-       balloon_unlock(flags);
-+
-+      if (unlikely(!success)) {
-+              /* Try hard to get the special memory back to Xen. */
-+              exchange.in.extent_order = 0;
-+              set_xen_guest_handle(exchange.in.extent_start, &in_frame);
-+
-+              for (i = 0; i < (1U<<order); i++) {
-+                      struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
-+                      unsigned long pfn;
-+                      mmu_update_t mmu;
-+                      unsigned int j = 0;
-+
-+                      if (!page) {
-+                              printk(KERN_WARNING "Xen and kernel out of memory "
-+                                     "while trying to release an order %u "
-+                                     "contiguous region\n", order);
-+                              break;
-+                      }
-+                      pfn = page_to_pfn(page);
-+
-+                      balloon_lock(flags);
-+
-+                      if (!PageHighMem(page)) {
-+                              void *v = __va(pfn << PAGE_SHIFT);
-+
-+                              scrub_pages(v, 1);
-+                              MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
-+                                                      __pte_ma(0), UVMF_INVLPG|UVMF_ALL);
-+                              ++j;
-+                      }
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+                      else {
-+                              scrub_pages(kmap(page), 1);
-+                              kunmap(page);
-+                              kmap_flush_unused();
-+                      }
-+#endif
-+
-+                      frame = pfn_to_mfn(pfn);
-+                      set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+
-+                      MULTI_update_va_mapping(cr_mcl + j, vstart,
-+                                              pfn_pte_ma(frame, PAGE_KERNEL),
-+                                              UVMF_INVLPG|UVMF_ALL);
-+                      ++j;
-+
-+                      pfn = __pa(vstart) >> PAGE_SHIFT;
-+                      set_phys_to_machine(pfn, frame);
-+                      if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+                              mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+                              mmu.val = pfn;
-+                              cr_mcl[j].op = __HYPERVISOR_mmu_update;
-+                              cr_mcl[j].args[0] = (unsigned long)&mmu;
-+                              cr_mcl[j].args[1] = 1;
-+                              cr_mcl[j].args[2] = 0;
-+                              cr_mcl[j].args[3] = DOMID_SELF;
-+                              ++j;
-+                      }
-+
-+                      cr_mcl[j].op = __HYPERVISOR_memory_op;
-+                      cr_mcl[j].args[0] = XENMEM_decrease_reservation;
-+                      cr_mcl[j].args[1] = (unsigned long)&exchange.in;
-+
-+                      if (HYPERVISOR_multicall(cr_mcl, j + 1))
-+                              BUG();
-+                      BUG_ON(cr_mcl[j].result != 1);
-+                      while (j--)
-+                              BUG_ON(cr_mcl[j].result != 0);
-+
-+                      balloon_unlock(flags);
-+
-+                      free_empty_pages(&page, 1);
-+
-+                      in_frame++;
-+                      vstart += PAGE_SIZE;
-+              }
-+      }
- }
- EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
---- sle11-2009-05-14.orig/drivers/xen/balloon/balloon.c        2008-11-25 13:34:52.000000000 +0100
-+++ sle11-2009-05-14/drivers/xen/balloon/balloon.c     2009-03-16 16:40:33.000000000 +0100
-@@ -687,7 +687,7 @@ struct page **alloc_empty_pages_and_page
-       goto out;
- }
--void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec)
- {
-       unsigned long flags;
-       int i;
-@@ -702,11 +702,24 @@ void free_empty_pages_and_pagevec(struct
-       }
-       balloon_unlock(flags);
--      kfree(pagevec);
-+      if (free_vec)
-+              kfree(pagevec);
-+      else
-+              totalram_pages = bs.current_pages -= nr_pages;
-       schedule_work(&balloon_worker);
- }
-+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-+{
-+      _free_empty_pages_and_pagevec(pagevec, nr_pages, 1);
-+}
-+
-+void free_empty_pages(struct page **pagevec, int nr_pages)
-+{
-+      _free_empty_pages_and_pagevec(pagevec, nr_pages, 0);
-+}
-+
- void balloon_release_driver_page(struct page *page)
- {
-       unsigned long flags;
---- sle11-2009-05-14.orig/include/xen/balloon.h        2009-03-16 16:38:05.000000000 +0100
-+++ sle11-2009-05-14/include/xen/balloon.h     2009-03-16 16:40:33.000000000 +0100
-@@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon
- struct page **alloc_empty_pages_and_pagevec(int nr_pages);
- void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
-+/* Free an empty page range (not allocated through
-+   alloc_empty_pages_and_pagevec), adding to the balloon. */
-+void free_empty_pages(struct page **pagevec, int nr_pages);
-+
- void balloon_release_driver_page(struct page *page);
- /*