--- /dev/null
+From a67baeb77375199bbd842fa308cb565164dd1f19 Mon Sep 17 00:00:00 2001
+From: David Vrabel <david.vrabel@citrix.com>
+Date: Wed, 24 Oct 2012 12:39:02 +0100
+Subject: xen/gntdev: don't leak memory from IOCTL_GNTDEV_MAP_GRANT_REF
+
+From: David Vrabel <david.vrabel@citrix.com>
+
+commit a67baeb77375199bbd842fa308cb565164dd1f19 upstream.
+
+The map->kmap_ops array allocated in gntdev_alloc_map() was never
+freed by gntdev_put_map().
+
+Add a gntdev_free_map() helper function to free everything allocated
+by gntdev_alloc_map().
+
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c | 36 +++++++++++++++++++-----------------
+ 1 file changed, 19 insertions(+), 17 deletions(-)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -105,6 +105,21 @@ static void gntdev_print_maps(struct gnt
+ #endif
+ }
+
++static void gntdev_free_map(struct grant_map *map)
++{
++	if (map == NULL)
++		return;
++
++	if (map->pages)
++		free_xenballooned_pages(map->count, map->pages);
++	kfree(map->pages);
++	kfree(map->grants);
++	kfree(map->map_ops);
++	kfree(map->unmap_ops);
++	kfree(map->kmap_ops);
++	kfree(map);
++}
++
+ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+ {
+ 	struct grant_map *add;
+@@ -142,12 +157,7 @@ static struct grant_map *gntdev_alloc_ma
+ 	return add;
+
+ err:
+-	kfree(add->pages);
+-	kfree(add->grants);
+-	kfree(add->map_ops);
+-	kfree(add->unmap_ops);
+-	kfree(add->kmap_ops);
+-	kfree(add);
++	gntdev_free_map(add);
+ 	return NULL;
+ }
+
+@@ -198,17 +208,9 @@ static void gntdev_put_map(struct grant_
+ 		evtchn_put(map->notify.event);
+ 	}
+
+-	if (map->pages) {
+-		if (!use_ptemod)
+-			unmap_grant_pages(map, 0, map->count);
+-
+-		free_xenballooned_pages(map->count, map->pages);
+-	}
+-	kfree(map->pages);
+-	kfree(map->grants);
+-	kfree(map->map_ops);
+-	kfree(map->unmap_ops);
+-	kfree(map);
++	if (map->pages && !use_ptemod)
++		unmap_grant_pages(map, 0, map->count);
++	gntdev_free_map(map);
+ }
+
+ /* ------------------------------------------------------------------ */
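For context, the following is a minimal, hypothetical userspace sketch of the
IOCTL_GNTDEV_MAP_GRANT_REF / IOCTL_GNTDEV_UNMAP_GRANT_REF cycle that exercises
the code patched above. It is not part of the patch; the domid and grant
reference are placeholder values, and the header path assumes the exported
kernel UAPI header. Before the fix, every such map/unmap cycle leaked the
map->kmap_ops array allocated in gntdev_alloc_map().

/*
 * Hypothetical userspace sketch (not part of the patch above): one
 * IOCTL_GNTDEV_MAP_GRANT_REF / IOCTL_GNTDEV_UNMAP_GRANT_REF cycle
 * against /dev/xen/gntdev.  The domid and grant reference below are
 * placeholder values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>		/* exported kernel UAPI header */

int main(void)
{
	int fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0) {
		perror("open /dev/xen/gntdev");
		return 1;
	}

	struct ioctl_gntdev_map_grant_ref map = {
		.count	 = 1,
		.refs[0] = { .domid = 1, .ref = 8 },	/* placeholders */
	};
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) == 0) {
		/* The granted page becomes accessible via mmap() at map.index. */
		void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, map.index);
		if (addr != MAP_FAILED)
			munmap(addr, 4096);

		struct ioctl_gntdev_unmap_grant_ref unmap = {
			.index = map.index,
			.count = 1,
		};
		ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	}

	close(fd);
	return 0;
}
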
--- /dev/null
+From 95a7d76897c1e7243d4137037c66d15cbf2cce76 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 31 Oct 2012 12:38:31 -0400
+Subject: xen/mmu: Use Xen specific TLB flush instead of the generic one.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 95a7d76897c1e7243d4137037c66d15cbf2cce76 upstream.
+
+As Mukesh explained it, the MMUEXT_TLB_FLUSH_ALL operation allows
+the hypervisor to do a TLB flush on all active vCPUs. If we instead
+use the generic flush (which ends up being xen_flush_tlb) we end up
+making the MMUEXT_TLB_FLUSH_LOCAL hypercall. But before we make
+that hypercall the kernel will IPI all of the vCPUs (even those
+that are asleep from the hypervisor's perspective). The end result
+is that we needlessly wake them up and do a TLB flush when we could
+just let the hypervisor do it correctly.
+
+This patch gives around a 50% speed improvement when migrating
+idle guests from one host to another.
+
+Oracle-bug: 14630170
+
+Tested-by: Jingjie Jiang <jingjie.jiang@oracle.com>
+Suggested-by: Mukesh Rathor <mukesh.rathor@oracle.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/mmu.c | 21 ++++++++++++++++++++-
+ include/trace/events/xen.h | 8 ++++++++
+ 2 files changed, 28 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1203,6 +1203,25 @@ unsigned long xen_read_cr2_direct(void)
+ 	return this_cpu_read(xen_vcpu_info.arch.cr2);
+ }
+
++void xen_flush_tlb_all(void)
++{
++	struct mmuext_op *op;
++	struct multicall_space mcs;
++
++	trace_xen_mmu_flush_tlb_all(0);
++
++	preempt_disable();
++
++	mcs = xen_mc_entry(sizeof(*op));
++
++	op = mcs.args;
++	op->cmd = MMUEXT_TLB_FLUSH_ALL;
++	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
++
++	xen_mc_issue(PARAVIRT_LAZY_MMU);
++
++	preempt_enable();
++}
+ static void xen_flush_tlb(void)
+ {
+ 	struct mmuext_op *op;
+@@ -2364,7 +2383,7 @@ int xen_remap_domain_mfn_range(struct vm
+ 	err = 0;
+ out:
+
+-	flush_tlb_all();
++	xen_flush_tlb_all();
+
+ 	return err;
+ }
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -377,6 +377,14 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+
++TRACE_EVENT(xen_mmu_flush_tlb_all,
++	    TP_PROTO(int x),
++	    TP_ARGS(x),
++	    TP_STRUCT__entry(__array(char, x, 0)),
++	    TP_fast_assign((void)x),
++	    TP_printk("%s", "")
++	);
++
+ TRACE_EVENT(xen_mmu_flush_tlb,
+ 	    TP_PROTO(int x),
+ 	    TP_ARGS(x),
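
For reference, here is a simplified sketch (not part of the patch) of issuing
the same MMUEXT_TLB_FLUSH_ALL operation as a single synchronous hypercall via
HYPERVISOR_mmuext_op(); the function name below is hypothetical. The patch
itself routes the operation through xen_mc_entry()/xen_mc_issue() so it can be
batched with other pending lazy-MMU multicalls.

/*
 * Simplified sketch (not part of the patch): the same flush issued as a
 * single synchronous hypercall instead of via the multicall batch used
 * by xen_flush_tlb_all() above.  MMUEXT_TLB_FLUSH_ALL asks the hypervisor
 * to flush the TLBs of all of the domain's active vCPUs, so no IPIs to
 * sleeping vCPUs are needed.
 */
#include <xen/interface/xen.h>	/* struct mmuext_op, MMUEXT_TLB_FLUSH_ALL, DOMID_SELF */
#include <asm/xen/hypercall.h>	/* HYPERVISOR_mmuext_op() */

static void flush_tlb_all_via_mmuext(void)	/* hypothetical helper name */
{
	struct mmuext_op op = { .cmd = MMUEXT_TLB_FLUSH_ALL };

	/* DOMID_SELF: operate on the calling domain's own vCPUs. */
	HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
}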