--- /dev/null
+From e058632670b709145730a134acc3f83f392f7aa7 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <sstabellini@kernel.org>
+Date: Thu, 13 Apr 2017 14:04:21 -0700
+Subject: xen/arm,arm64: fix xen_dma_ops after 815dd18 "Consolidate get_dma_ops..."
+
+From: Stefano Stabellini <sstabellini@kernel.org>
+
+commit e058632670b709145730a134acc3f83f392f7aa7 upstream.
+
+The following commit:
+
+ commit 815dd18788fe0d41899f51b91d0560279cf16b0d
+ Author: Bart Van Assche <bart.vanassche@sandisk.com>
+ Date: Fri Jan 20 13:04:04 2017 -0800
+
+ treewide: Consolidate get_dma_ops() implementations
+
+rearranges get_dma_ops in a way that xen_dma_ops are not returned when
+running on Xen anymore, dev->dma_ops is returned instead (see
+arch/arm/include/asm/dma-mapping.h:get_arch_dma_ops and
+include/linux/dma-mapping.h:get_dma_ops).
+
+Fix the problem by storing dev->dma_ops in dev_archdata, and setting
+dev->dma_ops to xen_dma_ops. This way, xen_dma_ops is returned naturally
+by get_dma_ops. The Xen code can retrieve the original dev->dma_ops from
+dev_archdata when needed. It also allows us to remove __generic_dma_ops
+from common headers.
+
+Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
+Tested-by: Julien Grall <julien.grall@arm.com>
+Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+CC: linux@armlinux.org.uk
+CC: catalin.marinas@arm.com
+CC: will.deacon@arm.com
+CC: boris.ostrovsky@oracle.com
+CC: jgross@suse.com
+CC: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/device.h | 3 +++
+ arch/arm/include/asm/dma-mapping.h | 12 +-----------
+ arch/arm/mm/dma-mapping.c | 7 +++++++
+ arch/arm64/include/asm/device.h | 3 +++
+ arch/arm64/include/asm/dma-mapping.h | 13 +------------
+ arch/arm64/mm/dma-mapping.c | 7 +++++++
+ include/xen/arm/page-coherent.h | 8 ++++++++
+ 7 files changed, 30 insertions(+), 23 deletions(-)
+
+--- a/arch/arm/include/asm/device.h
++++ b/arch/arm/include/asm/device.h
+@@ -16,6 +16,9 @@ struct dev_archdata {
+ #ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+ #endif
++#ifdef CONFIG_XEN
++ const struct dma_map_ops *dev_dma_ops;
++#endif
+ bool dma_coherent;
+ };
+
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -16,19 +16,9 @@
+ extern const struct dma_map_ops arm_dma_ops;
+ extern const struct dma_map_ops arm_coherent_dma_ops;
+
+-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+-{
+- if (dev && dev->dma_ops)
+- return dev->dma_ops;
+- return &arm_dma_ops;
+-}
+-
+ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+ {
+- if (xen_initial_domain())
+- return xen_dma_ops;
+- else
+- return __generic_dma_ops(NULL);
++ return &arm_dma_ops;
+ }
+
+ #define HAVE_ARCH_DMA_SUPPORTED 1
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -2414,6 +2414,13 @@ void arch_setup_dma_ops(struct device *d
+ dma_ops = arm_get_dma_map_ops(coherent);
+
+ set_dma_ops(dev, dma_ops);
++
++#ifdef CONFIG_XEN
++ if (xen_initial_domain()) {
++ dev->archdata.dev_dma_ops = dev->dma_ops;
++ dev->dma_ops = xen_dma_ops;
++ }
++#endif
+ }
+
+ void arch_teardown_dma_ops(struct device *dev)
+--- a/arch/arm64/include/asm/device.h
++++ b/arch/arm64/include/asm/device.h
+@@ -20,6 +20,9 @@ struct dev_archdata {
+ #ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+ #endif
++#ifdef CONFIG_XEN
++ const struct dma_map_ops *dev_dma_ops;
++#endif
+ bool dma_coherent;
+ };
+
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -27,11 +27,8 @@
+ #define DMA_ERROR_CODE (~(dma_addr_t)0)
+ extern const struct dma_map_ops dummy_dma_ops;
+
+-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+ {
+- if (dev && dev->dma_ops)
+- return dev->dma_ops;
+-
+ /*
+ * We expect no ISA devices, and all other DMA masters are expected to
+ * have someone call arch_setup_dma_ops at device creation time.
+@@ -39,14 +36,6 @@ static inline const struct dma_map_ops *
+ return &dummy_dma_ops;
+ }
+
+-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+-{
+- if (xen_initial_domain())
+- return xen_dma_ops;
+- else
+- return __generic_dma_ops(NULL);
+-}
+-
+ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent);
+ #define arch_setup_dma_ops arch_setup_dma_ops
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -977,4 +977,11 @@ void arch_setup_dma_ops(struct device *d
+
+ dev->archdata.dma_coherent = coherent;
+ __iommu_setup_dma_ops(dev, dma_base, size, iommu);
++
++#ifdef CONFIG_XEN
++ if (xen_initial_domain()) {
++ dev->archdata.dev_dma_ops = dev->dma_ops;
++ dev->dma_ops = xen_dma_ops;
++ }
++#endif
+ }
+--- a/include/xen/arm/page-coherent.h
++++ b/include/xen/arm/page-coherent.h
+@@ -2,8 +2,16 @@
+ #define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+ #include <asm/page.h>
++#include <asm/dma-mapping.h>
+ #include <linux/dma-mapping.h>
+
++static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
++{
++ if (dev && dev->archdata.dev_dma_ops)
++ return dev->archdata.dev_dma_ops;
++ return get_arch_dma_ops(NULL);
++}
++
+ void __xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
--- /dev/null
+From 84d582d236dc1f9085e741affc72e9ba061a67c2 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Mon, 24 Apr 2017 15:04:53 -0400
+Subject: xen: Revert commits da72ff5bfcb0 and 72a9b186292d
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 84d582d236dc1f9085e741affc72e9ba061a67c2 upstream.
+
+Recent discussion (http://marc.info/?l=xen-devel&m=149192184523741)
+established that commit 72a9b186292d ("xen: Remove event channel
+notification through Xen PCI platform device") (and thus commit
+da72ff5bfcb0 ("partially revert "xen: Remove event channel
+notification through Xen PCI platform device"")) are unnecessary and,
+in fact, prevent HVM guests from booting on Xen releases prior to 4.0.
+
+Therefore we revert both of those commits.
+
+The summary of that discussion is below:
+
+ Here is the brief summary of the current situation:
+
+ Before the offending commit (72a9b186292):
+
+ 1) INTx does not work because of the reset_watches path.
+ 2) The reset_watches path is only taken if you have Xen > 4.0
+ 3) The Linux Kernel by default will use vector inject if the hypervisor
+ support. So even INTx does not work no body running the kernel with
+ Xen > 4.0 would notice. Unless he explicitly disabled this feature
+ either in the kernel or in Xen (and this can only be disabled by
+ modifying the code, not user-supported way to do it).
+
+ After the offending commit (+ partial revert):
+
+ 1) INTx is no longer supported for HVM (only for PV guests).
+ 2) Any HVM guest kernel will not boot on Xen < 4.0, which does
+ not have vector injection support, since the only other mode
+ supported is INTx.
+
+ So based on this summary, I think before commit (72a9b186292) we were
+ in a much better position from a user's point of view.
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: x86@kernel.org
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: Julien Grall <julien.grall@arm.com>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
+Cc: Ross Lagerwall <ross.lagerwall@citrix.com>
+Cc: xen-devel@lists.xenproject.org
+Cc: linux-kernel@vger.kernel.org
+Cc: linux-pci@vger.kernel.org
+Cc: Anthony Liguori <aliguori@amazon.com>
+Cc: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/xen/events.h | 11 +++++++++++
+ arch/x86/pci/xen.c | 2 +-
+ arch/x86/xen/enlighten.c | 16 +++++++++++-----
+ arch/x86/xen/smp.c | 2 ++
+ arch/x86/xen/time.c | 5 +++++
+ drivers/xen/events/events_base.c | 26 +++++++++++++++++---------
+ drivers/xen/platform-pci.c | 13 +++----------
+ 7 files changed, 50 insertions(+), 25 deletions(-)
+
+--- a/arch/x86/include/asm/xen/events.h
++++ b/arch/x86/include/asm/xen/events.h
+@@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(stru
+ /* No need for a barrier -- XCHG is a barrier on x86. */
+ #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
++extern int xen_have_vector_callback;
++
++/*
++ * Events delivered via platform PCI interrupts are always
++ * routed to vcpu 0 and hence cannot be rebound.
++ */
++static inline bool xen_support_evtchn_rebind(void)
++{
++ return (!xen_hvm_domain() || xen_have_vector_callback);
++}
++
+ #endif /* _ASM_X86_XEN_EVENTS_H */
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -447,7 +447,7 @@ void __init xen_msi_init(void)
+
+ int __init pci_xen_hvm_init(void)
+ {
+- if (!xen_feature(XENFEAT_hvm_pirqs))
++ if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
+ return 0;
+
+ #ifdef CONFIG_ACPI
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -138,6 +138,8 @@ struct shared_info xen_dummy_shared_info
+ void *xen_initial_gdt;
+
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
++__read_mostly int xen_have_vector_callback;
++EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+
+ static int xen_cpu_up_prepare(unsigned int cpu);
+ static int xen_cpu_up_online(unsigned int cpu);
+@@ -1861,7 +1863,9 @@ static int xen_cpu_up_prepare(unsigned i
+ xen_vcpu_setup(cpu);
+ }
+
+- if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
++ if (xen_pv_domain() ||
++ (xen_have_vector_callback &&
++ xen_feature(XENFEAT_hvm_safe_pvclock)))
+ xen_setup_timer(cpu);
+
+ rc = xen_smp_intr_init(cpu);
+@@ -1877,7 +1881,9 @@ static int xen_cpu_dead(unsigned int cpu
+ {
+ xen_smp_intr_free(cpu);
+
+- if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
++ if (xen_pv_domain() ||
++ (xen_have_vector_callback &&
++ xen_feature(XENFEAT_hvm_safe_pvclock)))
+ xen_teardown_timer(cpu);
+
+ return 0;
+@@ -1916,8 +1922,8 @@ static void __init xen_hvm_guest_init(vo
+
+ xen_panic_handler_init();
+
+- BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+-
++ if (xen_feature(XENFEAT_hvm_callback_vector))
++ xen_have_vector_callback = 1;
+ xen_hvm_smp_init();
+ WARN_ON(xen_cpuhp_setup());
+ xen_unplug_emulated_devices();
+@@ -1958,7 +1964,7 @@ bool xen_hvm_need_lapic(void)
+ return false;
+ if (!xen_hvm_domain())
+ return false;
+- if (xen_feature(XENFEAT_hvm_pirqs))
++ if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+ return false;
+ return true;
+ }
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -742,6 +742,8 @@ static void __init xen_hvm_smp_prepare_c
+
+ void __init xen_hvm_smp_init(void)
+ {
++ if (!xen_have_vector_callback)
++ return;
+ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+ smp_ops.cpu_die = xen_cpu_die;
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -432,6 +432,11 @@ static void xen_hvm_setup_cpu_clockevent
+
+ void __init xen_hvm_init_time_ops(void)
+ {
++ /* vector callback is needed otherwise we cannot receive interrupts
++ * on cpu > 0 and at this point we don't know how many cpus are
++ * available */
++ if (!xen_have_vector_callback)
++ return;
+ if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
+ printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
+ "disable pv timer\n");
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1312,6 +1312,9 @@ static int rebind_irq_to_cpu(unsigned ir
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+
++ if (!xen_support_evtchn_rebind())
++ return -1;
++
+ /* Send future instances of this interrupt to other vcpu. */
+ bind_vcpu.port = evtchn;
+ bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
+@@ -1645,15 +1648,20 @@ void xen_callback_vector(void)
+ {
+ int rc;
+ uint64_t callback_via;
+-
+- callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+- rc = xen_set_callback_via(callback_via);
+- BUG_ON(rc);
+- pr_info("Xen HVM callback vector for event delivery is enabled\n");
+- /* in the restore case the vector has already been allocated */
+- if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+- xen_hvm_callback_vector);
++ if (xen_have_vector_callback) {
++ callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
++ rc = xen_set_callback_via(callback_via);
++ if (rc) {
++ pr_err("Request for Xen HVM callback vector failed\n");
++ xen_have_vector_callback = 0;
++ return;
++ }
++ pr_info("Xen HVM callback vector for event delivery is enabled\n");
++ /* in the restore case the vector has already been allocated */
++ if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
++ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
++ xen_hvm_callback_vector);
++ }
+ }
+ #else
+ void xen_callback_vector(void) {}
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -67,7 +67,7 @@ static uint64_t get_callback_via(struct
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+- return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
++ return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+@@ -90,7 +90,7 @@ static int xen_allocate_irq(struct pci_d
+ static int platform_pci_resume(struct pci_dev *pdev)
+ {
+ int err;
+- if (!xen_pv_domain())
++ if (xen_have_vector_callback)
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+@@ -138,14 +138,7 @@ static int platform_pci_probe(struct pci
+ platform_mmio = mmio_addr;
+ platform_mmiolen = mmio_len;
+
+- /*
+- * Xen HVM guests always use the vector callback mechanism.
+- * L1 Dom0 in a nested Xen environment is a PV guest inside in an
+- * HVM environment. It needs the platform-pci driver to get
+- * notifications from L0 Xen, but it cannot use the vector callback
+- * as it is not exported by L1 Xen.
+- */
+- if (xen_pv_domain()) {
++ if (!xen_have_vector_callback) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);