--- /dev/null
+From eb493fbc150f4a28151ae1ee84f24395989f3600 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Tue, 3 Jul 2018 16:31:41 -0400
+Subject: drm/nouveau: Set DRIVER_ATOMIC cap earlier to fix debugfs
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit eb493fbc150f4a28151ae1ee84f24395989f3600 upstream.
+
+Currently nouveau doesn't actually expose the state debugfs file that's
+usually provided for any modesetting driver that supports atomic, even
+if nouveau is loaded with atomic=1. This is because the standard debugfs
+files that DRM creates for atomic drivers are set up when
+drm_get_pci_dev() is called from nouveau_drm.c. This happens well
+before we've initialized the display core, which is currently
+responsible for setting the DRIVER_ATOMIC cap.
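+
+A rough sketch (not part of this patch; names assumed from the DRM core of
+this era) of why the ordering matters - the atomic "state" file is only
+registered if the DRIVER_ATOMIC bit is already set at the time the minor's
+debugfs entries are created:
+
+	/* sketch of the relevant part of drm_debugfs_init() */
+	static int drm_debugfs_init_sketch(struct drm_minor *minor)
+	{
+		int ret;
+
+		if (drm_core_check_feature(minor->dev, DRIVER_ATOMIC)) {
+			ret = drm_atomic_debugfs_init(minor);
+			if (ret)
+				return ret;
+		}
+		return 0;
+	}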
+
+So, move the atomic option into nouveau_drm.c and just add the
+DRIVER_ATOMIC cap whenever it's enabled on the kernel commandline. This
+shouldn't cause any actual issues, as the atomic ioctl will still fail
+as expected even if the display core doesn't disable it until later in
+the init sequence. This also provides the added benefit of being able to
+use the state debugfs file to check the current display state even if
+clients aren't allowed to modify it through anything other than the
+legacy ioctls.
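+
+(With this in place and atomic=1, the current display state can then be
+dumped through the usual debugfs node, e.g. "cat
+/sys/kernel/debug/dri/0/state", assuming debugfs is mounted in the standard
+location and nouveau is card 0.)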
+
+Additionally, disable the DRIVER_ATOMIC cap in nv04's display core, as
+this was already disabled there previously.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/dispnv04/disp.c | 3 +++
+ drivers/gpu/drm/nouveau/nouveau_drm.c | 7 +++++++
+ drivers/gpu/drm/nouveau/nv50_display.c | 6 ------
+ 3 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
+@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *d
+ nouveau_display(dev)->init = nv04_display_init;
+ nouveau_display(dev)->fini = nv04_display_fini;
+
++ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
++ dev->driver->driver_features &= ~DRIVER_ATOMIC;
++
+ nouveau_hw_save_vga_fonts(dev, 1);
+
+ nv04_crtc_create(dev, 0);
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver
+ int nouveau_modeset = -1;
+ module_param_named(modeset, nouveau_modeset, int, 0400);
+
++MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
++static int nouveau_atomic = 0;
++module_param_named(atomic, nouveau_atomic, int, 0400);
++
+ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
+ static int nouveau_runtime_pm = -1;
+ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
+@@ -501,6 +505,9 @@ static int nouveau_drm_probe(struct pci_
+
+ pci_set_master(pdev);
+
++ if (nouveau_atomic)
++ driver_pci.driver_features |= DRIVER_ATOMIC;
++
+ ret = drm_get_pci_dev(pdev, pent, &driver_pci);
+ if (ret) {
+ nvkm_device_del(&device);
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -4441,10 +4441,6 @@ nv50_display_destroy(struct drm_device *
+ kfree(disp);
+ }
+
+-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+-static int nouveau_atomic = 0;
+-module_param_named(atomic, nouveau_atomic, int, 0400);
+-
+ int
+ nv50_display_create(struct drm_device *dev)
+ {
+@@ -4469,8 +4465,6 @@ nv50_display_create(struct drm_device *d
+ disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
+- if (nouveau_atomic)
+- dev->driver->driver_features |= DRIVER_ATOMIC;
+
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
--- /dev/null
+From 76fa4975f3ed12d15762bc979ca44078598ed8ee Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Tue, 17 Jul 2018 17:19:13 +1000
+Subject: KVM: PPC: Check if IOMMU page is contained in the pinned physical page
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit 76fa4975f3ed12d15762bc979ca44078598ed8ee upstream.
+
+A VM which has:
+ - a DMA capable device passed through to it (eg. network card);
+ - a malicious kernel that ignores H_PUT_TCE failure;
+ - the capability of using IOMMU pages bigger than physical pages
+can create an IOMMU mapping that exposes (for example) 16MB of
+the host physical memory to the device when only 64K was allocated to the VM.
+
+The remaining 16MB - 64K will be some other content of host memory, possibly
+including pages of the VM, but also pages of host kernel memory, host
+programs or other VMs.
+
+The attacking VM does not control the location of the page it can map,
+and is only allowed to map as many pages as it has pages of RAM.
+
+We already have a check in drivers/vfio/vfio_iommu_spapr_tce.c that
+an IOMMU page is contained in the physical page so the PCI hardware won't
+get access to unassigned host memory; however this check is missing in
+the KVM fastpath (H_PUT_TCE accelerated code). We have been lucky so far
+and have not hit this yet: the very first time a mapping happens we do
+not have tbl::it_userspace allocated yet and fall back to userspace,
+which in turn calls the VFIO IOMMU driver; that fails and the guest does
+not retry.
+
+This stores the smallest preregistered page size in the preregistered
+region descriptor and changes the mm_iommu_xxx API to check this against
+the IOMMU page size.
+
+This calculates the maximum page size as the minimum of the natural
+region alignment and the compound page size. For the page shift this uses
+the shift returned by find_linux_pte(), which indicates how the page is
+mapped in the current userspace: if the page is huge and the shift is not
+zero, then it is a leaf pte and the page is mapped within the range.
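+
+As a worked illustration (hypothetical numbers, 64K base pages so
+PAGE_SHIFT = 16): for a region preregistered at ua = 0x10000000 (256MB
+aligned) with entries = 256, i.e. a 16MB region,
+
+	entries << PAGE_SHIFT        = 0x1000000
+	ua | (entries << PAGE_SHIFT) = 0x11000000
+	__ffs(...)                   = 24
+
+so mem->pageshift starts out as 24 (16MB); pinning the individual backing
+pages can only lower it further, and a mapping request with an IOMMU page
+shift larger than mem->pageshift is now rejected instead of exposing
+neighbouring host memory.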
+
+Fixes: 121f80ba68f1 ("KVM: PPC: VFIO: Add in-kernel acceleration for VFIO")
+Cc: stable@vger.kernel.org # v4.12+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mmu_context.h | 4 +--
+ arch/powerpc/kvm/book3s_64_vio.c | 2 -
+ arch/powerpc/kvm/book3s_64_vio_hv.c | 6 +++--
+ arch/powerpc/mm/mmu_context_iommu.c | 37 +++++++++++++++++++++++++++++++--
+ drivers/vfio/vfio_iommu_spapr_tce.c | 2 -
+ 5 files changed, 43 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t
+ extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries);
+ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa);
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+ extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa);
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+ extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+ extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+ #endif
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kv
+ /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
+ return H_TOO_HARD;
+
+- if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
++ if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
+ return H_HARDWARE;
+
+ if (mm_iommu_mapped_inc(mem))
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(stru
+ if (!mem)
+ return H_TOO_HARD;
+
+- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
++ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
++ &hpa)))
+ return H_HARDWARE;
+
+ pua = (void *) vmalloc_to_phys(pua);
+@@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct
+
+ mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
+ if (mem)
+- prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
++ prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
++ IOMMU_PAGE_SHIFT_4K, &tces) == 0;
+ }
+
+ if (!prereg) {
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -19,6 +19,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/swap.h>
+ #include <asm/mmu_context.h>
++#include <asm/pte-walk.h>
+
+ static DEFINE_MUTEX(mem_list_mutex);
+
+@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
+ struct rcu_head rcu;
+ unsigned long used;
+ atomic64_t mapped;
++ unsigned int pageshift;
+ u64 ua; /* userspace address */
+ u64 entries; /* number of entries in hpas[] */
+ u64 *hpas; /* vmalloc'ed */
+@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm,
+ {
+ struct mm_iommu_table_group_mem_t *mem;
+ long i, j, ret = 0, locked_entries = 0;
++ unsigned int pageshift;
++ unsigned long flags;
+ struct page *page = NULL;
+
+ mutex_lock(&mem_list_mutex);
+@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm,
+ goto unlock_exit;
+ }
+
++ /*
++ * For a starting point for a maximum page size calculation
++ * we use @ua and @entries natural alignment to allow IOMMU pages
++ * smaller than huge pages but still bigger than PAGE_SIZE.
++ */
++ mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
+ mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
+ if (!mem->hpas) {
+ kfree(mem);
+@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm,
+ }
+ }
+ populate:
++ pageshift = PAGE_SHIFT;
++ if (PageCompound(page)) {
++ pte_t *pte;
++ struct page *head = compound_head(page);
++ unsigned int compshift = compound_order(head);
++
++ local_irq_save(flags); /* disables as well */
++ pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
++ local_irq_restore(flags);
++
++ /* Double check it is still the same pinned page */
++ if (pte && pte_page(*pte) == head &&
++ pageshift == compshift)
++ pageshift = max_t(unsigned int, pageshift,
++ PAGE_SHIFT);
++ }
++ mem->pageshift = min(mem->pageshift, pageshift);
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+ }
+
+@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_io
+ EXPORT_SYMBOL_GPL(mm_iommu_find);
+
+ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa)
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
+ {
+ const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+ u64 *va = &mem->hpas[entry];
+@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_
+ if (entry >= mem->entries)
+ return -EFAULT;
+
++ if (pageshift > mem->pageshift)
++ return -EFAULT;
++
+ *hpa = *va | (ua & ~PAGE_MASK);
+
+ return 0;
+@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_
+ EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
+
+ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa)
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
+ {
+ const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+ void *va = &mem->hpas[entry];
+@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iom
+ if (entry >= mem->entries)
+ return -EFAULT;
+
++ if (pageshift > mem->pageshift)
++ return -EFAULT;
++
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return -EFAULT;
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -467,7 +467,7 @@ static int tce_iommu_prereg_ua_to_hpa(st
+ if (!mem)
+ return -EINVAL;
+
+- ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
++ ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
+ if (ret)
+ return -EINVAL;
+
--- /dev/null
+From 98014068328c5574de9a4a30b604111fd9d8f901 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Tue, 8 May 2018 19:56:22 -0400
+Subject: xen/PVH: Set up GS segment for stack canary
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+commit 98014068328c5574de9a4a30b604111fd9d8f901 upstream.
+
+We are making calls to C code (e.g. xen_prepare_pvh()) which may use the
+stack canary (stored in the GS segment), so the GS segment needs to be
+set up before the first such call.
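+
+For illustration only (offsets quoted from memory, not from this patch):
+with stack protector enabled the compiler reads the canary through the GS
+segment in every instrumented function, roughly
+
+	mov %gs:0x28, %rax     # 64-bit: canary at GS base + 0x28
+	mov %gs:0x14, %eax     # 32-bit: canary at offset 20 of struct stack_canary
+
+so if MSR_GS_BASE (64-bit) or the %gs selector (32-bit) doesn't point at a
+valid canary area before the first call into C, the prologue of
+xen_prepare_pvh() faults before the kernel proper is even reached.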
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Cc: Jason Andryuk <jandryuk@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/xen-pvh.S | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/xen/xen-pvh.S
++++ b/arch/x86/xen/xen-pvh.S
+@@ -54,6 +54,9 @@
+ * charge of setting up it's own stack, GDT and IDT.
+ */
+
++#define PVH_GDT_ENTRY_CANARY 4
++#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
++
+ ENTRY(pvh_start_xen)
+ cld
+
+@@ -98,6 +101,12 @@ ENTRY(pvh_start_xen)
+ /* 64-bit entry point. */
+ .code64
+ 1:
++ /* Set base address in stack canary descriptor. */
++ mov $MSR_GS_BASE,%ecx
++ mov $_pa(canary), %eax
++ xor %edx, %edx
++ wrmsr
++
+ call xen_prepare_pvh
+
+ /* startup_64 expects boot_params in %rsi. */
+@@ -107,6 +116,17 @@ ENTRY(pvh_start_xen)
+
+ #else /* CONFIG_X86_64 */
+
++ /* Set base address in stack canary descriptor. */
++ movl $_pa(gdt_start),%eax
++ movl $_pa(canary),%ecx
++ movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
++ shrl $16, %ecx
++ movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
++ movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
++
++ mov $PVH_CANARY_SEL,%eax
++ mov %eax,%gs
++
+ call mk_early_pgtbl_32
+
+ mov $_pa(initial_page_table), %eax
+@@ -150,9 +170,13 @@ gdt_start:
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
+ #endif
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
++ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
+ gdt_end:
+
+- .balign 4
++ .balign 16
++canary:
++ .fill 48, 1, 0
++
+ early_stack:
+ .fill 256, 1, 0
+ early_stack_end: