--- /dev/null
+From f3cd1b064f1179d9e6188c6d67297a2360880e10 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Wed, 22 Mar 2017 12:07:23 +0100
+Subject: drm/etnaviv: (re-)protect fence allocation with GPU mutex
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit f3cd1b064f1179d9e6188c6d67297a2360880e10 upstream.
+
+The fence allocation needs to be protected by the GPU mutex, otherwise
+the fence seqnos of concurrent submits might not match the insertion order
+of the jobs in the kernel ring. This breaks the assumption that jobs
+complete with monotonically increasing fence seqnos.
+
+Fixes: d9853490176c (drm/etnaviv: take GPU lock later in the submit process)
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -1299,6 +1299,8 @@ int etnaviv_gpu_submit(struct etnaviv_gp
+ goto out_pm_put;
+ }
+
++ mutex_lock(&gpu->lock);
++
+ fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!fence) {
+ event_free(gpu, event);
+@@ -1306,8 +1308,6 @@ int etnaviv_gpu_submit(struct etnaviv_gp
+ goto out_pm_put;
+ }
+
+- mutex_lock(&gpu->lock);
+-
+ gpu->event[event].fence = fence;
+ submit->fence = fence->seqno;
+ gpu->active_fence = submit->fence;
--- /dev/null
+From ce4b4f228e51219b0b79588caf73225b08b5b779 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
+Date: Fri, 24 Mar 2017 19:01:09 +0900
+Subject: drm/radeon: Override fpfn for all VRAM placements in radeon_evict_flags
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer <michel.daenzer@amd.com>
+
+commit ce4b4f228e51219b0b79588caf73225b08b5b779 upstream.
+
+We were accidentally only overriding the first VRAM placement. For BOs
+with the RADEON_GEM_NO_CPU_ACCESS flag set,
+radeon_ttm_placement_from_domain creates a second VRAM placement with
+fpfn == 0. If VRAM is almost full, the first VRAM placement with
+fpfn > 0 may not work, but the second one with fpfn == 0 always will
+(the BO's current location trivially satisfies it). Because "moving"
+the BO to its current location puts it back on the LRU list, this
+results in an infinite loop.
+
+Fixes: 2a85aedd117c ("drm/radeon: Try evicting from CPU accessible to
+ inaccessible VRAM first")
+Reported-by: Zachary Michaels <zmichaels@oblong.com>
+Reported-and-Tested-by: Julien Isorce <jisorce@oblong.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct tt
+ rbo->placement.num_busy_placement = 0;
+ for (i = 0; i < rbo->placement.num_placement; i++) {
+ if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+- if (rbo->placements[0].fpfn < fpfn)
+- rbo->placements[0].fpfn = fpfn;
++ if (rbo->placements[i].fpfn < fpfn)
++ rbo->placements[i].fpfn = fpfn;
+ } else {
+ rbo->placement.busy_placement =
+ &rbo->placements[i];
--- /dev/null
+From 6d6e500391875cc372336c88e9a8af377be19c36 Mon Sep 17 00:00:00 2001
+From: Eric Anholt <eric@anholt.net>
+Date: Tue, 28 Mar 2017 13:13:43 -0700
+Subject: drm/vc4: Allocate the right amount of space for boot-time CRTC state.
+
+From: Eric Anholt <eric@anholt.net>
+
+commit 6d6e500391875cc372336c88e9a8af377be19c36 upstream.
+
+Without this, the first modeset would dereference past the allocation
+when trying to free the mm node.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170328201343.4884-1-eric@anholt.net
+Fixes: d8dbf44f13b9 ("drm/vc4: Make the CRTCs cooperate on allocating display lists.")
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vc4/vc4_crtc.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -842,6 +842,17 @@ static void vc4_crtc_destroy_state(struc
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
+ }
+
++static void
++vc4_crtc_reset(struct drm_crtc *crtc)
++{
++ if (crtc->state)
++ __drm_atomic_helper_crtc_destroy_state(crtc->state);
++
++ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
++ if (crtc->state)
++ crtc->state->crtc = crtc;
++}
++
+ static const struct drm_crtc_funcs vc4_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = vc4_crtc_destroy,
+@@ -849,7 +860,7 @@ static const struct drm_crtc_funcs vc4_c
+ .set_property = NULL,
+ .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
+ .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
+- .reset = drm_atomic_helper_crtc_reset,
++ .reset = vc4_crtc_reset,
+ .atomic_duplicate_state = vc4_crtc_duplicate_state,
+ .atomic_destroy_state = vc4_crtc_destroy_state,
+ .gamma_set = vc4_crtc_gamma_set,
--- /dev/null
+From 90db10434b163e46da413d34db8d0e77404cc645 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Thu, 23 Mar 2017 18:24:19 +0100
+Subject: KVM: kvm_io_bus_unregister_dev() should never fail
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 90db10434b163e46da413d34db8d0e77404cc645 upstream.
+
+No caller currently checks the return value of
+kvm_io_bus_unregister_dev(). This is evil, as all callers silently go on
+freeing their device. A stale reference will remain in the io_bus,
+getting at least used again, when the iobus gets torn down on
+kvm_destroy_vm() - leading to use after free errors.
+
+There is nothing the callers could do, except retrying over and over
+again.
+
+So let's simply remove the bus altogether, print an error and make
+sure no one can access this broken bus again (returning -ENOMEM on any
+attempt to access it).
+
+Fixes: e93f8a0f821e ("KVM: convert io_bus to SRCU")
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/kvm_host.h | 4 ++--
+ virt/kvm/eventfd.c | 3 ++-
+ virt/kvm/kvm_main.c | 42 +++++++++++++++++++++++++-----------------
+ 3 files changed, 29 insertions(+), 20 deletions(-)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcp
+ int len, void *val);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, struct kvm_io_device *dev);
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+- struct kvm_io_device *dev);
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++ struct kvm_io_device *dev);
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ gpa_t addr);
+
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *k
+ continue;
+
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+- kvm->buses[bus_idx]->ioeventfd_count--;
++ if (kvm->buses[bus_idx])
++ kvm->buses[bus_idx]->ioeventfd_count--;
+ ioeventfd_release(p);
+ ret = 0;
+ break;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -721,7 +721,8 @@ static void kvm_destroy_vm(struct kvm *k
+ spin_unlock(&kvm_lock);
+ kvm_free_irq_routing(kvm);
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+- kvm_io_bus_destroy(kvm->buses[i]);
++ if (kvm->buses[i])
++ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm->buses[i] = NULL;
+ }
+ kvm_coalesced_mmio_free(kvm);
+@@ -3465,6 +3466,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vc
+ };
+
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++ if (!bus)
++ return -ENOMEM;
+ r = __kvm_io_bus_write(vcpu, bus, &range, val);
+ return r < 0 ? r : 0;
+ }
+@@ -3482,6 +3485,8 @@ int kvm_io_bus_write_cookie(struct kvm_v
+ };
+
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++ if (!bus)
++ return -ENOMEM;
+
+ /* First try the device referenced by cookie. */
+ if ((cookie >= 0) && (cookie < bus->dev_count) &&
+@@ -3532,6 +3537,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcp
+ };
+
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++ if (!bus)
++ return -ENOMEM;
+ r = __kvm_io_bus_read(vcpu, bus, &range, val);
+ return r < 0 ? r : 0;
+ }
+@@ -3544,6 +3551,9 @@ int kvm_io_bus_register_dev(struct kvm *
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm->buses[bus_idx];
++ if (!bus)
++ return -ENOMEM;
++
+ /* exclude ioeventfd which is limited by maximum fd */
+ if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
+ return -ENOSPC;
+@@ -3563,45 +3573,41 @@ int kvm_io_bus_register_dev(struct kvm *
+ }
+
+ /* Caller must hold slots_lock. */
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+- struct kvm_io_device *dev)
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++ struct kvm_io_device *dev)
+ {
+- int i, r;
++ int i;
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm->buses[bus_idx];
+-
+- /*
+- * It's possible the bus being released before hand. If so,
+- * we're done here.
+- */
+ if (!bus)
+- return 0;
++ return;
+
+- r = -ENOENT;
+ for (i = 0; i < bus->dev_count; i++)
+ if (bus->range[i].dev == dev) {
+- r = 0;
+ break;
+ }
+
+- if (r)
+- return r;
++ if (i == bus->dev_count)
++ return;
+
+ new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
+- if (!new_bus)
+- return -ENOMEM;
++ if (!new_bus) {
++ pr_err("kvm: failed to shrink bus, removing it completely\n");
++ goto broken;
++ }
+
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+
++broken:
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+- return r;
++ return;
+ }
+
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+@@ -3614,6 +3620,8 @@ struct kvm_io_device *kvm_io_bus_get_dev
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
++ if (!bus)
++ goto out_unlock;
+
+ dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
+ if (dev_idx < 0)
--- /dev/null
+From df630b8c1e851b5e265dc2ca9c87222e342c093b Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Wed, 15 Mar 2017 16:01:17 +0800
+Subject: KVM: x86: clear bus pointer when destroyed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Peter Xu <peterx@redhat.com>
+
+commit df630b8c1e851b5e265dc2ca9c87222e342c093b upstream.
+
+When releasing the bus, let's clear the bus pointers to mark it out. If
+any further device unregister happens on this bus, we know that we're
+done if we found the bus being released already.
+
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -720,8 +720,10 @@ static void kvm_destroy_vm(struct kvm *k
+ list_del(&kvm->vm_list);
+ spin_unlock(&kvm_lock);
+ kvm_free_irq_routing(kvm);
+- for (i = 0; i < KVM_NR_BUSES; i++)
++ for (i = 0; i < KVM_NR_BUSES; i++) {
+ kvm_io_bus_destroy(kvm->buses[i]);
++ kvm->buses[i] = NULL;
++ }
+ kvm_coalesced_mmio_free(kvm);
+ #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+@@ -3568,6 +3570,14 @@ int kvm_io_bus_unregister_dev(struct kvm
+ struct kvm_io_bus *new_bus, *bus;
+
+ bus = kvm->buses[bus_idx];
++
++ /*
++ * It's possible the bus being released before hand. If so,
++ * we're done here.
++ */
++ if (!bus)
++ return 0;
++
+ r = -ENOENT;
+ for (i = 0; i < bus->dev_count; i++)
+ if (bus->range[i].dev == dev) {
--- /dev/null
+From 854fbd6e5f60fe99e8e3a569865409fca378f143 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 23 Mar 2017 15:46:16 -0700
+Subject: lib/syscall: Clear return values when no stack
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 854fbd6e5f60fe99e8e3a569865409fca378f143 upstream.
+
+Commit:
+
+ aa1f1a639621 ("lib/syscall: Pin the task stack in collect_syscall()")
+
+... added logic to handle a process stack not existing, but left sp and pc
+uninitialized, which can be later reported via /proc/$pid/syscall for zombie
+processes, potentially exposing kernel memory to userspace.
+
+ Zombie /proc/$pid/syscall before:
+ -1 0xffffffff9a060100 0xffff92f42d6ad900
+
+ Zombie /proc/$pid/syscall after:
+ -1 0x0 0x0
+
+Reported-by: Robert Święcki <robert@swiecki.net>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: aa1f1a639621 ("lib/syscall: Pin the task stack in collect_syscall()")
+Link: http://lkml.kernel.org/r/20170323224616.GA92694@beast
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/syscall.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/syscall.c
++++ b/lib/syscall.c
+@@ -11,6 +11,7 @@ static int collect_syscall(struct task_s
+
+ if (!try_get_task_stack(target)) {
+ /* Task has no stack, so the task isn't in a syscall. */
++ *sp = *pc = 0;
+ *callno = -1;
+ return 0;
+ }
--- /dev/null
+From c9d398fa237882ea07167e23bcfc5e6847066518 Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Fri, 31 Mar 2017 15:11:55 -0700
+Subject: mm, hugetlb: use pte_present() instead of pmd_present() in follow_huge_pmd()
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit c9d398fa237882ea07167e23bcfc5e6847066518 upstream.
+
+I found the race condition which triggers the following bug when
+move_pages() and soft offline are called on a single hugetlb page
+concurrently.
+
+ Soft offlining page 0x119400 at 0x700000000000
+ BUG: unable to handle kernel paging request at ffffea0011943820
+ IP: follow_huge_pmd+0x143/0x190
+ PGD 7ffd2067
+ PUD 7ffd1067
+ PMD 0
+ [61163.582052] Oops: 0000 [#1] SMP
+ Modules linked in: binfmt_misc ppdev virtio_balloon parport_pc pcspkr i2c_piix4 parport i2c_core acpi_cpufreq ip_tables xfs libcrc32c ata_generic pata_acpi virtio_blk 8139too crc32c_intel ata_piix serio_raw libata virtio_pci 8139cp virtio_ring virtio mii floppy dm_mirror dm_region_hash dm_log dm_mod [last unloaded: cap_check]
+ CPU: 0 PID: 22573 Comm: iterate_numa_mo Tainted: P OE 4.11.0-rc2-mm1+ #2
+ Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+ RIP: 0010:follow_huge_pmd+0x143/0x190
+ RSP: 0018:ffffc90004bdbcd0 EFLAGS: 00010202
+ RAX: 0000000465003e80 RBX: ffffea0004e34d30 RCX: 00003ffffffff000
+ RDX: 0000000011943800 RSI: 0000000000080001 RDI: 0000000465003e80
+ RBP: ffffc90004bdbd18 R08: 0000000000000000 R09: ffff880138d34000
+ R10: ffffea0004650000 R11: 0000000000c363b0 R12: ffffea0011943800
+ R13: ffff8801b8d34000 R14: ffffea0000000000 R15: 000077ff80000000
+ FS: 00007fc977710740(0000) GS:ffff88007dc00000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: ffffea0011943820 CR3: 000000007a746000 CR4: 00000000001406f0
+ Call Trace:
+ follow_page_mask+0x270/0x550
+ SYSC_move_pages+0x4ea/0x8f0
+ SyS_move_pages+0xe/0x10
+ do_syscall_64+0x67/0x180
+ entry_SYSCALL64_slow_path+0x25/0x25
+ RIP: 0033:0x7fc976e03949
+ RSP: 002b:00007ffe72221d88 EFLAGS: 00000246 ORIG_RAX: 0000000000000117
+ RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fc976e03949
+ RDX: 0000000000c22390 RSI: 0000000000001400 RDI: 0000000000005827
+ RBP: 00007ffe72221e00 R08: 0000000000c2c3a0 R09: 0000000000000004
+ R10: 0000000000c363b0 R11: 0000000000000246 R12: 0000000000400650
+ R13: 00007ffe72221ee0 R14: 0000000000000000 R15: 0000000000000000
+ Code: 81 e4 ff ff 1f 00 48 21 c2 49 c1 ec 0c 48 c1 ea 0c 4c 01 e2 49 bc 00 00 00 00 00 ea ff ff 48 c1 e2 06 49 01 d4 f6 45 bc 04 74 90 <49> 8b 7c 24 20 40 f6 c7 01 75 2b 4c 89 e7 8b 47 1c 85 c0 7e 2a
+ RIP: follow_huge_pmd+0x143/0x190 RSP: ffffc90004bdbcd0
+ CR2: ffffea0011943820
+ ---[ end trace e4f81353a2d23232 ]---
+ Kernel panic - not syncing: Fatal exception
+ Kernel Offset: disabled
+
+This bug is triggered when pmd_present() returns true for non-present
+hugetlb, so fixing the present check in follow_huge_pmd() prevents it.
+Using pmd_present() to determine present/non-present for hugetlb is not
+correct, because pmd_present() checks multiple bits (not only
+_PAGE_PRESENT) for historical reason and it can misjudge hugetlb state.
+
+Fixes: e66f17ff7177 ("mm/hugetlb: take page table lock in follow_huge_pmd()")
+Link: http://lkml.kernel.org/r/1490149898-20231-1-git-send-email-n-horiguchi@ah.jp.nec.com
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4471,6 +4471,7 @@ follow_huge_pmd(struct mm_struct *mm, un
+ {
+ struct page *page = NULL;
+ spinlock_t *ptl;
++ pte_t pte;
+ retry:
+ ptl = pmd_lockptr(mm, pmd);
+ spin_lock(ptl);
+@@ -4480,12 +4481,13 @@ retry:
+ */
+ if (!pmd_huge(*pmd))
+ goto out;
+- if (pmd_present(*pmd)) {
++ pte = huge_ptep_get((pte_t *)pmd);
++ if (pte_present(pte)) {
+ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+- if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
++ if (is_hugetlb_entry_migration(pte)) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ goto retry;
--- /dev/null
+From 553af430e7c981e6e8fa5007c5b7b5773acc63dd Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Fri, 31 Mar 2017 15:11:50 -0700
+Subject: mm: rmap: fix huge file mmap accounting in the memcg stats
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 553af430e7c981e6e8fa5007c5b7b5773acc63dd upstream.
+
+Huge pages are accounted as single units in the memcg's "file_mapped"
+counter. Account the correct number of base pages, like we do in the
+corresponding node counter.
+
+Link: http://lkml.kernel.org/r/20170322005111.3156-1-hannes@cmpxchg.org
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/memcontrol.h | 6 ++++++
+ mm/rmap.c | 4 ++--
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -739,6 +739,12 @@ static inline bool mem_cgroup_oom_synchr
+ return false;
+ }
+
++static inline void mem_cgroup_update_page_stat(struct page *page,
++ enum mem_cgroup_stat_index idx,
++ int nr)
++{
++}
++
+ static inline void mem_cgroup_inc_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx)
+ {
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1295,7 +1295,7 @@ void page_add_file_rmap(struct page *pag
+ goto out;
+ }
+ __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
+- mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
++ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
+ out:
+ unlock_page_memcg(page);
+ }
+@@ -1335,7 +1335,7 @@ static void page_remove_file_rmap(struct
+ * pte lock(a spinlock) is held, which implies preemption disabled.
+ */
+ __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
+- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
++ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
+
+ if (unlikely(PageMlocked(page)))
+ clear_page_mlock(page);
tty-serial-atmel-fix-tx-path-in-atmel_console_write.patch
usb-fix-linked-list-corruption-in-rh_call_control.patch
serial-mxs-auart-fix-baudrate-calculation.patch
+kvm-x86-clear-bus-pointer-when-destroyed.patch
+kvm-kvm_io_bus_unregister_dev-should-never-fail.patch
+drm-radeon-override-fpfn-for-all-vram-placements-in-radeon_evict_flags.patch
+drm-vc4-allocate-the-right-amount-of-space-for-boot-time-crtc-state.patch
+drm-etnaviv-re-protect-fence-allocation-with-gpu-mutex.patch
+x86-mm-kaslr-exclude-efi-region-from-kaslr-va-space-randomization.patch
+x86-mce-fix-copy-paste-error-in-exception-table-entries.patch
+lib-syscall-clear-return-values-when-no-stack.patch
+mm-rmap-fix-huge-file-mmap-accounting-in-the-memcg-stats.patch
+mm-hugetlb-use-pte_present-instead-of-pmd_present-in-follow_huge_pmd.patch
--- /dev/null
+From 26a37ab319a26d330bab298770d692bb9c852aff Mon Sep 17 00:00:00 2001
+From: Tony Luck <tony.luck@intel.com>
+Date: Mon, 20 Mar 2017 14:40:30 -0700
+Subject: x86/mce: Fix copy/paste error in exception table entries
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit 26a37ab319a26d330bab298770d692bb9c852aff upstream.
+
+Back in commit:
+
+ 92b0729c34cab ("x86/mm, x86/mce: Add memcpy_mcsafe()")
+
+... I made a copy/paste error setting up the exception table entries
+and ended up with two for label .L_cache_w3 and none for .L_cache_w2.
+
+This means that if we take a machine check on:
+
+ .L_cache_w2: movq 2*8(%rsi), %r10
+
+then we don't have an exception table entry for this instruction
+and we can't recover.
+
+Fix: s/3/2/
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 92b0729c34cab ("x86/mm, x86/mce: Add memcpy_mcsafe()")
+Link: http://lkml.kernel.org/r/1490046030-25862-1-git-send-email-tony.luck@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/memcpy_64.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled
+ _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
+- _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
++ _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
--- /dev/null
+From a46f60d76004965e5669dbf3fc21ef3bc3632eb4 Mon Sep 17 00:00:00 2001
+From: Baoquan He <bhe@redhat.com>
+Date: Fri, 24 Mar 2017 12:59:52 +0800
+Subject: x86/mm/KASLR: Exclude EFI region from KASLR VA space randomization
+
+From: Baoquan He <bhe@redhat.com>
+
+commit a46f60d76004965e5669dbf3fc21ef3bc3632eb4 upstream.
+
+Currently KASLR is enabled on three regions: the direct mapping of physical
+memory, vmalloc and vmemmap. However the EFI region is also mistakenly
+included for VA space randomization because of misusing EFI_VA_START macro
+and assuming EFI_VA_START < EFI_VA_END.
+
+(This breaks kexec and possibly other things that rely on stable addresses.)
+
+The EFI region is reserved for EFI runtime services virtual mapping which
+should not be included in KASLR ranges. In Documentation/x86/x86_64/mm.txt,
+we can see:
+
+ ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+
+EFI uses the space from -4G to -64G thus EFI_VA_START > EFI_VA_END,
+Here EFI_VA_START = -4G, and EFI_VA_END = -64G.
+
+Changing EFI_VA_START to EFI_VA_END in mm/kaslr.c fixes this problem.
+
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Reviewed-by: Bhupesh Sharma <bhsharma@redhat.com>
+Acked-by: Dave Young <dyoung@redhat.com>
+Acked-by: Thomas Garnier <thgarnie@google.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1490331592-31860-1-git-send-email-bhe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/kaslr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -48,7 +48,7 @@ static const unsigned long vaddr_start =
+ #if defined(CONFIG_X86_ESPFIX64)
+ static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
+ #elif defined(CONFIG_EFI)
+-static const unsigned long vaddr_end = EFI_VA_START;
++static const unsigned long vaddr_end = EFI_VA_END;
+ #else
+ static const unsigned long vaddr_end = __START_KERNEL_map;
+ #endif
+@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void
+ */
+ BUILD_BUG_ON(vaddr_start >= vaddr_end);
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
+- vaddr_end >= EFI_VA_START);
++ vaddr_end >= EFI_VA_END);
+ BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+ IS_ENABLED(CONFIG_EFI)) &&
+ vaddr_end >= __START_KERNEL_map);