--- /dev/null
+From 72f310481a08db821b614e7b5d00febcc9064b36 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 16 Mar 2017 18:20:50 +0000
+Subject: arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 72f310481a08db821b614e7b5d00febcc9064b36 upstream.
+
+We don't hold the mmap_sem while searching for VMAs (via find_vma), in
+kvm_arch_prepare_memory_region, which can end up in expected failures.
+
+Fixes: commit 8eef91239e57 ("arm/arm64: KVM: map MMIO regions at creation time")
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Eric Auger <eric.auger@rehat.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+[ Handle dirty page logging failure case ]
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1806,6 +1806,7 @@ int kvm_arch_prepare_memory_region(struc
+ (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ return -EFAULT;
+
++ down_read(&current->mm->mmap_sem);
+ /*
+ * A memory region could potentially cover multiple VMAs, and any holes
+ * between them, so iterate over all of them to find out if we can map
+@@ -1849,8 +1850,10 @@ int kvm_arch_prepare_memory_region(struc
+ pa += vm_start - vma->vm_start;
+
+ /* IO region dirty page logging not allowed */
+- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+- return -EINVAL;
++ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+ vm_end - vm_start,
+@@ -1862,7 +1865,7 @@ int kvm_arch_prepare_memory_region(struc
+ } while (hva < reg_end);
+
+ if (change == KVM_MR_FLAGS_ONLY)
+- return ret;
++ goto out;
+
+ spin_lock(&kvm->mmu_lock);
+ if (ret)
+@@ -1870,6 +1873,8 @@ int kvm_arch_prepare_memory_region(struc
+ else
+ stage2_flush_memslot(kvm, memslot);
+ spin_unlock(&kvm->mmu_lock);
++out:
++ up_read(&current->mm->mmap_sem);
+ return ret;
+ }
+
--- /dev/null
+From 90f6e150e44a0dc3883110eeb3ab35d1be42b6bb Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 16 Mar 2017 18:20:49 +0000
+Subject: arm/arm64: KVM: Take mmap_sem in stage2_unmap_vm
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 90f6e150e44a0dc3883110eeb3ab35d1be42b6bb upstream.
+
+We don't hold the mmap_sem while searching for the VMAs when
+we try to unmap each memslot for a VM. Fix this properly to
+avoid unexpected results.
+
+Fixes: commit 957db105c997 ("arm/arm64: KVM: Introduce stage2_unmap_vm")
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -803,6 +803,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
++ down_read(&current->mm->mmap_sem);
+ spin_lock(&kvm->mmu_lock);
+
+ slots = kvm_memslots(kvm);
+@@ -810,6 +811,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+ stage2_unmap_memslot(kvm, memslot);
+
+ spin_unlock(&kvm->mmu_lock);
++ up_read(&current->mm->mmap_sem);
+ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
--- /dev/null
+From 09a6adf53d42ca3088fa3fb41f40b768efc711ed Mon Sep 17 00:00:00 2001
+From: Victor Kamensky <kamensky@cisco.com>
+Date: Mon, 3 Apr 2017 22:51:01 -0700
+Subject: arm64: mm: unaligned access by user-land should be received as SIGBUS
+
+From: Victor Kamensky <kamensky@cisco.com>
+
+commit 09a6adf53d42ca3088fa3fb41f40b768efc711ed upstream.
+
+After 52d7523 (arm64: mm: allow the kernel to handle alignment faults on
+user accesses) commit user-land accesses that produce unaligned exceptions
+like in case of aarch32 ldm/stm/ldrd/strd instructions operating on
+unaligned memory received by user-land as SIGSEGV. It is wrong, it should
+be reported as SIGBUS as it was before 52d7523 commit.
+
+Changed do_bad_area function to take signal and code parameters out of esr
+value using fault_info table, so in case of do_alignment_fault fault
+user-land will receive SIGBUS. Wrapped access to fault_info table into
+esr_to_fault_info function.
+
+Fixes: 52d7523 (arm64: mm: allow the kernel to handle alignment faults on user accesses)
+Signed-off-by: Victor Kamensky <kamensky@cisco.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c | 42 ++++++++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -41,7 +41,20 @@
+ #include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+
+-static const char *fault_name(unsigned int esr);
++struct fault_info {
++ int (*fn)(unsigned long addr, unsigned int esr,
++ struct pt_regs *regs);
++ int sig;
++ int code;
++ const char *name;
++};
++
++static const struct fault_info fault_info[];
++
++static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
++{
++ return fault_info + (esr & 63);
++}
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
+@@ -196,10 +209,12 @@ static void __do_user_fault(struct task_
+ struct pt_regs *regs)
+ {
+ struct siginfo si;
++ const struct fault_info *inf;
+
+ if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
++ inf = esr_to_fault_info(esr);
+ pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
+- tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
++ tsk->comm, task_pid_nr(tsk), inf->name, sig,
+ addr, esr);
+ show_pte(tsk->mm, addr);
+ show_regs(regs);
+@@ -218,14 +233,16 @@ static void do_bad_area(unsigned long ad
+ {
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
++ const struct fault_info *inf;
+
+ /*
+ * If we are in kernel mode at this point, we have no context to
+ * handle this fault with.
+ */
+- if (user_mode(regs))
+- __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
+- else
++ if (user_mode(regs)) {
++ inf = esr_to_fault_info(esr);
++ __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
++ } else
+ __do_kernel_fault(mm, addr, esr, regs);
+ }
+
+@@ -481,12 +498,7 @@ static int do_bad(unsigned long addr, un
+ return 1;
+ }
+
+-static const struct fault_info {
+- int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+- int sig;
+- int code;
+- const char *name;
+-} fault_info[] = {
++static const struct fault_info fault_info[] = {
+ { do_bad, SIGBUS, 0, "ttbr address size fault" },
+ { do_bad, SIGBUS, 0, "level 1 address size fault" },
+ { do_bad, SIGBUS, 0, "level 2 address size fault" },
+@@ -553,19 +565,13 @@ static const struct fault_info {
+ { do_bad, SIGBUS, 0, "unknown 63" },
+ };
+
+-static const char *fault_name(unsigned int esr)
+-{
+- const struct fault_info *inf = fault_info + (esr & 63);
+- return inf->name;
+-}
+-
+ /*
+ * Dispatch a data abort to the relevant handler.
+ */
+ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+ {
+- const struct fault_info *inf = fault_info + (esr & 63);
++ const struct fault_info *inf = esr_to_fault_info(esr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, esr, regs))
--- /dev/null
+From b3ef5520c1eabb56064474043c7c55a1a65b8708 Mon Sep 17 00:00:00 2001
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+Date: Tue, 28 Mar 2017 09:11:31 +0100
+Subject: cfg80211: check rdev resume callback only for registered wiphy
+
+From: Arend Van Spriel <arend.vanspriel@broadcom.com>
+
+commit b3ef5520c1eabb56064474043c7c55a1a65b8708 upstream.
+
+We got the following use-after-free KASAN report:
+
+ BUG: KASAN: use-after-free in wiphy_resume+0x591/0x5a0 [cfg80211]
+ at addr ffff8803fc244090
+ Read of size 8 by task kworker/u16:24/2587
+ CPU: 6 PID: 2587 Comm: kworker/u16:24 Tainted: G B 4.9.13-debug+
+ Hardware name: Dell Inc. XPS 15 9550/0N7TVV, BIOS 1.2.19 12/22/2016
+ Workqueue: events_unbound async_run_entry_fn
+ ffff880425d4f9d8 ffffffffaeedb541 ffff88042b80ef00 ffff8803fc244088
+ ffff880425d4fa00 ffffffffae84d7a1 ffff880425d4fa98 ffff8803fc244080
+ ffff88042b80ef00 ffff880425d4fa88 ffffffffae84da3a ffffffffc141f7d9
+ Call Trace:
+ [<ffffffffaeedb541>] dump_stack+0x85/0xc4
+ [<ffffffffae84d7a1>] kasan_object_err+0x21/0x70
+ [<ffffffffae84da3a>] kasan_report_error+0x1fa/0x500
+ [<ffffffffc141f7d9>] ? cfg80211_bss_age+0x39/0xc0 [cfg80211]
+ [<ffffffffc141f83a>] ? cfg80211_bss_age+0x9a/0xc0 [cfg80211]
+ [<ffffffffae48d46d>] ? trace_hardirqs_on+0xd/0x10
+ [<ffffffffc13fb1c0>] ? wiphy_suspend+0xc70/0xc70 [cfg80211]
+ [<ffffffffae84def1>] __asan_report_load8_noabort+0x61/0x70
+ [<ffffffffc13fb100>] ? wiphy_suspend+0xbb0/0xc70 [cfg80211]
+ [<ffffffffc13fb751>] ? wiphy_resume+0x591/0x5a0 [cfg80211]
+ [<ffffffffc13fb751>] wiphy_resume+0x591/0x5a0 [cfg80211]
+ [<ffffffffc13fb1c0>] ? wiphy_suspend+0xc70/0xc70 [cfg80211]
+ [<ffffffffaf3b206e>] dpm_run_callback+0x6e/0x4f0
+ [<ffffffffaf3b31b2>] device_resume+0x1c2/0x670
+ [<ffffffffaf3b367d>] async_resume+0x1d/0x50
+ [<ffffffffae3ee84e>] async_run_entry_fn+0xfe/0x610
+ [<ffffffffae3d0666>] process_one_work+0x716/0x1a50
+ [<ffffffffae3d05c9>] ? process_one_work+0x679/0x1a50
+ [<ffffffffafdd7b6d>] ? _raw_spin_unlock_irq+0x3d/0x60
+ [<ffffffffae3cff50>] ? pwq_dec_nr_in_flight+0x2b0/0x2b0
+ [<ffffffffae3d1a80>] worker_thread+0xe0/0x1460
+ [<ffffffffae3d19a0>] ? process_one_work+0x1a50/0x1a50
+ [<ffffffffae3e54c2>] kthread+0x222/0x2e0
+ [<ffffffffae3e52a0>] ? kthread_park+0x80/0x80
+ [<ffffffffae3e52a0>] ? kthread_park+0x80/0x80
+ [<ffffffffae3e52a0>] ? kthread_park+0x80/0x80
+ [<ffffffffafdd86aa>] ret_from_fork+0x2a/0x40
+ Object at ffff8803fc244088, in cache kmalloc-1024 size: 1024
+ Allocated:
+ PID = 71
+ save_stack_trace+0x1b/0x20
+ save_stack+0x46/0xd0
+ kasan_kmalloc+0xad/0xe0
+ kasan_slab_alloc+0x12/0x20
+ __kmalloc_track_caller+0x134/0x360
+ kmemdup+0x20/0x50
+ brcmf_cfg80211_attach+0x10b/0x3a90 [brcmfmac]
+ brcmf_bus_start+0x19a/0x9a0 [brcmfmac]
+ brcmf_pcie_setup+0x1f1a/0x3680 [brcmfmac]
+ brcmf_fw_request_nvram_done+0x44c/0x11b0 [brcmfmac]
+ request_firmware_work_func+0x135/0x280
+ process_one_work+0x716/0x1a50
+ worker_thread+0xe0/0x1460
+ kthread+0x222/0x2e0
+ ret_from_fork+0x2a/0x40
+ Freed:
+ PID = 2568
+ save_stack_trace+0x1b/0x20
+ save_stack+0x46/0xd0
+ kasan_slab_free+0x71/0xb0
+ kfree+0xe8/0x2e0
+ brcmf_cfg80211_detach+0x62/0xf0 [brcmfmac]
+ brcmf_detach+0x14a/0x2b0 [brcmfmac]
+ brcmf_pcie_remove+0x140/0x5d0 [brcmfmac]
+ brcmf_pcie_pm_leave_D3+0x198/0x2e0 [brcmfmac]
+ pci_pm_resume+0x186/0x220
+ dpm_run_callback+0x6e/0x4f0
+ device_resume+0x1c2/0x670
+ async_resume+0x1d/0x50
+ async_run_entry_fn+0xfe/0x610
+ process_one_work+0x716/0x1a50
+ worker_thread+0xe0/0x1460
+ kthread+0x222/0x2e0
+ ret_from_fork+0x2a/0x40
+ Memory state around the buggy address:
+ ffff8803fc243f80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff8803fc244000: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ >ffff8803fc244080: fc fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff8803fc244100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8803fc244180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+What is happening is that brcmf_pcie_resume() detects a device that
+is no longer responsive and it decides to unbind resulting in a
+wiphy_unregister() and wiphy_free() call. Now the wiphy instance
+remains allocated, because PM needs to call wiphy_resume() for it.
+However, brcmfmac already does a kfree() for the struct
+cfg80211_registered_device::ops field. Change the checks in
+wiphy_resume() to only access the struct cfg80211_registered_device::ops
+if the wiphy instance is still registered at this time.
+
+Reported-by: Daniel J Blueman <daniel@quora.org>
+Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
+Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com>
+Reviewed-by: Franky Lin <franky.lin@broadcom.com>
+Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/sysfs.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -130,12 +130,10 @@ static int wiphy_resume(struct device *d
+ /* Age scan results with time spent in suspend */
+ cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
+
+- if (rdev->ops->resume) {
+- rtnl_lock();
+- if (rdev->wiphy.registered)
+- ret = rdev_resume(rdev);
+- rtnl_unlock();
+- }
++ rtnl_lock();
++ if (rdev->wiphy.registered && rdev->ops->resume)
++ ret = rdev_resume(rdev);
++ rtnl_unlock();
+
+ return ret;
+ }
--- /dev/null
+From 4bdc9029685ac03be50b320b29691766d2326c2b Mon Sep 17 00:00:00 2001
+From: Quentin Schulz <quentin.schulz@free-electrons.com>
+Date: Tue, 21 Mar 2017 16:52:14 +0100
+Subject: iio: bmg160: reset chip when probing
+
+From: Quentin Schulz <quentin.schulz@free-electrons.com>
+
+commit 4bdc9029685ac03be50b320b29691766d2326c2b upstream.
+
+The gyroscope chip might need to be reset to be used.
+
+Without the chip being reset, the driver stopped at the first
+regmap_read (to get the CHIP_ID) and failed to probe.
+
+The datasheet of the gyroscope says that a minimum wait of 30ms after
+the reset has to be done.
+
+This patch has been checked on a BMX055 and the datasheet of the BMG160
+and the BMI055 give the same reset register and bits.
+
+Signed-off-by: Quentin Schulz <quentin.schulz@free-electrons.com>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/gyro/bmg160_core.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -27,6 +27,7 @@
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iio/triggered_buffer.h>
+ #include <linux/regmap.h>
++#include <linux/delay.h>
+ #include "bmg160.h"
+
+ #define BMG160_IRQ_NAME "bmg160_event"
+@@ -52,6 +53,9 @@
+ #define BMG160_DEF_BW 100
+ #define BMG160_REG_PMU_BW_RES BIT(7)
+
++#define BMG160_GYRO_REG_RESET 0x14
++#define BMG160_GYRO_RESET_VAL 0xb6
++
+ #define BMG160_REG_INT_MAP_0 0x17
+ #define BMG160_INT_MAP_0_BIT_ANY BIT(1)
+
+@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg16
+ int ret;
+ unsigned int val;
+
++ /*
++ * Reset chip to get it in a known good state. A delay of 30ms after
++ * reset is required according to the datasheet.
++ */
++ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
++ BMG160_GYRO_RESET_VAL);
++ usleep_range(30000, 30700);
++
+ ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_chip_id\n");
--- /dev/null
+From 8b3405e345b5a098101b0c31b264c812bba045d9 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Mon, 3 Apr 2017 15:12:43 +0100
+Subject: kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 8b3405e345b5a098101b0c31b264c812bba045d9 upstream.
+
+In kvm_free_stage2_pgd() we don't hold the kvm->mmu_lock while calling
+unmap_stage2_range() on the entire memory range for the guest. This could
+cause problems with other callers (e.g, munmap on a memslot) trying to
+unmap a range. And since we have to unmap the entire Guest memory range
+holding a spinlock, make sure we yield the lock if necessary, after we
+unmap each PUD range.
+
+Fixes: commit d5d8184d35c9 ("KVM: ARM: Memory virtualization setup")
+Cc: Paolo Bonzini <pbonzin@redhat.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+[ Avoid vCPU starvation and lockup detector warnings ]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kv
+ phys_addr_t addr = start, end = start + size;
+ phys_addr_t next;
+
++ assert_spin_locked(&kvm->mmu_lock);
+ pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ do {
+ next = stage2_pgd_addr_end(addr, end);
+ if (!stage2_pgd_none(*pgd))
+ unmap_stage2_puds(kvm, pgd, addr, next);
++ /*
++ * If the range is too large, release the kvm->mmu_lock
++ * to prevent starvation and lockup detector warnings.
++ */
++ if (next != end)
++ cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
+@@ -831,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm
+ if (kvm->arch.pgd == NULL)
+ return;
+
++ spin_lock(&kvm->mmu_lock);
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
++ spin_unlock(&kvm->mmu_lock);
++
+ /* Free the HW pgd, one page at a time */
+ free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+ kvm->arch.pgd = NULL;
drm-ttm-drm-vmwgfx-relax-permission-checking-when-opening-surfaces.patch
drm-vmwgfx-remove-getparam-error-message.patch
drm-vmwgfx-fix-integer-overflow-in-vmw_surface_define_ioctl.patch
+sysfs-be-careful-of-error-returns-from-ops-show.patch
+staging-android-ashmem-lseek-failed-due-to-no-fmode_lseek.patch
+arm-arm64-kvm-take-mmap_sem-in-stage2_unmap_vm.patch
+arm-arm64-kvm-take-mmap_sem-in-kvm_arch_prepare_memory_region.patch
+kvm-arm-arm64-fix-locking-for-kvm_free_stage2_pgd.patch
+iio-bmg160-reset-chip-when-probing.patch
+arm64-mm-unaligned-access-by-user-land-should-be-received-as-sigbus.patch
+cfg80211-check-rdev-resume-callback-only-for-registered-wiphy.patch
--- /dev/null
+From 97fbfef6bd597888485b653175fb846c6998b60c Mon Sep 17 00:00:00 2001
+From: Shuxiao Zhang <zhangshuxiao@xiaomi.com>
+Date: Thu, 6 Apr 2017 22:30:29 +0800
+Subject: staging: android: ashmem: lseek failed due to no FMODE_LSEEK.
+
+From: Shuxiao Zhang <zhangshuxiao@xiaomi.com>
+
+commit 97fbfef6bd597888485b653175fb846c6998b60c upstream.
+
+vfs_llseek will check whether the file mode has
+FMODE_LSEEK, no return failure. But ashmem can be
+lseek, so add FMODE_LSEEK to ashmem file.
+
+Comment From Greg Hackmann:
+ ashmem_llseek() passes the llseek() call through to the backing
+ shmem file. 91360b02ab48 ("ashmem: use vfs_llseek()") changed
+ this from directly calling the file's llseek() op into a VFS
+ layer call. This also adds a check for the FMODE_LSEEK bit, so
+ without that bit ashmem_llseek() now always fails with -ESPIPE.
+
+Fixes: 91360b02ab48 ("ashmem: use vfs_llseek()")
+Signed-off-by: Shuxiao Zhang <zhangshuxiao@xiaomi.com>
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/android/ashmem.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -405,6 +405,7 @@ static int ashmem_mmap(struct file *file
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
++ vmfile->f_mode |= FMODE_LSEEK;
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
--- /dev/null
+From c8a139d001a1aab1ea8734db14b22dac9dd143b6 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Mon, 3 Apr 2017 11:30:34 +1000
+Subject: sysfs: be careful of error returns from ops->show()
+
+From: NeilBrown <neilb@suse.com>
+
+commit c8a139d001a1aab1ea8734db14b22dac9dd143b6 upstream.
+
+ops->show() can return a negative error code.
+Commit 65da3484d9be ("sysfs: correctly handle short reads on PREALLOC attrs.")
+(in v4.4) caused this to be stored in an unsigned 'size_t' variable, so errors
+would look like large numbers.
+As a result, if an error is returned, sysfs_kf_read() will return the
+value of 'count', typically 4096.
+
+Commit 17d0774f8068 ("sysfs: correctly handle read offset on PREALLOC attrs")
+(in v4.8) extended this error to use the unsigned large 'len' as a size for
+memmove().
+Consequently, if ->show returns an error, then the first read() on the
+sysfs file will return 4096 and could return uninitialized memory to
+user-space.
+If the application performs a subsequent read, this will trigger a memmove()
+with extremely large count, and is likely to crash the machine is bizarre ways.
+
+This bug can currently only be triggered by reading from an md
+sysfs attribute declared with __ATTR_PREALLOC() during the
+brief period between when mddev_put() deletes an mddev from
+the ->all_mddevs list, and when mddev_delayed_delete() - which is
+scheduled on a workqueue - completes.
+Before this, an error won't be returned by the ->show()
+After this, the ->show() won't be called.
+
+I can reproduce it reliably only by putting delay like
+ usleep_range(500000,700000);
+early in mddev_delayed_delete(). Then after creating an
+md device md0 run
+ echo clear > /sys/block/md0/md/array_state; cat /sys/block/md0/md/array_state
+
+The bug can be triggered without the usleep.
+
+Fixes: 65da3484d9be ("sysfs: correctly handle short reads on PREALLOC attrs.")
+Fixes: 17d0774f8068 ("sysfs: correctly handle read offset on PREALLOC attrs")
+Signed-off-by: NeilBrown <neilb@suse.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reported-and-tested-by: Miroslav Benes <mbenes@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/sysfs/file.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kern
+ {
+ const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+ struct kobject *kobj = of->kn->parent->priv;
+- size_t len;
++ ssize_t len;
+
+ /*
+ * If buf != of->prealloc_buf, we don't know how
+@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kern
+ if (WARN_ON_ONCE(buf != of->prealloc_buf))
+ return 0;
+ len = ops->show(kobj, of->kn->priv, buf);
++ if (len < 0)
++ return len;
+ if (pos) {
+ if (len <= pos)
+ return 0;
+ len -= pos;
+ memmove(buf, buf + pos, len);
+ }
+- return min(count, len);
++ return min_t(ssize_t, count, len);
+ }
+
+ /* kernfs write callback for regular sysfs files */