--- /dev/null
+From 0de58f852875a0f0dcfb120bb8433e4e73c7803b Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 3 Dec 2015 09:25:22 +0100
+Subject: ARM/arm64: KVM: correct PTE uncachedness check
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 0de58f852875a0f0dcfb120bb8433e4e73c7803b upstream.
+
+Commit e6fab5442345 ("ARM/arm64: KVM: test properly for a PTE's
+uncachedness") modified the logic to test whether a HYP or stage-2
+mapping needs flushing, from [incorrectly] interpreting the page table
+attributes to [incorrectly] checking whether the PFN that backs the
+mapping is covered by host system RAM. The PFN number is part of the
+output of the translation, not the input, so we have to use pte_pfn()
+on the contents of the PTE, not __phys_to_pfn() on the HYP virtual
+address or stage-2 intermediate physical address.
+
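+In unmap_ptes(), for example, this turns the (incorrect)
+
+	kvm_is_device_pfn(__phys_to_pfn(addr))	/* PFN derived from the input address */
+
+into
+
+	kvm_is_device_pfn(pte_pfn(old_pte))	/* PFN the PTE actually maps */
+
+as can be seen in the hunk below.
+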
+Fixes: e6fab5442345 ("ARM/arm64: KVM: test properly for a PTE's uncachedness")
+Tested-by: Pavel Fedin <p.fedin@samsung.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm,
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+- if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
++ if (!kvm_is_device_pfn(pte_pfn(old_pte)))
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
+@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
++ if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
+ kvm_flush_dcache_pte(*pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
--- /dev/null
+From e6fab54423450d699a09ec2b899473a541f61971 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 10 Nov 2015 15:11:20 +0100
+Subject: ARM/arm64: KVM: test properly for a PTE's uncachedness
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit e6fab54423450d699a09ec2b899473a541f61971 upstream.
+
+The open coded tests for checking whether a PTE maps a page as
+uncached use a flawed '(pte_val(xxx) & CONST) != CONST' pattern,
+which is not guaranteed to work since the type of a mapping is
+not a set of mutually exclusive bits.
+
+For HYP mappings, the type is an index into the MAIR table (i.e., the
+index itself does not contain any information whatsoever about the
+type of the mapping), and for stage-2 mappings it is a bit field where
+normal memory and device types are defined as follows:
+
+ #define MT_S2_NORMAL 0xf
+ #define MT_S2_DEVICE_nGnRE 0x1
+
+I.e., masking *and* comparing with the latter matches on the former,
+and we have been getting lucky merely because the S2 device mappings
+also have the PTE_UXN bit set, or we would misidentify memory mappings
+as device mappings.
+
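+Concretely, for a normal memory PTE the mask-and-compare evaluates as
+
+	(MT_S2_NORMAL & MT_S2_DEVICE_nGnRE) == (0xf & 0x1) == 0x1
+
+i.e. it still matches the device attribute, so that test on its own
+cannot tell normal memory and device mappings apart.
+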
+Since the unmap_range() code path (which contains one instance of the
+flawed test) is used both for HYP mappings and stage-2 mappings, and
+considering the difference between the two, it is non-trivial to fix
+this by rewriting the tests in place, as it would involve passing
+down the type of mapping through all the functions.
+
+However, since HYP mappings and stage-2 mappings both deal with host
+physical addresses, we can simply check whether the mapping is backed
+by memory that is managed by the host kernel, and only perform the
+D-cache maintenance if this is the case.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Pavel Fedin <p.fedin@samsung.com>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/mmu.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t p
+ __kvm_flush_dcache_pud(pud);
+ }
+
++static bool kvm_is_device_pfn(unsigned long pfn)
++{
++ return !pfn_valid(pfn);
++}
++
+ /**
+ * stage2_dissolve_pmd() - clear and flush huge PMD entry
+ * @kvm: pointer to kvm structure.
+@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm,
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+- if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
++ if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
+@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- if (!pte_none(*pte) &&
+- (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
++ if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+ kvm_flush_dcache_pte(*pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kv
+ return kvm_vcpu_dabt_iswrite(vcpu);
+ }
+
+-static bool kvm_is_device_pfn(unsigned long pfn)
+-{
+- return !pfn_valid(pfn);
+-}
+-
+ /**
+ * stage2_wp_ptes - write protect PMD range
+ * @pmd: pointer to pmd entry
--- /dev/null
+From 251599e1d6906621f49218d7b474ddd159e58f3b Mon Sep 17 00:00:00 2001
+From: Zi Shen Lim <zlim.lnx@gmail.com>
+Date: Tue, 3 Nov 2015 22:56:44 -0800
+Subject: arm64: bpf: fix div-by-zero case
+
+From: Zi Shen Lim <zlim.lnx@gmail.com>
+
+commit 251599e1d6906621f49218d7b474ddd159e58f3b upstream.
+
+In the case of division by zero in a BPF program:
+ A = A / X; (X == 0)
+the expected behavior is to terminate with return value 0.
+
+This is confirmed by the test case introduced in commit 86bf1721b226
+("test_bpf: add tests checking that JIT/interpreter sets A and X to 0.").
+
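+The emitted instruction sequence corresponds roughly to the following
+C-style sketch (r0 is the BPF return value register, "epilogue" the
+JIT-generated function epilogue):
+
+	if (src == 0) {		/* A64_CBNZ skips this block when src != 0 */
+		r0 = 0;		/* A64_MOVZ: program terminates with return value 0 */
+		goto epilogue;	/* A64_B to the epilogue */
+	}
+	dst = dst / src;	/* A64_UDIV on the non-zero path */
+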
+Reported-by: Yang Shi <yang.shi@linaro.org>
+Tested-by: Yang Shi <yang.shi@linaro.org>
+CC: Xi Wang <xi.wang@gmail.com>
+CC: Alexei Starovoitov <ast@plumgrid.com>
+CC: linux-arm-kernel@lists.infradead.org
+CC: linux-kernel@vger.kernel.org
+Fixes: e54bcde3d69d ("arm64: eBPF JIT compiler")
+Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/net/bpf_jit.h | 3 ++-
+ arch/arm64/net/bpf_jit_comp.c | 37 +++++++++++++++++++++++++------------
+ 2 files changed, 27 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -1,7 +1,7 @@
+ /*
+ * BPF JIT compiler for ARM64
+ *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -35,6 +35,7 @@
+ aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+ AARCH64_INSN_BRANCH_COMP_##type)
+ #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
++#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
+
+ /* Conditional branch (immediate) */
+ #define A64_COND_BRANCH(cond, offset) \
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1,7 +1,7 @@
+ /*
+ * BPF JIT compiler for ARM64
+ *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_i
+ u8 jmp_cond;
+ s32 jmp_offset;
+
++#define check_imm(bits, imm) do { \
++ if ((((imm) > 0) && ((imm) >> (bits))) || \
++ (((imm) < 0) && (~(imm) >> (bits)))) { \
++ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
++ i, imm, imm); \
++ return -EINVAL; \
++ } \
++} while (0)
++#define check_imm19(imm) check_imm(19, imm)
++#define check_imm26(imm) check_imm(26, imm)
++
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+@@ -258,8 +269,21 @@ static int build_insn(const struct bpf_i
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
++ {
++ const u8 r0 = bpf2a64[BPF_REG_0];
++
++ /* if (src == 0) return 0 */
++ jmp_offset = 3; /* skip ahead to else path */
++ check_imm19(jmp_offset);
++ emit(A64_CBNZ(is64, src, jmp_offset), ctx);
++ emit(A64_MOVZ(1, r0, 0, 0), ctx);
++ jmp_offset = epilogue_offset(ctx);
++ check_imm26(jmp_offset);
++ emit(A64_B(jmp_offset), ctx);
++ /* else */
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
++ }
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ ctx->tmp_used = 1;
+@@ -393,17 +417,6 @@ emit_bswap_uxt:
+ emit(A64_ASR(is64, dst, dst, imm), ctx);
+ break;
+
+-#define check_imm(bits, imm) do { \
+- if ((((imm) > 0) && ((imm) >> (bits))) || \
+- (((imm) < 0) && (~(imm) >> (bits)))) { \
+- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+- i, imm, imm); \
+- return -EINVAL; \
+- } \
+-} while (0)
+-#define check_imm19(imm) check_imm(19, imm)
+-#define check_imm26(imm) check_imm(26, imm)
+-
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ jmp_offset = bpf2a64_offset(i + off, i, ctx);
--- /dev/null
+From 14e589ff4aa3f28a5424e92b6495ecb8950080f7 Mon Sep 17 00:00:00 2001
+From: Zi Shen Lim <zlim.lnx@gmail.com>
+Date: Wed, 4 Nov 2015 20:43:59 -0800
+Subject: arm64: bpf: fix mod-by-zero case
+
+From: Zi Shen Lim <zlim.lnx@gmail.com>
+
+commit 14e589ff4aa3f28a5424e92b6495ecb8950080f7 upstream.
+
+Turns out in the case of modulo by zero in a BPF program:
+ A = A % X; (X == 0)
+the expected behavior is to terminate with return value 0.
+
+The bug in JIT is exposed by a new test case [1].
+
+[1] https://lkml.org/lkml/2015/11/4/499
+
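+AArch64 has no modulo instruction, so on the non-zero path the
+remainder is computed with a divide/multiply/subtract sequence,
+roughly equivalent to this C sketch:
+
+	tmp = dst / src;	/* A64_UDIV */
+	tmp = tmp * src;	/* A64_MUL  */
+	dst = dst - tmp;	/* A64_SUB: dst now holds dst % src */
+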
+Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
+Reported-by: Yang Shi <yang.shi@linaro.org>
+Reported-by: Xi Wang <xi.wang@gmail.com>
+CC: Alexei Starovoitov <ast@plumgrid.com>
+Fixes: e54bcde3d69d ("arm64: eBPF JIT compiler")
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/net/bpf_jit_comp.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -269,6 +269,8 @@ static int build_insn(const struct bpf_i
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
++ case BPF_ALU | BPF_MOD | BPF_X:
++ case BPF_ALU64 | BPF_MOD | BPF_X:
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0];
+
+@@ -281,16 +283,19 @@ static int build_insn(const struct bpf_i
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ /* else */
+- emit(A64_UDIV(is64, dst, dst, src), ctx);
++ switch (BPF_OP(code)) {
++ case BPF_DIV:
++ emit(A64_UDIV(is64, dst, dst, src), ctx);
++ break;
++ case BPF_MOD:
++ ctx->tmp_used = 1;
++ emit(A64_UDIV(is64, tmp, dst, src), ctx);
++ emit(A64_MUL(is64, tmp, tmp, src), ctx);
++ emit(A64_SUB(is64, dst, dst, tmp), ctx);
++ break;
++ }
+ break;
+ }
+- case BPF_ALU | BPF_MOD | BPF_X:
+- case BPF_ALU64 | BPF_MOD | BPF_X:
+- ctx->tmp_used = 1;
+- emit(A64_UDIV(is64, tmp, dst, src), ctx);
+- emit(A64_MUL(is64, tmp, tmp, src), ctx);
+- emit(A64_SUB(is64, dst, dst, tmp), ctx);
+- break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit(A64_LSLV(is64, dst, dst, src), ctx);
--- /dev/null
+From 5db4fd8c52810bd9740c1240ebf89223b171aa70 Mon Sep 17 00:00:00 2001
+From: John Blackwood <john.blackwood@ccur.com>
+Date: Mon, 7 Dec 2015 11:50:34 +0000
+Subject: arm64: Clear out any singlestep state on a ptrace detach operation
+
+From: John Blackwood <john.blackwood@ccur.com>
+
+commit 5db4fd8c52810bd9740c1240ebf89223b171aa70 upstream.
+
+Make sure to clear out any ptrace singlestep state when a ptrace(2)
+PTRACE_DETACH call is made on arm64 systems.
+
+Otherwise, the previously ptraced task will die off with a SIGTRAP
+signal if the debugger just previously singlestepped the ptraced task.
+
+Signed-off-by: John Blackwood <john.blackwood@ccur.com>
+[will: added comment to justify why this is in the arch code]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/ptrace.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -58,6 +58,12 @@
+ */
+ void ptrace_disable(struct task_struct *child)
+ {
++ /*
++ * This would be better off in core code, but PTRACE_DETACH has
++	 * grown its fair share of arch-specific warts and changing it
++ * is likely to cause regressions on obscure architectures.
++ */
++ user_disable_single_step(child);
+ }
+
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
--- /dev/null
+From fbc416ff86183e2203cdf975e2881d7c164b0271 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 20 Nov 2015 12:12:21 +0100
+Subject: arm64: fix building without CONFIG_UID16
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit fbc416ff86183e2203cdf975e2881d7c164b0271 upstream.
+
+As reported by Michal Simek, building an ARM64 kernel with CONFIG_UID16
+disabled currently fails because the system call table still needs to
+reference the individual function entry points that are provided by
+kernel/sys_ni.c in this case, and the declarations are hidden inside
+of #ifdef CONFIG_UID16:
+
+arch/arm64/include/asm/unistd32.h:57:8: error: 'sys_lchown16' undeclared here (not in a function)
+ __SYSCALL(__NR_lchown, sys_lchown16)
+
+I believe this problem only exists on ARM64, because older architectures
+tend to not need declarations when their system call table is built
+in assembly code, while newer architectures tend to not need UID16
+support. ARM64 only uses these system calls for compatibility with
+32-bit ARM binaries.
+
+This changes the CONFIG_UID16 check into CONFIG_HAVE_UID16, which is
+set unconditionally on ARM64 with CONFIG_COMPAT, so we see the
+declarations whenever we need them, but otherwise the behavior is
+unchanged.
+
+Fixes: af1839eb4bd4 ("Kconfig: clean up the long arch list for the UID16 config option")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/syscalls.h | 2 +-
+ include/linux/types.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __u
+ asmlinkage long sys_lchown(const char __user *filename,
+ uid_t user, gid_t group);
+ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ asmlinkage long sys_chown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+ asmlinkage long sys_lchown16(const char __user *filename,
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
+
+ typedef unsigned long uintptr_t;
+
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ /* This is defined by include/asm-{arch}/posix_types.h */
+ typedef __kernel_old_uid_t old_uid_t;
+ typedef __kernel_old_gid_t old_gid_t;
--- /dev/null
+From de818bd4522c40ea02a81b387d2fa86f989c9623 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Tue, 17 Nov 2015 11:50:51 +0000
+Subject: arm64: kernel: pause/unpause function graph tracer in cpu_suspend()
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit de818bd4522c40ea02a81b387d2fa86f989c9623 upstream.
+
+The function graph tracer adds instrumentation that is required to trace
+both entry and exit of a function. In particular the function graph
+tracer updates the "return address" of a function in order to insert
+a trace callback on function exit.
+
+Kernel power management functions like cpu_suspend() are called
+upon power down entry with functions called "finishers" that are in turn
+called to trigger the power down sequence, but they may not return to the
+kernel through the normal return path.
+
+When the core resumes from low-power it returns to the cpu_suspend()
+function through the cpu_resume path, which leaves the trace stack frame
+set up by the function tracer in an inconsistent state upon return to the
+kernel when tracing is enabled.
+
+This patch fixes the issue by pausing/resuming the function graph
+tracer on the thread executing cpu_suspend() (i.e. the function call that
+subsequently triggers the "suspend finishers"), so that the function graph
+tracer state is kept consistent across functions that enter power down
+states and never return, by effectively disabling the graph tracer while they
+are executing.
+
+Fixes: 819e50e25d0c ("arm64: Add ftrace support")
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reported-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/suspend.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,3 +1,4 @@
++#include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
+ #include <asm/cacheflush.h>
+@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (
+ local_dbg_save(flags);
+
+ /*
++	 * Function graph tracer state gets inconsistent when the kernel
++ * calls functions that never return (aka suspend finishers) hence
++ * disable graph tracing during their execution.
++ */
++ pause_graph_tracing();
++
++ /*
+ * mm context saved on the stack, it will be restored when
+ * the cpu comes out of reset through the identity mapped
+ * page tables, so that the thread address space is properly
+@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (
+ hw_breakpoint_restore(NULL);
+ }
+
++ unpause_graph_tracing();
++
+ /*
+ * Restore pstate flags. OS lock and mdscr have been already
+ * restored, so from this point onwards, debugging is fully
--- /dev/null
+From c0f0963464c24e034b858441205455bf2a5d93ad Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 16 Nov 2015 10:28:17 +0000
+Subject: arm64: KVM: Fix AArch32 to AArch64 register mapping
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit c0f0963464c24e034b858441205455bf2a5d93ad upstream.
+
+When running a 32bit guest under a 64bit hypervisor, the ARMv8
+architecture defines a mapping of the 32bit registers in the 64bit
+space. This includes banked registers that are being demultiplexed
+over the 64bit ones.
+
+On exceptions caused by an operation involving a 32bit register, the
+HW exposes the register number in the ESR_EL2 register. It was so
+far understood that SW had to distinguish between AArch32 and AArch64
+accesses (based on the current AArch32 mode and register number).
+
+It turns out that I misinterpreted the ARM ARM, and the clue is in
+D1.20.1: "For some exceptions, the exception syndrome given in the
+ESR_ELx identifies one or more register numbers from the issued
+instruction that generated the exception. Where the exception is
+taken from an Exception level using AArch32 these register numbers
+give the AArch64 view of the register."
+
+Which means that the HW is already giving us the translated version,
+and that we shouldn't try to interpret it at all (for example, doing
+an MMIO operation from the IRQ mode using the LR register leads to
+very unexpected behaviours).
+
+The fix is thus not to perform a call to vcpu_reg32() at all from
+vcpu_reg(), and use whatever register number is supplied directly.
+The only case we need to find out about the mapping is when we
+actively generate a register access, which only occurs when injecting
+a fault in a guest.
+
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/kvm_emulate.h | 8 +++++---
+ arch/arm64/kvm/inject_fault.c | 2 +-
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct
+ *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ }
+
++/*
++ * vcpu_reg should always be passed a register number coming from a
++ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
++ * with banked registers.
++ */
+ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+ {
+- if (vcpu_mode_is_32bit(vcpu))
+- return vcpu_reg32(vcpu, reg_num);
+-
+ return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ }
+
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_v
+
+ /* Note: These now point to the banked copies */
+ *vcpu_spsr(vcpu) = new_spsr_value;
+- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
++ *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+ /* Branch to exception vector */
+ if (sctlr & (1 << 13))
--- /dev/null
+From 32d6397805d00573ce1fa55f408ce2bca15b0ad3 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Dec 2015 16:05:36 +0000
+Subject: arm64: mm: ensure that the zero page is visible to the page table walker
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 32d6397805d00573ce1fa55f408ce2bca15b0ad3 upstream.
+
+In paging_init, we allocate the zero page, memset it to zero and then
+point TTBR0 to it in order to avoid speculative fetches through the
+identity mapping.
+
+In order to guarantee that the freshly zeroed page is indeed visible to
+the page table walker, we need to execute a dsb instruction prior to
+writing the TTBR.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -450,6 +450,9 @@ void __init paging_init(void)
+
+ empty_zero_page = virt_to_page(zero_page);
+
++ /* Ensure the zero page is visible to the page table walker */
++ dsb(ishst);
++
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
--- /dev/null
+From 4fee9f364b9b99f76732f2a6fd6df679a237fa74 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 16 Nov 2015 11:18:14 +0100
+Subject: arm64: mm: use correct mapping granularity under DEBUG_RODATA
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 4fee9f364b9b99f76732f2a6fd6df679a237fa74 upstream.
+
+When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
+and resides at an offset that is not a multiple of 512 MB, the rounding
+that occurs in __map_memblock() and fixup_executable() results in
+incorrect regions being mapped.
+
+The following snippet from /sys/kernel/debug/kernel_page_tables shows
+how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
+the first 2 MB of memory (which may be inaccessible from non-secure EL1
+or just reserved by the firmware) is inadvertently mapped into the end of
+the module region.
+
+ ---[ Modules start ]---
+ 0xfffffdffffe00000-0xfffffe0000000000 2M RW NX ... UXN MEM/NORMAL
+ ---[ Modules end ]---
+ ---[ Kernel Mapping ]---
+ 0xfffffe0000000000-0xfffffe0000090000 576K RW NX ... UXN MEM/NORMAL
+ 0xfffffe0000090000-0xfffffe0000200000 1472K ro x ... UXN MEM/NORMAL
+ 0xfffffe0000200000-0xfffffe0000800000 6M ro x ... UXN MEM/NORMAL
+ 0xfffffe0000800000-0xfffffe0000810000 64K ro x ... UXN MEM/NORMAL
+ 0xfffffe0000810000-0xfffffe0000a00000 1984K RW NX ... UXN MEM/NORMAL
+ 0xfffffe0000a00000-0xfffffe00ffe00000 4084M RW NX ... UXN MEM/NORMAL
+
+The same issue is likely to occur on 16k pages kernels whose load
+address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
+SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
+
+Fixes: da141706aea5 ("arm64: add better page protections to arm64")
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -307,8 +307,8 @@ static void __init __map_memblock(phys_a
+ * for now. This will get more fine grained later once all memory
+ * is mapped
+ */
+- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
++ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+@@ -396,18 +396,18 @@ void __init fixup_executable(void)
+ {
+ #ifdef CONFIG_DEBUG_RODATA
+ /* now that we are actually fully mapped, make the start/end more fine grained */
+- if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
++ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+- SECTION_SIZE);
++ SWAPPER_BLOCK_SIZE);
+
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL);
+ }
+
+- if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
++ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+- SECTION_SIZE);
++ SWAPPER_BLOCK_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL);
--- /dev/null
+From 79b568b9d0c7c5d81932f4486d50b38efdd6da6d Mon Sep 17 00:00:00 2001
+From: Richard Purdie <richard.purdie@linuxfoundation.org>
+Date: Fri, 18 Sep 2015 16:31:33 -0700
+Subject: HID: core: Avoid uninitialized buffer access
+
+From: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+commit 79b568b9d0c7c5d81932f4486d50b38efdd6da6d upstream.
+
+hid_connect adds various strings to the buffer but they're all
+conditional. You can find circumstances where nothing would be written
+to it but the kernel will still print the supposedly empty buffer with
+printk. This leads to corruption on the console/in the logs.
+
+Ensure buf is initialized to an empty string.
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+[dvhart: Initialize string to "" rather than assign buf[0] = NULL;]
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: linux-input@vger.kernel.org
+Signed-off-by: Darren Hart <dvhart@linux.intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1589,7 +1589,7 @@ int hid_connect(struct hid_device *hdev,
+ "Multi-Axis Controller"
+ };
+ const char *type, *bus;
+- char buf[64];
++ char buf[64] = "";
+ unsigned int i;
+ int len;
+ int ret;
--- /dev/null
+From d14053b3c714178525f22660e6aaf41263d00056 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <David.Woodhouse@intel.com>
+Date: Thu, 15 Oct 2015 09:28:06 +0100
+Subject: iommu/vt-d: Fix ATSR handling for Root-Complex integrated endpoints
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Woodhouse <David.Woodhouse@intel.com>
+
+commit d14053b3c714178525f22660e6aaf41263d00056 upstream.
+
+The VT-d specification says that "Software must enable ATS on endpoint
+devices behind a Root Port only if the Root Port is reported as
+supporting ATS transactions."
+
+We walk up the tree to find a Root Port, but for integrated devices we
+don't find one — we get to the host bridge. In that case we *should*
+allow ATS. Currently we don't, which means that we are incorrectly
+failing to use ATS for the integrated graphics. Fix that.
+
+We should never break out of this loop "naturally" with bus==NULL,
+since we'll always find bridge==NULL in that case (and now return 1).
+
+So remove the check for (!bridge) after the loop, since it can never
+happen. If it did, it would be worthy of a BUG_ON(!bridge). But since
+it'll oops anyway in that case, that'll do just as well.
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3928,14 +3928,17 @@ int dmar_find_matched_atsr_unit(struct p
+ dev = pci_physfn(dev);
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ bridge = bus->self;
+- if (!bridge || !pci_is_pcie(bridge) ||
++ /* If it's an integrated device, allow ATS */
++ if (!bridge)
++ return 1;
++ /* Connected via non-PCIe: no ATS */
++ if (!pci_is_pcie(bridge) ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
++ /* If we found the root port, look it up in the ATSR */
+ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ break;
+ }
+- if (!bridge)
+- return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
--- /dev/null
+From eda98796aff0d9bf41094b06811f5def3b4c333c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Salva=20Peir=C3=B3?= <speirofr@gmail.com>
+Date: Wed, 7 Oct 2015 07:09:26 -0300
+Subject: [media] media/vivid-osd: fix info leak in ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Salva=20Peir=C3=B3?= <speirofr@gmail.com>
+
+commit eda98796aff0d9bf41094b06811f5def3b4c333c upstream.
+
+The vivid_fb_ioctl() code fails to initialize the 16 _reserved bytes of
+struct fb_vblank after the ->hcount member. Add an explicit
+memset(0) before filling the structure to avoid the info leak.
+
+Signed-off-by: Salva Peiró <speirofr@gmail.com>
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/platform/vivid/vivid-osd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/platform/vivid/vivid-osd.c
++++ b/drivers/media/platform/vivid/vivid-osd.c
+@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info
+ case FBIOGET_VBLANK: {
+ struct fb_vblank vblank;
+
++ memset(&vblank, 0, sizeof(vblank));
+ vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
+ FB_VBLANK_HAVE_VSYNC;
+ vblank.count = 0;
--- /dev/null
+From e46e31a3696ae2d66f32c207df3969613726e636 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 30 Nov 2015 14:47:46 -0500
+Subject: parisc iommu: fix panic due to trying to allocate too large region
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit e46e31a3696ae2d66f32c207df3969613726e636 upstream.
+
+When using the Promise TX2+ SATA controller on PA-RISC, the system often
+crashes with kernel panic, for example just writing data with the dd
+utility will make it crash.
+
+Kernel panic - not syncing: drivers/parisc/sba_iommu.c: I/O MMU @ 000000000000a000 is out of mapping resources
+
+CPU: 0 PID: 18442 Comm: mkspadfs Not tainted 4.4.0-rc2 #2
+Backtrace:
+ [<000000004021497c>] show_stack+0x14/0x20
+ [<0000000040410bf0>] dump_stack+0x88/0x100
+ [<000000004023978c>] panic+0x124/0x360
+ [<0000000040452c18>] sba_alloc_range+0x698/0x6a0
+ [<0000000040453150>] sba_map_sg+0x260/0x5b8
+ [<000000000c18dbb4>] ata_qc_issue+0x264/0x4a8 [libata]
+ [<000000000c19535c>] ata_scsi_translate+0xe4/0x220 [libata]
+ [<000000000c19a93c>] ata_scsi_queuecmd+0xbc/0x320 [libata]
+ [<0000000040499bbc>] scsi_dispatch_cmd+0xfc/0x130
+ [<000000004049da34>] scsi_request_fn+0x6e4/0x970
+ [<00000000403e95a8>] __blk_run_queue+0x40/0x60
+ [<00000000403e9d8c>] blk_run_queue+0x3c/0x68
+ [<000000004049a534>] scsi_run_queue+0x2a4/0x360
+ [<000000004049be68>] scsi_end_request+0x1a8/0x238
+ [<000000004049de84>] scsi_io_completion+0xfc/0x688
+ [<0000000040493c74>] scsi_finish_command+0x17c/0x1d0
+
+The cause of the crash is not exhaustion of the IOMMU space, there is
+plenty of free pages. The function sba_alloc_range is called with size
+0x11000, thus the pages_needed variable is 0x11. The function
+sba_search_bitmap is called with bits_wanted 0x11 and boundary size is
+0x10 (because dma_get_seg_boundary(dev) returns 0xffff).
+
+The function sba_search_bitmap attempts to allocate 17 pages that must not
+cross 16-page boundary - it can't satisfy this requirement
+(iommu_is_span_boundary always returns true) and fails even if there are
+many free entries in the IOMMU space.
+
+How did it happen that we try to allocate 17 pages that don't cross
+16-page boundary? The cause is in the function iommu_coalesce_chunks. This
+function tries to coalesce adjacent entries in the scatterlist. The
+function does several checks if it may coalesce one entry with the next,
+one of those checks is this:
+
+ if (startsg->length + dma_len > max_seg_size)
+ break;
+
+When it finishes coalescing adjacent entries, it allocates the mapping:
+
+sg_dma_len(contig_sg) = dma_len;
+dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
+sg_dma_address(contig_sg) =
+ PIDE_FLAG
+ | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
+ | dma_offset;
+
+It is possible that (startsg->length + dma_len > max_seg_size) is false
+(we are just near the 0x10000 max_seg_size boundary), so the function
+decides to coalesce this entry with the next entry. When the coalescing
+succeeds, the function performs
+ dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
+And now, because of non-zero dma_offset, dma_len is greater than 0x10000.
+iommu_alloc_range (a pointer to sba_alloc_range) is called and it attempts
+to allocate 17 pages for a device that must not cross 16-page boundary.
+
+To fix the bug, we must make sure that dma_len after addition of
+dma_offset and alignment doesn't cross the segment boundary. I.e. change
+ if (startsg->length + dma_len > max_seg_size)
+ break;
+to
+ if (ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) > max_seg_size)
+ break;
+
+This patch makes this change (it precalculates max_seg_boundary at the
+beginning of the function iommu_coalesce_chunks). I also added a check
+that the mapping length doesn't exceed dma_get_seg_boundary(dev) (it is
+not needed for Promise TX2+ SATA, but it may be needed for other devices
+that have dma_get_seg_boundary lower than dma_get_max_seg_size).
+
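+With the Promise TX2+ values quoted above, the patched
+iommu_coalesce_chunks() therefore computes, roughly:
+
+	max_seg_boundary = dma_get_seg_boundary(dev) + 1;	/* 0xffff + 1 = 0x10000 */
+	max_seg_size = min(max_seg_size, max_seg_boundary);	/* capped at 0x10000 */
+
+so an aligned, coalesced mapping can no longer exceed the 16-page
+boundary that sba_search_bitmap() has to honor.
+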
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/parisc/iommu-helpers.h | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, s
+ struct scatterlist *contig_sg; /* contig chunk head */
+ unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+ unsigned int n_mappings = 0;
+- unsigned int max_seg_size = dma_get_max_seg_size(dev);
++ unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
++ (unsigned)DMA_CHUNK_SIZE);
++ unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
++ if (max_seg_boundary) /* check if the addition above didn't overflow */
++ max_seg_size = min(max_seg_size, max_seg_boundary);
+
+ while (nents > 0) {
+
+@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, s
+
+ /*
+ ** First make sure current dma stream won't
+- ** exceed DMA_CHUNK_SIZE if we coalesce the
++ ** exceed max_seg_size if we coalesce the
+ ** next entry.
+ */
+- if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
+- IOVP_SIZE) > DMA_CHUNK_SIZE))
+- break;
+-
+- if (startsg->length + dma_len > max_seg_size)
++ if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
++ max_seg_size))
+ break;
+
+ /*
--- /dev/null
+From 81d7a3294de7e9828310bbf986a67246b13fa01e Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:32 +0800
+Subject: powerpc: Make {cmp}xchg* and their atomic_ versions fully
+ ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 81d7a3294de7e9828310bbf986a67246b13fa01e upstream.
+
+According to memory-barriers.txt, xchg*, cmpxchg* and their atomic_
+versions all need to be fully ordered; however, they are now just
+RELEASE+ACQUIRE, which are not fully ordered.
+
+So also replace PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER with
+PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER in
+__{cmp,}xchg_{u32,u64} respectively to guarantee fully ordered semantics
+of atomic{,64}_{cmp,}xchg() and {cmp,}xchg(), as a complement of commit
+b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+
+This patch depends on patch "powerpc: Make value-returning atomics fully
+ordered" for PPC_ATOMIC_ENTRY_BARRIER definition.
+
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/cmpxchg.h | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned lo
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned lo
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stdcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p,
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
+@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p,
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
--- /dev/null
+From 49e9cf3f0c04bf76ffa59242254110309554861d Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:31 +0800
+Subject: powerpc: Make value-returning atomics fully ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 49e9cf3f0c04bf76ffa59242254110309554861d upstream.
+
+According to memory-barriers.txt:
+
+> Any atomic operation that modifies some state in memory and returns
+> information about the state (old or new) implies an SMP-conditional
+> general memory barrier (smp_mb()) on each side of the actual
+> operation ...
+
+Which means these operations should be fully ordered. However, on PPC,
+PPC_ATOMIC_ENTRY_BARRIER is the barrier before the actual operation,
+which is currently "lwsync" if SMP=y. The leading "lwsync" cannot
+guarantee fully ordered atomics, according to Paul McKenney:
+
+https://lkml.org/lkml/2015/10/14/970
+
+To fix this, we define PPC_ATOMIC_ENTRY_BARRIER as "sync" to guarantee
+the fully-ordered semantics.
+
+This also makes futex atomics fully ordered, which can avoid possible
+memory ordering problems if userspace code relies on futex system call
+for fully ordered semantics.
+
+Fixes: b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/synch.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+ MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
--- /dev/null
+From a61674bdfc7c2bf909c4010699607b62b69b7bec Mon Sep 17 00:00:00 2001
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Date: Tue, 12 Jan 2016 23:14:23 +1100
+Subject: powerpc/module: Handle R_PPC64_ENTRY relocations
+
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+
+commit a61674bdfc7c2bf909c4010699607b62b69b7bec upstream.
+
+GCC 6 will include changes to generated code with -mcmodel=large,
+which is used to build kernel modules on powerpc64le. This was
+necessary because the large model is supposed to allow arbitrary
+sizes and locations of the code and data sections, but the ELFv2
+global entry point prolog still made the unconditional assumption
+that the TOC associated with any particular function can be found
+within 2 GB of the function entry point:
+
+func:
+ addis r2,r12,(.TOC.-func)@ha
+ addi r2,r2,(.TOC.-func)@l
+ .localentry func, .-func
+
+To remove this assumption, GCC will now generate instead this global
+entry point prolog sequence when using -mcmodel=large:
+
+ .quad .TOC.-func
+func:
+ .reloc ., R_PPC64_ENTRY
+ ld r2, -8(r12)
+ add r2, r2, r12
+ .localentry func, .-func
+
+The new .reloc triggers an optimization in the linker that will
+replace this new prolog with the original code (see above) if the
+linker determines that the distance between .TOC. and func is in
+range after all.
+
+Since this new relocation is now present in module object files,
+the kernel module loader is required to handle them too. This
+patch adds support for the new relocation and implements the
+same optimization done by the GNU linker.
+
+Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/uapi/asm/elf.h | 2 ++
+ arch/powerpc/kernel/module_64.c | 27 +++++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+)
+
+--- a/arch/powerpc/include/uapi/asm/elf.h
++++ b/arch/powerpc/include/uapi/asm/elf.h
+@@ -295,6 +295,8 @@ do { \
+ #define R_PPC64_TLSLD 108
+ #define R_PPC64_TOCSAVE 109
+
++#define R_PPC64_ENTRY 118
++
+ #define R_PPC64_REL16 249
+ #define R_PPC64_REL16_LO 250
+ #define R_PPC64_REL16_HI 251
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ */
+ break;
+
++ case R_PPC64_ENTRY:
++ /*
++ * Optimize ELFv2 large code model entry point if
++ * the TOC is within 2GB range of current location.
++ */
++ value = my_r2(sechdrs, me) - (unsigned long)location;
++ if (value + 0x80008000 > 0xffffffff)
++ break;
++ /*
++ * Check for the large code model prolog sequence:
++ * ld r2, ...(r12)
++ * add r2, r2, r12
++ */
++ if ((((uint32_t *)location)[0] & ~0xfffc)
++ != 0xe84c0000)
++ break;
++ if (((uint32_t *)location)[1] != 0x7c426214)
++ break;
++ /*
++ * If found, replace it with:
++ * addis r2, r12, (.TOC.-func)@ha
++ * addi r2, r12, (.TOC.-func)@l
++ */
++ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
++ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
++ break;
++
+ case R_PPC64_REL16_HA:
+ /* Subtract location pointer */
+ value -= (unsigned long)location;
--- /dev/null
+From 98da62b716a3b24ab8e77453c9a8a954124c18cd Mon Sep 17 00:00:00 2001
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+Date: Fri, 11 Dec 2015 12:08:23 +1100
+Subject: powerpc/powernv: pr_warn_once on unsupported OPAL_MSG type
+
+From: Stewart Smith <stewart@linux.vnet.ibm.com>
+
+commit 98da62b716a3b24ab8e77453c9a8a954124c18cd upstream.
+
+When running on newer OPAL firmware that supports sending extra
+OPAL_MSG types, we would print a warning on *every* message received.
+
+This could be a problem for kernels that don't support OPAL_MSG_OCC
+on machines that are running real close to thermal limits and the
+OCC is throttling the chip. For a kernel that is paying attention to
+the message queue, we could get these notifications quite often.
+
+Conceivably, future message types could also come fairly often,
+and printing that we didn't understand them 10,000 times provides
+no further information than printing them once.
+
+Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -358,7 +358,7 @@ static void opal_handle_message(void)
+
+ /* Sanity check */
+ if (type >= OPAL_MSG_TYPE_MAX) {
+- pr_warning("%s: Unknown message type: %u\n", __func__, type);
++ pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
+ return;
+ }
+ opal_message_do_notify(type, (void *)&msg);
--- /dev/null
+From d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 19 Nov 2015 15:44:44 +1100
+Subject: powerpc/tm: Block signal return setting invalid MSR state
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 upstream.
+
+Currently we allow both the MSR T and S bits to be set by userspace on
+a signal return. Unfortunately this is a reserved configuration and
+will cause a TM Bad Thing exception if attempted (via rfid).
+
+This patch checks for this case in both the 32 and 64 bit signals
+code. If both T and S are set, we mark the context as invalid.
+
+Found using a syscall fuzzer.
+
+Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/reg.h | 1 +
+ arch/powerpc/kernel/signal_32.c | 14 +++++++++-----
+ arch/powerpc/kernel/signal_64.c | 4 ++++
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -108,6 +108,7 @@
+ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+ #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct
+ return 1;
+ #endif /* CONFIG_SPE */
+
++ /* Get the top half of the MSR from the user context */
++ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++ return 1;
++ msr_hi <<= 32;
++ /* If TM bits are set to the reserved value, it's an invalid context */
++ if (MSR_TM_RESV(msr_hi))
++ return 1;
++ /* Pull in the MSR TM bits from the user context */
++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ /* Now, recheckpoint. This loads up all of the checkpointed (older)
+ * registers, including FP and V[S]Rs. After recheckpointing, the
+ * transactional versions should be loaded.
+@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct
+ current->thread.tm_texasr |= TEXASR_FS;
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(¤t->thread, msr);
+- /* Get the top half of the MSR */
+- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+- return 1;
+- /* Pull in MSR TM from user context */
+- regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -427,6 +427,10 @@ static long restore_tm_sigcontexts(struc
+
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++ /* Don't allow reserved mode. */
++ if (MSR_TM_RESV(msr))
++ return -EINVAL;
++
+ /* pull in MSR TM from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
--- /dev/null
+From 7f821fc9c77a9b01fe7b1d6e72717b33d8d64142 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 19 Nov 2015 15:44:45 +1100
+Subject: powerpc/tm: Check for already reclaimed tasks
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 7f821fc9c77a9b01fe7b1d6e72717b33d8d64142 upstream.
+
+Currently we can hit a scenario where we'll tm_reclaim() twice. This
+results in a TM bad thing exception because the second reclaim occurs
+when not in suspend mode.
+
+The scenario in which this can happen is the following. We attempt to
+deliver a signal to userspace. To do this we need to obtain the stack
+pointer to write the signal context. To get this stack pointer we
+must tm_reclaim() in case we need to use the checkpointed stack
+pointer (see get_tm_stackpointer()). Normally we'd then return
+directly to userspace to deliver the signal without going through
+__switch_to().
+
+Unfortunately, if at this point we get an error (such as a bad
+userspace stack pointer), we need to exit the process. The exit will
+result in a __switch_to(). __switch_to() will attempt to save the
+process state which results in another tm_reclaim(). This
+tm_reclaim() now causes a TM Bad Thing exception as this state has
+already been saved and the processor is no longer in TM suspend mode.
+Whee!
+
+This patch checks the state of the MSR to ensure we are TM suspended
+before we attempt the tm_reclaim(). If we've already saved the state
+away, we should no longer be in TM suspend mode. This has the
+additional advantage of checking for a potential TM Bad Thing
+exception.
+
+Found using syscall fuzzer.
+
+Fixes: fb09692e71f1 ("powerpc: Add reclaim and recheckpoint functions for context switching transactional memory processes")
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/process.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thr
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+ }
+
++ /*
++ * Use the current MSR TM suspended bit to track if we have
++ * checkpointed state outstanding.
++ * On signal delivery, we'd normally reclaim the checkpointed
++ * state to obtain stack pointer (see:get_tm_stackpointer()).
++ * This will then directly return to userspace without going
++ * through __switch_to(). However, if the stack frame is bad,
++ * we need to exit this thread which calls __switch_to() which
++ * will again attempt to reclaim the already saved tm state.
++ * Hence we need to check that we've not already reclaimed
++ * this state.
++	 * We do this using the current MSR, rather than tracking it in
++	 * some specific thread_struct bit, as it has the additional
++	 * benefit of checking for a potential TM bad thing exception.
++ */
++ if (!MSR_TM_SUSPENDED(mfmsr()))
++ return;
++
+ tm_reclaim(thr, thr->regs->msr, cause);
+
+ /* Having done the reclaim, we now have the checkpointed
--- /dev/null
+From 2ee8a74f2a5da913637f75a19a0da0e7a08c0f86 Mon Sep 17 00:00:00 2001
+From: Li Bin <huawei.libin@huawei.com>
+Date: Fri, 30 Oct 2015 16:31:04 +0800
+Subject: recordmcount: arm64: Replace the ignored mcount call into nop
+
+From: Li Bin <huawei.libin@huawei.com>
+
+commit 2ee8a74f2a5da913637f75a19a0da0e7a08c0f86 upstream.
+
+Currently, recordmcount only records functions that are in the
+following sections:
+.text/.ref.text/.sched.text/.spinlock.text/.irqentry.text/
+.kprobes.text/.text.unlikely
+
+For functions that are not in these sections, the call to mcount is
+left in place and never replaced when the kernel boots up, which adds
+run-time overhead, for example in do_mem_abort (in the .exception.text
+section). This patch makes recordmcount convert such mcount calls into
+nops.
+
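+Concretely, for such a function the object file still contains the
+unrelocated call word
+
+	0x94000000	/* bl <_mcount>, before relocation */
+
+which make_nop_arm64() rewrites to the AArch64 nop encoding
+
+	0xd503201f	/* nop, i.e. the bytes 1f 20 03 d5 */
+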
+Link: http://lkml.kernel.org/r/1446019445-14421-1-git-send-email-huawei.libin@huawei.com
+Link: http://lkml.kernel.org/r/1446193864-24593-4-git-send-email-huawei.libin@huawei.com
+
+Cc: <lkp@intel.com>
+Cc: <catalin.marinas@arm.com>
+Cc: <takahiro.akashi@linaro.org>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Li Bin <huawei.libin@huawei.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -42,6 +42,7 @@
+
+ #ifndef EM_AARCH64
+ #define EM_AARCH64 183
++#define R_AARCH64_NONE 0
+ #define R_AARCH64_ABS64 257
+ #endif
+
+@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_
+ return 0;
+ }
+
++static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
++static int make_nop_arm64(void *map, size_t const offset)
++{
++ uint32_t *ptr;
++
++ ptr = map + offset;
++ /* bl <_mcount> is 0x94000000 before relocation */
++ if (*ptr != 0x94000000)
++ return -1;
++
++ /* Convert to nop */
++ ulseek(fd_map, offset, SEEK_SET);
++ uwrite(fd_map, ideal_nop, 4);
++ return 0;
++}
++
+ /*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces. If successful, then mmap
+@@ -353,7 +370,12 @@ do_file(char const *const fname)
+ altmcount = "__gnu_mcount_nc";
+ break;
+ case EM_AARCH64:
+- reltype = R_AARCH64_ABS64; gpfx = '_'; break;
++ reltype = R_AARCH64_ABS64;
++ make_nop = make_nop_arm64;
++ rel_type_nop = R_AARCH64_NONE;
++ ideal_nop = ideal_nop4_arm64;
++ gpfx = '_';
++ break;
+ case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
+ case EM_METAG: reltype = R_METAG_ADDR32;
+ altmcount = "_mcount_wrapper";
--- /dev/null
+From 2e50c4bef77511b42cc226865d6bc568fa7f8769 Mon Sep 17 00:00:00 2001
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Date: Tue, 12 Jan 2016 23:14:22 +1100
+Subject: scripts/recordmcount.pl: support data in text section on powerpc
+
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+
+commit 2e50c4bef77511b42cc226865d6bc568fa7f8769 upstream.
+
+If a text section starts out with a data blob before the first
+function start label, the disassembly parsing done in recordmcount.pl
+gets confused on powerpc, leading to the creation of corrupted module
+objects.
+
+This was not a problem so far since the compiler would never create
+such text sections. However, this has changed with a recent change
+in GCC 6 to support distances of > 2GB between a function and its
+associated TOC in the ELFv2 ABI, exposing this problem.
+
+There is already code in recordmcount.pl to handle such data blobs
+on the sparc64 platform. This patch uses the same method to handle
+those on powerpc as well.
+
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.pl | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
+
+ } elsif ($arch eq "powerpc") {
+ $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+- $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
++ # See comment in the sparc64 section for why we use '\w'.
++ $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+
+ if ($bits == 64) {
xen-netfront-respect-user-provided-max_queues.patch
xen-netfront-update-num_queues-to-real-created.patch
xfrm-dst_entries_init-per-net-dst_ops.patch
+powerpc-tm-block-signal-return-setting-invalid-msr-state.patch
+powerpc-tm-check-for-already-reclaimed-tasks.patch
+powerpc-powernv-pr_warn_once-on-unsupported-opal_msg-type.patch
+powerpc-make-value-returning-atomics-fully-ordered.patch
+powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch
+scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch
+powerpc-module-handle-r_ppc64_entry-relocations.patch
+recordmcount-arm64-replace-the-ignored-mcount-call-into-nop.patch
+arm64-bpf-fix-div-by-zero-case.patch
+arm64-bpf-fix-mod-by-zero-case.patch
+arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch
+arm64-kernel-pause-unpause-function-graph-tracer-in-cpu_suspend.patch
+arm-arm64-kvm-test-properly-for-a-pte-s-uncachedness.patch
+arm64-kvm-fix-aarch32-to-aarch64-register-mapping.patch
+arm64-fix-building-without-config_uid16.patch
+arm-arm64-kvm-correct-pte-uncachedness-check.patch
+arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
+arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
+iommu-vt-d-fix-atsr-handling-for-root-complex-integrated-endpoints.patch
+parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch
+hid-core-avoid-uninitialized-buffer-access.patch
+staging-lustre-echo_copy.._lsm-dereferences-userland-pointers-directly.patch
+media-vivid-osd-fix-info-leak-in-ioctl.patch
--- /dev/null
+From 9225c0b7b976dd9ceac2b80727a60d8fcb906a62 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@ZenIV.linux.org.uk>
+Date: Tue, 1 Dec 2015 19:52:12 +0000
+Subject: staging: lustre: echo_copy.._lsm() dereferences userland pointers directly
+
+From: Al Viro <viro@ZenIV.linux.org.uk>
+
+commit 9225c0b7b976dd9ceac2b80727a60d8fcb906a62 upstream.
+
+missing get_user()
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/lustre/lustre/obdecho/echo_client.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
++++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
+@@ -1268,6 +1268,7 @@ static int
+ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ {
+ struct lov_stripe_md *ulsm = _ulsm;
++ struct lov_oinfo **p;
+ int nob, i;
+
+ nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
+@@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *l
+ if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
+ return -EFAULT;
+
+- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+- if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+- sizeof(lsm->lsm_oinfo[0])))
++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++ struct lov_oinfo __user *up;
++ if (get_user(up, ulsm->lsm_oinfo + i) ||
++ copy_to_user(up, *p, sizeof(struct lov_oinfo)))
+ return -EFAULT;
+ }
+ return 0;
+@@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *l
+
+ static int
+ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+- void *ulsm, int ulsm_nob)
++ struct lov_stripe_md __user *ulsm, int ulsm_nob)
+ {
+ struct echo_client_obd *ec = ed->ed_ec;
++ struct lov_oinfo **p;
+ int i;
+
+ if (ulsm_nob < sizeof(*lsm))
+@@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed,
+ return -EINVAL;
+
+
+- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+- if (copy_from_user(lsm->lsm_oinfo[i],
+- ((struct lov_stripe_md *)ulsm)-> \
+- lsm_oinfo[i],
+- sizeof(lsm->lsm_oinfo[0])))
++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++ struct lov_oinfo __user *up;
++ if (get_user(up, ulsm->lsm_oinfo + i) ||
++ copy_from_user(*p, up, sizeof(struct lov_oinfo)))
+ return -EFAULT;
+ }
+ return 0;