--- /dev/null
+From 5db4fd8c52810bd9740c1240ebf89223b171aa70 Mon Sep 17 00:00:00 2001
+From: John Blackwood <john.blackwood@ccur.com>
+Date: Mon, 7 Dec 2015 11:50:34 +0000
+Subject: arm64: Clear out any singlestep state on a ptrace detach operation
+
+From: John Blackwood <john.blackwood@ccur.com>
+
+commit 5db4fd8c52810bd9740c1240ebf89223b171aa70 upstream.
+
+Make sure to clear out any ptrace singlestep state when a ptrace(2)
+PTRACE_DETACH call is made on arm64 systems.
+
+Otherwise, the previously ptraced task will die off with a SIGTRAP
+signal if the debugger just previously singlestepped the ptraced task.
+
+Signed-off-by: John Blackwood <john.blackwood@ccur.com>
+[will: added comment to justify why this is in the arch code]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/ptrace.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -58,6 +58,12 @@
+ */
+ void ptrace_disable(struct task_struct *child)
+ {
++ /*
++ * This would be better off in core code, but PTRACE_DETACH has
++ * grown its fair share of arch-specific warts and changing it
++ * is likely to cause regressions on obscure architectures.
++ */
++ user_disable_single_step(child);
+ }
+
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
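
For illustration only (not part of the patch): a minimal tracer sketch of the
sequence this change covers. The tracer single-steps the tracee and then
detaches; without the user_disable_single_step() call added above, the detached
task can keep hardware single-step state armed and later die with SIGTRAP.
Error handling is omitted; this is only a sketch.

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <signal.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {				/* tracee */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);			/* hand control to the tracer */
			for (;;)
				pause();		/* should keep running after detach */
		}

		waitpid(pid, NULL, 0);				/* tracee stopped */
		ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);	/* arm single-step */
		waitpid(pid, NULL, 0);				/* step trap reported */
		ptrace(PTRACE_DETACH, pid, NULL, NULL);		/* must clear step state */
		kill(pid, SIGKILL);				/* clean up the sketch */
		return 0;
	}
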
--- /dev/null
+From 60792ad349f3c6dc5735aafefe5dc9121c79e320 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Fri, 18 Dec 2015 10:35:54 +0000
+Subject: arm64: kernel: enforce pmuserenr_el0 initialization and restore
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit 60792ad349f3c6dc5735aafefe5dc9121c79e320 upstream.
+
+The pmuserenr_el0 register value is architecturally UNKNOWN on reset.
+Current kernel code resets that register value iff the core pmu device is
+correctly probed in the kernel. On platforms with missing DT pmu nodes (or
+with perf events disabled in the kernel), the pmu is not probed, so the
+pmuserenr_el0 register is never reset by the kernel and keeps its
+architecturally UNKNOWN value (the system may run with e.g.
+pmuserenr_el0 == 0x1, meaning PMU counter access is available at EL0,
+which must be disallowed).
+
+This patch adds code that resets pmuserenr_el0 on cold boot and restores
+it on core resume from shutdown, so that the pmuserenr_el0 setup is
+always enforced in the kernel.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/perf_event.c | 3 ---
+ arch/arm64/mm/proc.S | 2 ++
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -574,9 +574,6 @@ static void armv8pmu_reset(void *info)
+
+ /* Initialize & Reset PMNC: C and P bits. */
+ armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+-
+- /* Disable access from userspace. */
+- asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+ }
+
+ static int armv8_pmuv3_map_event(struct perf_event *event)
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -117,6 +117,7 @@ ENTRY(cpu_do_resume)
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
++ msr pmuserenr_el0, xzr // Disable PMU access from EL0
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+@@ -155,6 +156,7 @@ ENTRY(__cpu_setup)
+ msr cpacr_el1, x0 // Enable FP/ASIMD
+ mov x0, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x0 // access to the DCC from EL0
++ msr pmuserenr_el0, xzr // Disable PMU access from EL0
+ /*
+ * Memory region attributes for LPAE:
+ *
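
For illustration only (not part of the patch): why a stale pmuserenr_el0 value
matters. If the UNKNOWN reset value happens to have the EN bit set, userspace
can read the PMU directly; with the register cleared, as the two hunks above
enforce, the same access traps and the process typically receives SIGILL. A
hypothetical EL0 snippet:

	#include <stdio.h>

	static inline unsigned long read_pmccntr(void)
	{
		unsigned long cval;

		/* Raw cycle counter read; permitted at EL0 only if PMUSERENR_EL0 allows it */
		asm volatile("mrs %0, pmccntr_el0" : "=r" (cval));
		return cval;
	}

	int main(void)
	{
		/* SIGILL here when EL0 access to the PMU is disabled */
		printf("pmccntr_el0 = %lu\n", read_pmccntr());
		return 0;
	}
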
--- /dev/null
+From 32d6397805d00573ce1fa55f408ce2bca15b0ad3 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Dec 2015 16:05:36 +0000
+Subject: arm64: mm: ensure that the zero page is visible to the page table walker
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 32d6397805d00573ce1fa55f408ce2bca15b0ad3 upstream.
+
+In paging_init, we allocate the zero page, memset it to zero and then
+point TTBR0 to it in order to avoid speculative fetches through the
+identity mapping.
+
+In order to guarantee that the freshly zeroed page is indeed visible to
+the page table walker, we need to execute a dsb instruction prior to
+writing the TTBR.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -456,6 +456,9 @@ void __init paging_init(void)
+
+ empty_zero_page = virt_to_page(zero_page);
+
++ /* Ensure the zero page is visible to the page table walker */
++ dsb(ishst);
++
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
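
For illustration only (not part of the patch): the ordering the commit message
describes, sketched as the relevant steps of paging_init(). The store-side
barrier has to sit between zeroing the page and programming TTBR0:

	memset(zero_page, 0, PAGE_SIZE);	/* CPU stores fill the page         */
	dsb(ishst);				/* publish them to the table walker */
	cpu_set_reserved_ttbr0();		/* only now point TTBR0 at the page */
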
--- /dev/null
+From 81d7a3294de7e9828310bbf986a67246b13fa01e Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:32 +0800
+Subject: powerpc: Make {cmp}xchg* and their atomic_ versions fully
+ ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 81d7a3294de7e9828310bbf986a67246b13fa01e upstream.
+
+According to memory-barriers.txt, xchg*, cmpxchg* and their atomic_
+versions all need to be fully ordered; however, they are currently only
+RELEASE+ACQUIRE, which is not fully ordered.
+
+So also replace PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER with
+PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER in
+__{cmp,}xchg_{u32,u64} respectively to guarantee fully ordered semantics
+of atomic{,64}_{cmp,}xchg() and {cmp,}xchg(), as a complement of commit
+b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+
+This patch depends on patch "powerpc: Make value-returning atomics fully
+ordered" for PPC_ATOMIC_ENTRY_BARRIER definition.
+
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/cmpxchg.h | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned lo
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned lo
+ unsigned long prev;
+
+ __asm__ __volatile__(
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+ " stdcx. %3,0,%2 \n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p,
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+ " stwcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
+@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p,
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+- PPC_RELEASE_BARRIER
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+- PPC_ACQUIRE_BARRIER
++ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+ 2:"
+ : "=&r" (prev), "+m" (*p)
--- /dev/null
+From 49e9cf3f0c04bf76ffa59242254110309554861d Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:31 +0800
+Subject: powerpc: Make value-returning atomics fully ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 49e9cf3f0c04bf76ffa59242254110309554861d upstream.
+
+According to memory-barriers.txt:
+
+> Any atomic operation that modifies some state in memory and returns
+> information about the state (old or new) implies an SMP-conditional
+> general memory barrier (smp_mb()) on each side of the actual
+> operation ...
+
+Which means these operations should be fully ordered. However, on PPC,
+PPC_ATOMIC_ENTRY_BARRIER is the barrier before the actual operation,
+which is currently "lwsync" if SMP=y. The leading "lwsync" cannot
+guarantee fully ordered atomics, according to Paul McKenney:
+
+https://lkml.org/lkml/2015/10/14/970
+
+To fix this, we define PPC_ATOMIC_ENTRY_BARRIER as "sync" to guarantee
+the fully-ordered semantics.
+
+This also makes futex atomics fully ordered, which avoids possible
+memory ordering problems if userspace code relies on the futex system
+call for fully ordered semantics.
+
+Fixes: b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/synch.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+ MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
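
For illustration only (not part of the patch): with this change and the
companion {cmp}xchg patch in this series, the ll/sc sequences end up bracketed
by full barriers. Schematically, __xchg_u32() now assembles to something like
the following (register choices are illustrative, PPC405 erratum workaround
omitted):

	sync			# PPC_ATOMIC_ENTRY_BARRIER (previously lwsync)
1:	lwarx	r9,0,r3		# load-reserve the old value
	stwcx.	r4,0,r3		# store-conditional the new value
	bne-	1b		# retry if the reservation was lost
	sync			# PPC_ATOMIC_EXIT_BARRIER
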
--- /dev/null
+From a61674bdfc7c2bf909c4010699607b62b69b7bec Mon Sep 17 00:00:00 2001
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Date: Tue, 12 Jan 2016 23:14:23 +1100
+Subject: powerpc/module: Handle R_PPC64_ENTRY relocations
+
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+
+commit a61674bdfc7c2bf909c4010699607b62b69b7bec upstream.
+
+GCC 6 will include changes to generated code with -mcmodel=large,
+which is used to build kernel modules on powerpc64le. This was
+necessary because the large model is supposed to allow arbitrary
+sizes and locations of the code and data sections, but the ELFv2
+global entry point prolog still made the unconditional assumption
+that the TOC associated with any particular function can be found
+within 2 GB of the function entry point:
+
+func:
+ addis r2,r12,(.TOC.-func)@ha
+ addi r2,r2,(.TOC.-func)@l
+ .localentry func, .-func
+
+To remove this assumption, GCC will now generate instead this global
+entry point prolog sequence when using -mcmodel=large:
+
+ .quad .TOC.-func
+func:
+ .reloc ., R_PPC64_ENTRY
+ ld r2, -8(r12)
+ add r2, r2, r12
+ .localentry func, .-func
+
+The new .reloc triggers an optimization in the linker that will
+replace this new prolog with the original code (see above) if the
+linker determines that the distance between .TOC. and func is in
+range after all.
+
+Since this new relocation is now present in module object files,
+the kernel module loader is required to handle them too. This
+patch adds support for the new relocation and implements the
+same optimization done by the GNU linker.
+
+Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/uapi/asm/elf.h | 2 ++
+ arch/powerpc/kernel/module_64.c | 27 +++++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+)
+
+--- a/arch/powerpc/include/uapi/asm/elf.h
++++ b/arch/powerpc/include/uapi/asm/elf.h
+@@ -295,6 +295,8 @@ do { \
+ #define R_PPC64_TLSLD 108
+ #define R_PPC64_TOCSAVE 109
+
++#define R_PPC64_ENTRY 118
++
+ #define R_PPC64_REL16 249
+ #define R_PPC64_REL16_LO 250
+ #define R_PPC64_REL16_HI 251
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechd
+ */
+ break;
+
++ case R_PPC64_ENTRY:
++ /*
++ * Optimize ELFv2 large code model entry point if
++ * the TOC is within 2GB range of current location.
++ */
++ value = my_r2(sechdrs, me) - (unsigned long)location;
++ if (value + 0x80008000 > 0xffffffff)
++ break;
++ /*
++ * Check for the large code model prolog sequence:
++ * ld r2, ...(r12)
++ * add r2, r2, r12
++ */
++ if ((((uint32_t *)location)[0] & ~0xfffc)
++ != 0xe84c0000)
++ break;
++ if (((uint32_t *)location)[1] != 0x7c426214)
++ break;
++ /*
++ * If found, replace it with:
++ * addis r2, r12, (.TOC.-func)@ha
++ * addi r2, r2, (.TOC.-func)@l
++ */
++ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
++ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
++ break;
++
+ case R_PPC64_REL16_HA:
+ /* Subtract location pointer */
+ value -= (unsigned long)location;
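
For illustration only (not part of the patch): where the magic instruction
words in the hunk above come from. A small host-side sketch (field macros and
names are made up here; shifts follow the Power ISA encoding) that reconstructs
the values the loader checks for and writes back:

	#include <stdint.h>
	#include <stdio.h>

	#define OPCD(op) ((uint32_t)(op) << 26)	/* primary opcode    */
	#define RT(r)    ((uint32_t)(r)  << 21)	/* destination GPR   */
	#define RA(r)    ((uint32_t)(r)  << 16)	/* first source GPR  */
	#define RB(r)    ((uint32_t)(r)  << 11)	/* second source GPR */

	int main(void)
	{
		uint32_t ld_r2_0_r12   = OPCD(58) | RT(2) | RA(12);	/* ld    r2,0(r12) -> 0xe84c0000 */
		uint32_t add_r2_r2_r12 = OPCD(31) | RT(2) | RA(2) |
					 RB(12) | (266 << 1);		/* add   r2,r2,r12 -> 0x7c426214 */
		uint32_t addis_r2_r12  = OPCD(15) | RT(2) | RA(12);	/* addis r2,r12,0  -> 0x3c4c0000 */
		uint32_t addi_r2_r2    = OPCD(14) | RT(2) | RA(2);	/* addi  r2,r2,0   -> 0x38420000 */

		printf("%08x %08x %08x %08x\n",
		       ld_r2_0_r12, add_r2_r2_r12, addis_r2_r12, addi_r2_r2);
		return 0;
	}

The displacement and immediate fields are left at zero here; the loader masks
the ld displacement off with ~0xfffc when matching, and fills the immediates
with PPC_HA(value) and PPC_LO(value) when rewriting the prolog.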
--- /dev/null
+From 7f821fc9c77a9b01fe7b1d6e72717b33d8d64142 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 19 Nov 2015 15:44:45 +1100
+Subject: powerpc/tm: Check for already reclaimed tasks
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 7f821fc9c77a9b01fe7b1d6e72717b33d8d64142 upstream.
+
+Currently we can hit a scenario where we'll tm_reclaim() twice. This
+results in a TM bad thing exception because the second reclaim occurs
+when not in suspend mode.
+
+The scenario in which this can happen is the following. We attempt to
+deliver a signal to userspace. To do this we need obtain the stack
+pointer to write the signal context. To get this stack pointer we
+must tm_reclaim() in case we need to use the checkpointed stack
+pointer (see get_tm_stackpointer()). Normally we'd then return
+directly to userspace to deliver the signal without going through
+__switch_to().
+
+Unfortunately, if at this point we get an error (such as a bad
+userspace stack pointer), we need to exit the process. The exit will
+result in a __switch_to(). __switch_to() will attempt to save the
+process state which results in another tm_reclaim(). This
+tm_reclaim() now causes a TM Bad Thing exception as this state has
+already been saved and the processor is no longer in TM suspend mode.
+Whee!
+
+This patch checks the state of the MSR to ensure we are TM suspended
+before we attempt the tm_reclaim(). If we've already saved the state
+away, we should no longer be in TM suspend mode. This has the
+additional advantage of checking for a potential TM Bad Thing
+exception.
+
+Found using syscall fuzzer.
+
+Fixes: fb09692e71f1 ("powerpc: Add reclaim and recheckpoint functions for context switching transactional memory processes")
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/process.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -569,6 +569,24 @@ static void tm_reclaim_thread(struct thr
+ if (!MSR_TM_SUSPENDED(mfmsr()))
+ return;
+
++ /*
++ * Use the current MSR TM suspended bit to track if we have
++ * checkpointed state outstanding.
++ * On signal delivery, we'd normally reclaim the checkpointed
++ * state to obtain the stack pointer (see: get_tm_stackpointer()).
++ * This will then directly return to userspace without going
++ * through __switch_to(). However, if the stack frame is bad,
++ * we need to exit this thread which calls __switch_to() which
++ * will again attempt to reclaim the already saved tm state.
++ * Hence we need to check that we've not already reclaimed
++ * this state.
++ * We do this using the current MSR, rather than tracking it in
++ * some specific thread_struct bit, as it has the additional
++ * benefit of checking for a potential TM Bad Thing exception.
++ */
++ if (!MSR_TM_SUSPENDED(mfmsr()))
++ return;
++
+ tm_reclaim(thr, thr->regs->msr, cause);
+
+ /* Having done the reclaim, we now have the checkpointed
--- /dev/null
+From 2e50c4bef77511b42cc226865d6bc568fa7f8769 Mon Sep 17 00:00:00 2001
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Date: Tue, 12 Jan 2016 23:14:22 +1100
+Subject: scripts/recordmcount.pl: support data in text section on powerpc
+
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+
+commit 2e50c4bef77511b42cc226865d6bc568fa7f8769 upstream.
+
+If a text section starts out with a data blob before the first
+function start label, the disassembly parsing done in recordmcount.pl
+gets confused on powerpc, leading to the creation of corrupted module
+objects.
+
+This was not a problem so far since the compiler would never create
+such text sections. However, this has changed with a recent change
+in GCC 6 to support distances of > 2GB between a function and its
+associated TOC in the ELFv2 ABI, exposing this problem.
+
+There is already code in recordmcount.pl to handle such data blobs
+on the sparc64 platform. This patch uses the same method to handle
+those on powerpc as well.
+
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.pl | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
+
+ } elsif ($arch eq "powerpc") {
+ $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+- $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
++ # See comment in the sparc64 section for why we use '\w'.
++ $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+
+ if ($bits == 64) {
batman-adv-drop-immediate-neigh_ifinfo-free-function.patch
batman-adv-drop-immediate-batadv_hard_iface-free-function.patch
batman-adv-drop-immediate-orig_node-free-function.patch
+powerpc-tm-check-for-already-reclaimed-tasks.patch
+powerpc-make-value-returning-atomics-fully-ordered.patch
+powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch
+scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch
+powerpc-module-handle-r_ppc64_entry-relocations.patch
+arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
+arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
+arm64-kernel-enforce-pmuserenr_el0-initialization-and-restore.patch