--- /dev/null
+From 63f13448d81c910a284b096149411a719cbed501 Mon Sep 17 00:00:00 2001
+From: Richard Guy Briggs <rgb@redhat.com>
+Date: Tue, 9 Dec 2014 15:37:07 -0500
+Subject: powerpc: add little endian flag to syscall_get_arch()
+
+From: Richard Guy Briggs <rgb@redhat.com>
+
+commit 63f13448d81c910a284b096149411a719cbed501 upstream.
+
+Since both ppc and ppc64 have LE variants which are now reported by uname, add
+that flag (__AUDIT_ARCH_LE) to syscall_get_arch() and add an
+AUDIT_ARCH_PPC64LE variant.
+
+Without this, perf trace and auditctl fail.
+
+The mainline kernel reports ppc64le (per a058801) but there is no
+matching AUDIT_ARCH_PPC64LE.
+
+Since 32-bit PPC LE is not supported by audit, don't advertise it in
+AUDIT_ARCH_PPC* variants.
+
+See:
+ https://www.redhat.com/archives/linux-audit/2014-August/msg00082.html
+ https://www.redhat.com/archives/linux-audit/2014-December/msg00004.html
+
+Signed-off-by: Richard Guy Briggs <rgb@redhat.com>
+Acked-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/syscall.h | 6 +++++-
+ include/uapi/linux/audit.h | 2 ++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/syscall.h
++++ b/arch/powerpc/include/asm/syscall.h
+@@ -90,6 +90,10 @@ static inline void syscall_set_arguments
+
+ static inline int syscall_get_arch(void)
+ {
+- return is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
++ int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
++#ifdef __LITTLE_ENDIAN__
++ arch |= __AUDIT_ARCH_LE;
++#endif
++ return arch;
+ }
+ #endif /* _ASM_SYSCALL_H */
+--- a/include/uapi/linux/audit.h
++++ b/include/uapi/linux/audit.h
+@@ -365,7 +365,9 @@ enum {
+ #define AUDIT_ARCH_PARISC (EM_PARISC)
+ #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
+ #define AUDIT_ARCH_PPC (EM_PPC)
++/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
+ #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
++#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+ #define AUDIT_ARCH_S390 (EM_S390)
+ #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
+ #define AUDIT_ARCH_SH (EM_SH)
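For illustration, a minimal userspace sketch (hypothetical, not part of the
patch) showing how the value syscall_get_arch() now returns decomposes into
the EM_* machine number plus the two convention bits; the constants are
copied from include/uapi/linux/audit.h and linux/elf-em.h:

#include <stdio.h>
#include <stdint.h>

#define __AUDIT_ARCH_64BIT 0x80000000U   /* convention bit: 64-bit ABI */
#define __AUDIT_ARCH_LE    0x40000000U   /* convention bit: little-endian */
#define EM_PPC64           21            /* ELF machine number */

#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)

int main(void)
{
        uint32_t arch = AUDIT_ARCH_PPC64LE;

        /* Before this patch no AUDIT_ARCH_* constant matched this value,
         * which is why perf trace and auditctl failed on ppc64le. */
        printf("machine %u, 64-bit %d, little-endian %d\n",
               arch & ~(__AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE),
               !!(arch & __AUDIT_ARCH_64BIT),
               !!(arch & __AUDIT_ARCH_LE));
        return 0;
}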
--- /dev/null
+From 682e77c861c4c60f79ffbeae5e1938ffed24a575 Mon Sep 17 00:00:00 2001
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Date: Fri, 5 Dec 2014 10:01:15 +0530
+Subject: powerpc/book3s: Fix partial invalidation of TLBs in MCE code.
+
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+
+commit 682e77c861c4c60f79ffbeae5e1938ffed24a575 upstream.
+
+The existing MCE code calls the flush_tlb hook with IS=0 (invalidate a
+single page), resulting in only partial invalidation of the TLB, which is
+not right. This patch fixes that by passing IS=0xc00 to invalidate the
+whole TLB, as required for successful recovery from TLB and ERAT errors.
+
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/mce_power.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t d
+ }
+ if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
++ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ /* reset error bits */
+ dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
+ }
+@@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uin
+ break;
+ case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
++ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ handled = 1;
+ }
+ break;
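The one-word change works because the flush_tlb hook places its argument in
the IS field of tlbiel's RB operand and loops over every TLB set. A
simplified sketch of that loop (not the kernel's exact __flush_tlb_power7;
constants as in arch/powerpc/include/asm/reg.h):

#define TLBIEL_INVAL_SET        0xc00   /* IS field: invalidate the set */
#define TLBIEL_INVAL_SET_SHIFT  12
#define POWER7_TLB_SETS         128     /* TLB congruence classes on POWER7 */

/* With action = TLBIEL_INVAL_SET every iteration drops a whole set, so
 * the loop invalidates the entire TLB; with IS=0 each tlbiel would only
 * drop the translation for a single page, leaving stale entries behind. */
static void flush_tlb_sketch(unsigned long action)
{
        unsigned long rb;
        int set;

        for (set = 0; set < POWER7_TLB_SETS; set++) {
                rb = action | ((unsigned long)set << TLBIEL_INVAL_SET_SHIFT);
                asm volatile("tlbiel %0" : : "r" (rb) : "memory");
        }
        asm volatile("ptesync" : : : "memory");
}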
--- /dev/null
+From cd32e2dcc9de6c27ecbbfc0e2079fb64b42bad5f Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Tue, 11 Nov 2014 09:12:28 +1100
+Subject: powerpc: Fix bad NULL pointer check in udbg_uart_getc_poll()
+
+From: Anton Blanchard <anton@samba.org>
+
+commit cd32e2dcc9de6c27ecbbfc0e2079fb64b42bad5f upstream.
+
+We have some code in udbg_uart_getc_poll() that tries to protect
+against a NULL udbg_uart_in, but gets it all wrong.
+
+Found with the LLVM static analyzer (scan-build).
+
+Fixes: 309257484cc1 ("powerpc: Cleanup udbg_16550 and add support for LPC PIO-only UARTs")
+Signed-off-by: Anton Blanchard <anton@samba.org>
+[mpe: Add some newlines for readability while we're here]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/udbg_16550.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/udbg_16550.c
++++ b/arch/powerpc/kernel/udbg_16550.c
+@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
+
+ static int udbg_uart_getc_poll(void)
+ {
+- if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
++ if (!udbg_uart_in)
++ return -1;
++
++ if (!(udbg_uart_in(UART_LSR) & LSR_DR))
+ return udbg_uart_in(UART_RBR);
++
+ return -1;
+ }
+
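For reference, the whole function before this patch, reconstructed from the
hunk above: when udbg_uart_in is NULL the first operand of the || makes the
condition true, so the very next statement calls through the NULL pointer
it just tested for.

static int udbg_uart_getc_poll(void)
{
        if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
                return udbg_uart_in(UART_RBR); /* NULL deref if !udbg_uart_in */
        return -1;
}

Splitting the NULL check out, as the patch does, guarantees udbg_uart_in is
only ever called once it is known to be non-NULL.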
--- /dev/null
+From f34b6c72c3ebaa286d3311a825ef79eccbcca82f Mon Sep 17 00:00:00 2001
+From: "sukadev@linux.vnet.ibm.com" <sukadev@linux.vnet.ibm.com>
+Date: Wed, 10 Dec 2014 14:29:13 -0800
+Subject: powerpc/perf/hv-24x7: Use per-cpu page buffer
+
+From: "sukadev@linux.vnet.ibm.com" <sukadev@linux.vnet.ibm.com>
+
+commit f34b6c72c3ebaa286d3311a825ef79eccbcca82f upstream.
+
+The 24x7 counters are continuously running and not updated on an
+interrupt. So we record the event counts when stopping the event or
+deleting it.
+
+But to "read" a single counter in 24x7, we allocate a page and pass it
+into the hypervisor (the HV returns the page full of counters from which
+we extract the specific counter for this event).
+
+We allocate a page using GFP_USER and when deleting the event, we end up
+with the following warning because we are blocking in interrupt context.
+
+ [ 698.641709] BUG: scheduling while atomic: swapper/0/0/0x10010000
+
+We could use GFP_ATOMIC but that could result in failures. Pre-allocate
+a buffer so we don't have to allocate in interrupt context. Further, as
+Michael Ellerman suggested, use a per-CPU buffer so we only need to
+allocate once per CPU.
+
+Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/perf/hv-24x7.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -217,11 +217,14 @@ static bool is_physical_domain(int domai
+ domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
+ }
+
++DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
++DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
++
+ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
+ u16 lpar, u64 *res,
+ bool success_expected)
+ {
+- unsigned long ret = -ENOMEM;
++ unsigned long ret;
+
+ /*
+ * request_buffer and result_buffer are not required to be 4k aligned,
+@@ -243,13 +246,11 @@ static unsigned long single_24x7_request
+ BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
+ BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
+
+- request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
+- if (!request_buffer)
+- goto out;
++ request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
++ result_buffer = (void *)get_cpu_var(hv_24x7_resb);
+
+- result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
+- if (!result_buffer)
+- goto out_free_request_buffer;
++ memset(request_buffer, 0, 4096);
++ memset(result_buffer, 0, 4096);
+
+ *request_buffer = (struct reqb) {
+ .buf = {
+@@ -278,15 +279,11 @@ static unsigned long single_24x7_request
+ domain, offset, ix, lpar, ret, ret,
+ result_buffer->buf.detailed_rc,
+ result_buffer->buf.failing_request_ix);
+- goto out_free_result_buffer;
++ goto out;
+ }
+
+ *res = be64_to_cpu(result_buffer->result);
+
+-out_free_result_buffer:
+- kfree(result_buffer);
+-out_free_request_buffer:
+- kfree(request_buffer);
+ out:
+ return ret;
+ }
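A minimal sketch of the per-CPU buffer pattern adopted above, with
hypothetical names: DEFINE_PER_CPU reserves one aligned 4k buffer per
possible CPU at boot, and get_cpu_var() disables preemption while returning
this CPU's copy, so nothing needs to be allocated in a context that must
not sleep:

#include <linux/percpu.h>
#include <linux/string.h>

DEFINE_PER_CPU(char, example_buf[4096]) __aligned(4096);

static void use_percpu_buffer(void)
{
        /* get_cpu_var() disables preemption until put_cpu_var() */
        char *buf = get_cpu_var(example_buf);

        memset(buf, 0, 4096);
        /* ... build the request and make the hypervisor call ... */

        put_cpu_var(example_buf);
}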
--- /dev/null
+From 8117ac6a6c2fa0f847ff6a21a1f32c8d2c8501d0 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Wed, 10 Dec 2014 00:26:50 +0530
+Subject: powerpc/powernv: Switch off MMU before entering nap/sleep/rvwinkle mode
+
+From: Paul Mackerras <paulus@samba.org>
+
+commit 8117ac6a6c2fa0f847ff6a21a1f32c8d2c8501d0 upstream.
+
+Currently, when going idle, we set the flag indicating that we are in
+nap mode (paca->kvm_hstate.hwthread_state) and then execute the nap
+(or sleep or rvwinkle) instruction, all with the MMU on. This is bad
+for two reasons: (a) the architecture specifies that those instructions
+must be executed with the MMU off, and in fact with only the SF, HV, ME
+and possibly RI bits set, and (b) this introduces a race, because as
+soon as we set the flag, another thread can switch the MMU to a guest
+context. If the race is lost, this thread will typically start looping
+on relocation-on ISIs at 0xc...4400.
+
+This fixes it by setting the MSR as required by the architecture before
+setting the flag or executing the nap/sleep/rvwinkle instruction.
+
+[ shreyas@linux.vnet.ibm.com: Edited to handle LE ]
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: linuxppc-dev@lists.ozlabs.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/reg.h | 2 ++
+ arch/powerpc/kernel/idle_power7.S | 18 +++++++++++++++++-
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -118,8 +118,10 @@
+ #define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+ #ifdef __BIG_ENDIAN__
+ #define MSR_ __MSR
++#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV)
+ #else
+ #define MSR_ (__MSR | MSR_LE)
++#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE)
+ #endif
+ #define MSR_KERNEL (MSR_ | MSR_64BIT)
+ #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -101,7 +101,23 @@ _GLOBAL(power7_powersave_common)
+ std r9,_MSR(r1)
+ std r1,PACAR1(r13)
+
+-_GLOBAL(power7_enter_nap_mode)
++ /*
++ * Go to real mode to do the nap, as required by the architecture.
++ * Also, we need to be in real mode before setting hwthread_state,
++ * because as soon as we do that, another thread can switch
++ * the MMU context to the guest.
++ */
++ LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
++ li r6, MSR_RI
++ andc r6, r9, r6
++ LOAD_REG_ADDR(r7, power7_enter_nap_mode)
++ mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
++ mtspr SPRN_SRR0, r7
++ mtspr SPRN_SRR1, r5
++ rfid
++
++ .globl power7_enter_nap_mode
++power7_enter_nap_mode:
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're napping */
+ li r4,KVM_HWTHREAD_IN_NAP
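For reference, the new MSR_IDLE value decomposes as below (a sketch; bit
positions as in arch/powerpc/include/asm/reg.h, and the LE variant is named
MSR_IDLE_LE here only for side-by-side comparison — the patch defines both
under the single name MSR_IDLE, selected by __BIG_ENDIAN__). Machine-check
enable, 64-bit mode and hypervisor state are kept, while IR/DR are absent,
so after the rfid the nap instruction executes with instruction and data
relocation off:

#define MSR_SF  (1ULL << 63)    /* 64-bit mode */
#define MSR_HV  (1ULL << 60)    /* hypervisor state */
#define MSR_ME  (1ULL << 12)    /* machine check enable */
#define MSR_LE  (1ULL << 0)     /* little-endian mode */

/* Big-endian kernels: */
#define MSR_IDLE        (MSR_ME | MSR_SF | MSR_HV)
/* Little-endian kernels must also keep LE: */
#define MSR_IDLE_LE     (MSR_ME | MSR_SF | MSR_HV | MSR_LE)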
--- /dev/null
+From 7c5c92ed56d932b2c19c3f8aea86369509407d33 Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Tue, 9 Dec 2014 10:58:19 +1100
+Subject: powerpc: Secondary CPUs must set cpu_callin_map after setting active and online
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 7c5c92ed56d932b2c19c3f8aea86369509407d33 upstream.
+
+I have a busy ppc64le KVM box where guests sometimes hit the infamous
+"kernel BUG at kernel/smpboot.c:134!" issue during boot:
+
+ BUG_ON(td->cpu != smp_processor_id());
+
+Basically a per CPU hotplug thread scheduled on the wrong CPU. The oops
+output confirms it:
+
+ CPU: 0
+ Comm: watchdog/130
+
+The problem is that we aren't ensuring the CPU active and online bits are set
+before allowing the master to continue on. The master unparks the secondary
+CPU's kthreads and the scheduler looks for a CPU to run on. It calls
+select_task_rq and realises the suggested CPU is not in the cpus_allowed
+mask. It then ends up in select_fallback_rq, and since the active and
+online bits aren't set we choose some other CPU to run on.
+
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/smp.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -700,7 +700,6 @@ void start_secondary(void *unused)
+ smp_store_cpu_info(cpu);
+ set_dec(tb_ticks_per_jiffy);
+ preempt_disable();
+- cpu_callin_map[cpu] = 1;
+
+ if (smp_ops->setup_cpu)
+ smp_ops->setup_cpu(cpu);
+@@ -739,6 +738,14 @@ void start_secondary(void *unused)
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
++ /*
++ * CPU must be marked active and online before we signal back to the
++ * master, because the scheduler needs to see the cpu_online and
++ * cpu_active bits set.
++ */
++ smp_wmb();
++ cpu_callin_map[cpu] = 1;
++
+ local_irq_enable();
+
+ cpu_startup_entry(CPUHP_ONLINE);
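The barrier pairing at work here, reduced to a sketch with hypothetical
names: the secondary publishes its state before setting the flag the master
polls, and the master pairs the write barrier with a read barrier before
trusting that state:

#include <asm/barrier.h>        /* smp_wmb(), smp_rmb() */
#include <asm/processor.h>      /* cpu_relax() */

static int secondary_state_ready;  /* stands in for the online/active bits */
static int callin_flag;            /* stands in for cpu_callin_map[cpu] */

/* Secondary CPU: publish state first, then signal. */
static void secondary_sketch(void)
{
        secondary_state_ready = 1;      /* like set_cpu_online()/active */
        smp_wmb();                      /* order the state before the flag */
        callin_flag = 1;                /* master may proceed now */
}

/* Master CPU: wait for the signal, then pair with a read barrier. */
static void master_sketch(void)
{
        while (!callin_flag)
                cpu_relax();
        smp_rmb();      /* pairs with smp_wmb() on the secondary */
        /* secondary_state_ready is now guaranteed to be visible */
}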
asoc-pcm512x-trigger-auto-increment-of-register-addresses-on-i2c.patch
asoc-dwc-ensure-fifos-are-flushed-to-prevent-channel-swap.patch
ktest-fix-make_min_config-to-handle-new-assign_configs-call.patch
+powerpc-fix-bad-null-pointer-check-in-udbg_uart_getc_poll.patch
+powerpc-book3s-fix-partial-invalidation-of-tlbs-in-mce-code.patch
+powerpc-secondary-cpus-must-set-cpu_callin_map-after-setting-active-and-online.patch
+powerpc-powernv-switch-off-mmu-before-entering-nap-sleep-rvwinkle-mode.patch
+powerpc-perf-hv-24x7-use-per-cpu-page-buffer.patch
+powerpc-add-little-endian-flag-to-syscall_get_arch.patch