--- /dev/null
+From e2477233145f2156434afb799583bccd878f3e9f Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 3 Jan 2019 14:14:08 -0600
+Subject: ARM: s3c24xx: Fix boolean expressions in osiris_dvs_notify
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit e2477233145f2156434afb799583bccd878f3e9f upstream.
+
+Fix boolean expressions by using logical AND operator '&&' instead of
+bitwise operator '&'.
+
+This issue was detected with the help of Coccinelle.
+
+Fixes: 4fa084af28ca ("ARM: OSIRIS: DVS (Dynamic Voltage Scaling) supoort.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+[krzk: Fix -Wparentheses warning]
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-s3c24xx/mach-osiris-dvs.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
++++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+@@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct noti
+
+ switch (val) {
+ case CPUFREQ_PRECHANGE:
+- if (old_dvs & !new_dvs ||
+- cur_dvs & !new_dvs) {
++ if ((old_dvs && !new_dvs) ||
++ (cur_dvs && !new_dvs)) {
+ pr_debug("%s: exiting dvs\n", __func__);
+ cur_dvs = false;
+ gpio_set_value(OSIRIS_GPIO_DVS, 1);
+ }
+ break;
+ case CPUFREQ_POSTCHANGE:
+- if (!old_dvs & new_dvs ||
+- !cur_dvs & new_dvs) {
++ if ((!old_dvs && new_dvs) ||
++ (!cur_dvs && new_dvs)) {
+ pr_debug("entering dvs\n");
+ cur_dvs = true;
+ gpio_set_value(OSIRIS_GPIO_DVS, 0);
--- /dev/null
+From b9a4b9d084d978f80eb9210727c81804588b42ff Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 1 Mar 2019 13:28:00 +0000
+Subject: arm64: debug: Don't propagate UNKNOWN FAR into si_code for debug signals
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit b9a4b9d084d978f80eb9210727c81804588b42ff upstream.
+
+FAR_EL1 is UNKNOWN for all debug exceptions other than those caused by
+taking a hardware watchpoint. Unfortunately, if a debug handler returns
+a non-zero value, then we will propagate the UNKNOWN FAR value to
+userspace via the si_addr field of the SIGTRAP siginfo_t.
+
+Instead, let's set si_addr to take on the PC of the faulting instruction,
+which we have available in the current pt_regs.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -824,11 +824,12 @@ void __init hook_debug_fault_code(int nr
+ debug_fault_info[nr].name = name;
+ }
+
+-asmlinkage int __exception do_debug_exception(unsigned long addr,
++asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ unsigned int esr,
+ struct pt_regs *regs)
+ {
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
++ unsigned long pc = instruction_pointer(regs);
+ int rv;
+
+ /*
+@@ -838,14 +839,14 @@ asmlinkage int __exception do_debug_exce
+ if (interrupts_enabled(regs))
+ trace_hardirqs_off();
+
+- if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
++ if (user_mode(regs) && !is_ttbr0_addr(pc))
+ arm64_apply_bp_hardening();
+
+- if (!inf->fn(addr, esr, regs)) {
++ if (!inf->fn(addr_if_watchpoint, esr, regs)) {
+ rv = 1;
+ } else {
+ arm64_notify_die(inf->name, regs,
+- inf->sig, inf->code, (void __user *)addr, esr);
++ inf->sig, inf->code, (void __user *)pc, esr);
+ rv = 0;
+ }
+
--- /dev/null
+From 6bd288569b50bc89fa5513031086746968f585cb Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 1 Mar 2019 13:28:01 +0000
+Subject: arm64: debug: Ensure debug handlers check triggering exception level
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 6bd288569b50bc89fa5513031086746968f585cb upstream.
+
+Debug exception handlers may be called for exceptions generated both by
+user and kernel code. In many cases, this is checked explicitly, but
+in other cases things either happen to work by happy accident or they
+go slightly wrong. For example, executing 'brk #4' from userspace will
+enter the kprobes code and be ignored, but the instruction will be
+retried forever in userspace instead of delivering a SIGTRAP.
+
+Fix this issue in the most stable-friendly fashion by simply adding
+explicit checks of the triggering exception level to all of our debug
+exception handlers.
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/kgdb.c | 14 ++++++++++----
+ arch/arm64/kernel/probes/kprobes.c | 6 ++++++
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/kgdb.c
++++ b/arch/arm64/kernel/kgdb.c
+@@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int excep
+
+ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+ {
++ if (user_mode(regs))
++ return DBG_HOOK_ERROR;
++
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+- return 0;
++ return DBG_HOOK_HANDLED;
+ }
+ NOKPROBE_SYMBOL(kgdb_brk_fn)
+
+ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+ {
++ if (user_mode(regs))
++ return DBG_HOOK_ERROR;
++
+ compiled_break = 1;
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+- return 0;
++ return DBG_HOOK_HANDLED;
+ }
+ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+
+ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+ {
+- if (!kgdb_single_step)
++ if (user_mode(regs) || !kgdb_single_step)
+ return DBG_HOOK_ERROR;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+- return 0;
++ return DBG_HOOK_HANDLED;
+ }
+ NOKPROBE_SYMBOL(kgdb_step_brk_fn);
+
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_reg
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ int retval;
+
++ if (user_mode(regs))
++ return DBG_HOOK_ERROR;
++
+ /* return error if this is not our step */
+ retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
+
+@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_reg
+ int __kprobes
+ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
+ {
++ if (user_mode(regs))
++ return DBG_HOOK_ERROR;
++
+ kprobe_handler(regs);
+ return DBG_HOOK_HANDLED;
+ }
--- /dev/null
+From 5870970b9a828d8693aa6d15742573289d7dbcd0 Mon Sep 17 00:00:00 2001
+From: Julien Thierry <julien.thierry@arm.com>
+Date: Thu, 31 Jan 2019 14:58:39 +0000
+Subject: arm64: Fix HCR.TGE status for NMI contexts
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+commit 5870970b9a828d8693aa6d15742573289d7dbcd0 upstream.
+
+When using VHE, the host needs to clear HCR_EL2.TGE bit in order
+to interact with guest TLBs, switching from EL2&0 translation regime
+to EL1&0.
+
+However, some non-maskable asynchronous event could happen while TGE is
+cleared like SDEI. Because of this address translation operations
+relying on EL2&0 translation regime could fail (tlb invalidation,
+userspace access, ...).
+
+Fix this by properly setting HCR_EL2.TGE when entering NMI context and
+clear it if necessary when returning to the interrupted context.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Suggested-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: linux-arch@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/hardirq.h | 31 +++++++++++++++++++++++++++++++
+ arch/arm64/kernel/irq.c | 3 +++
+ include/linux/hardirq.h | 7 +++++++
+ 3 files changed, 41 insertions(+)
+
+--- a/arch/arm64/include/asm/hardirq.h
++++ b/arch/arm64/include/asm/hardirq.h
+@@ -17,8 +17,12 @@
+ #define __ASM_HARDIRQ_H
+
+ #include <linux/cache.h>
++#include <linux/percpu.h>
+ #include <linux/threads.h>
++#include <asm/barrier.h>
+ #include <asm/irq.h>
++#include <asm/kvm_arm.h>
++#include <asm/sysreg.h>
+
+ #define NR_IPI 7
+
+@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
+
+ #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+
++struct nmi_ctx {
++ u64 hcr;
++};
++
++DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
++
++#define arch_nmi_enter() \
++ do { \
++ if (is_kernel_in_hyp_mode()) { \
++ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
++ nmi_ctx->hcr = read_sysreg(hcr_el2); \
++ if (!(nmi_ctx->hcr & HCR_TGE)) { \
++ write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
++ isb(); \
++ } \
++ } \
++ } while (0)
++
++#define arch_nmi_exit() \
++ do { \
++ if (is_kernel_in_hyp_mode()) { \
++ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
++ if (!(nmi_ctx->hcr & HCR_TGE)) \
++ write_sysreg(nmi_ctx->hcr, hcr_el2); \
++ } \
++ } while (0)
++
+ static inline void ack_bad_irq(unsigned int irq)
+ {
+ extern unsigned long irq_err_count;
+--- a/arch/arm64/kernel/irq.c
++++ b/arch/arm64/kernel/irq.c
+@@ -33,6 +33,9 @@
+
+ unsigned long irq_err_count;
+
++/* Only access this in an NMI enter/exit */
++DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
++
+ DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -60,8 +60,14 @@ extern void irq_enter(void);
+ */
+ extern void irq_exit(void);
+
++#ifndef arch_nmi_enter
++#define arch_nmi_enter() do { } while (0)
++#define arch_nmi_exit() do { } while (0)
++#endif
++
+ #define nmi_enter() \
+ do { \
++ arch_nmi_enter(); \
+ printk_nmi_enter(); \
+ lockdep_off(); \
+ ftrace_nmi_enter(); \
+@@ -80,6 +86,7 @@ extern void irq_exit(void);
+ ftrace_nmi_exit(); \
+ lockdep_on(); \
+ printk_nmi_exit(); \
++ arch_nmi_exit(); \
+ } while (0)
+
+ #endif /* LINUX_HARDIRQ_H */
--- /dev/null
+From c88b093693ccbe41991ef2e9b1d251945e6e54ed Mon Sep 17 00:00:00 2001
+From: Dave Martin <Dave.Martin@arm.com>
+Date: Thu, 21 Feb 2019 11:42:32 +0000
+Subject: arm64: KVM: Fix architecturally invalid reset value for FPEXC32_EL2
+
+From: Dave Martin <Dave.Martin@arm.com>
+
+commit c88b093693ccbe41991ef2e9b1d251945e6e54ed upstream.
+
+Due to what looks like a typo dating back to the original addition
+of FPEXC32_EL2 handling, KVM currently initialises this register to
+an architecturally invalid value.
+
+As a result, the VECITR field (RES1) in bits [10:8] is initialised
+with 0, and the two reserved (RES0) bits [6:5] are initialised with
+1. (In the Common VFP Subarchitecture as specified by ARMv7-A,
+these two bits were IMP DEF. ARMv8-A removes them.)
+
+This patch changes the reset value from 0x70 to 0x700, which
+reflects the architectural constraints and is presumably what was
+originally intended.
+
+Cc: <stable@vger.kernel.org> # 4.12.x-
+Cc: Christoffer Dall <christoffer.dall@arm.com>
+Fixes: 62a89c44954f ("arm64: KVM: 32bit handling of coprocessor traps")
+Signed-off-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/sys_regs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1476,7 +1476,7 @@ static const struct sys_reg_desc sys_reg
+
+ { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
+ { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
+- { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
++ { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
+ };
+
+ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
--- /dev/null
+From 904cdbd41d749a476863a0ca41f6f396774f26e4 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Sun, 10 Feb 2019 23:23:04 -0500
+Subject: jbd2: clear dirty flag when revoking a buffer from an older transaction
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit 904cdbd41d749a476863a0ca41f6f396774f26e4 upstream.
+
+Now, we capture a data corruption problem on ext4 while we're truncating
+an extent index block. Imaging that if we are revoking a buffer which
+has been journaled by the committing transaction, the buffer's jbddirty
+flag will not be cleared in jbd2_journal_forget(), so the commit code
+will set the buffer dirty flag again after refile the buffer.
+
+fsx kjournald2
+ jbd2_journal_commit_transaction
+jbd2_journal_revoke commit phase 1~5...
+ jbd2_journal_forget
+ belongs to older transaction commit phase 6
+ jbddirty not clear __jbd2_journal_refile_buffer
+ __jbd2_journal_unfile_buffer
+ test_clear_buffer_jbddirty
+ mark_buffer_dirty
+
+Finally, if the freed extent index block was allocated again as data
+block by some other files, it may corrupt the file data after writing
+cached pages later, such as during unmount time. (In general,
+clean_bdev_aliases() related helpers should be invoked after
+re-allocation to prevent the above corruption, but unfortunately we
+missed it when zeroout the head of extra extent blocks in
+ext4_ext_handle_unwritten_extents()).
+
+This patch mark buffer as freed and set j_next_transaction to the new
+transaction when it already belongs to the committing transaction in
+jbd2_journal_forget(), so that commit code knows it should clear dirty
+bits when it is done with the buffer.
+
+This problem can be reproduced by xfstests generic/455 easily with
+seeds (3246 3247 3248 3249).
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/transaction.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1609,14 +1609,21 @@ int jbd2_journal_forget (handle_t *handl
+ /* However, if the buffer is still owned by a prior
+ * (committing) transaction, we can't drop it yet... */
+ JBUFFER_TRACE(jh, "belongs to older transaction");
+- /* ... but we CAN drop it from the new transaction if we
+- * have also modified it since the original commit. */
++ /* ... but we CAN drop it from the new transaction through
++ * marking the buffer as freed and set j_next_transaction to
++ * the new transaction, so that not only the commit code
++ * knows it should clear dirty bits when it is done with the
++ * buffer, but also the buffer can be checkpointed only
++ * after the new transaction commits. */
+
+- if (jh->b_next_transaction) {
+- J_ASSERT(jh->b_next_transaction == transaction);
++ set_buffer_freed(bh);
++
++ if (!jh->b_next_transaction) {
+ spin_lock(&journal->j_list_lock);
+- jh->b_next_transaction = NULL;
++ jh->b_next_transaction = transaction;
+ spin_unlock(&journal->j_list_lock);
++ } else {
++ J_ASSERT(jh->b_next_transaction == transaction);
+
+ /*
+ * only drop a reference if this transaction modified
--- /dev/null
+From 01215d3edb0f384ddeaa5e4a22c1ae5ff634149f Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Thu, 21 Feb 2019 11:24:09 -0500
+Subject: jbd2: fix compile warning when using JBUFFER_TRACE
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit 01215d3edb0f384ddeaa5e4a22c1ae5ff634149f upstream.
+
+The jh pointer may be used uninitialized in the two cases below and the
+compiler complain about it when enabling JBUFFER_TRACE macro, fix them.
+
+In file included from fs/jbd2/transaction.c:19:0:
+fs/jbd2/transaction.c: In function ‘jbd2_journal_get_undo_access’:
+./include/linux/jbd2.h:1637:38: warning: ‘jh’ is used uninitialized in this function [-Wuninitialized]
+ #define JBUFFER_TRACE(jh, info) do { printk("%s: %d\n", __func__, jh->b_jcount);} while (0)
+ ^
+fs/jbd2/transaction.c:1219:23: note: ‘jh’ was declared here
+ struct journal_head *jh;
+ ^
+In file included from fs/jbd2/transaction.c:19:0:
+fs/jbd2/transaction.c: In function ‘jbd2_journal_dirty_metadata’:
+./include/linux/jbd2.h:1637:38: warning: ‘jh’ may be used uninitialized in this function [-Wmaybe-uninitialized]
+ #define JBUFFER_TRACE(jh, info) do { printk("%s: %d\n", __func__, jh->b_jcount);} while (0)
+ ^
+fs/jbd2/transaction.c:1332:23: note: ‘jh’ was declared here
+ struct journal_head *jh;
+ ^
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@vger.kernel.org
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/transaction.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1252,11 +1252,12 @@ int jbd2_journal_get_undo_access(handle_
+ struct journal_head *jh;
+ char *committed_data = NULL;
+
+- JBUFFER_TRACE(jh, "entry");
+ if (jbd2_write_access_granted(handle, bh, true))
+ return 0;
+
+ jh = jbd2_journal_add_journal_head(bh);
++ JBUFFER_TRACE(jh, "entry");
++
+ /*
+ * Do this first --- it can drop the journal lock, so we want to
+ * make sure that obtaining the committed_data is done
+@@ -1367,15 +1368,17 @@ int jbd2_journal_dirty_metadata(handle_t
+
+ if (is_handle_aborted(handle))
+ return -EROFS;
+- if (!buffer_jbd(bh)) {
+- ret = -EUCLEAN;
+- goto out;
+- }
++ if (!buffer_jbd(bh))
++ return -EUCLEAN;
++
+ /*
+ * We don't grab jh reference here since the buffer must be part
+ * of the running transaction.
+ */
+ jh = bh2jh(bh);
++ jbd_debug(5, "journal_head %p\n", jh);
++ JBUFFER_TRACE(jh, "entry");
++
+ /*
+ * This and the following assertions are unreliable since we may see jh
+ * in inconsistent state unless we grab bh_state lock. But this is
+@@ -1409,9 +1412,6 @@ int jbd2_journal_dirty_metadata(handle_t
+ }
+
+ journal = transaction->t_journal;
+- jbd_debug(5, "journal_head %p\n", jh);
+- JBUFFER_TRACE(jh, "entry");
+-
+ jbd_lock_bh_state(bh);
+
+ if (jh->b_modified == 0) {
--- /dev/null
+From 9580b71b5a7863c24a9bd18bcd2ad759b86b1eff Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Wed, 27 Feb 2019 11:45:30 +0000
+Subject: powerpc/32: Clear on-stack exception marker upon exception return
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 9580b71b5a7863c24a9bd18bcd2ad759b86b1eff upstream.
+
+Clear the on-stack STACK_FRAME_REGS_MARKER on exception exit in order
+to avoid confusing stacktrace like the one below.
+
+ Call Trace:
+ [c0e9dca0] [c01c42a0] print_address_description+0x64/0x2bc (unreliable)
+ [c0e9dcd0] [c01c4684] kasan_report+0xfc/0x180
+ [c0e9dd10] [c0895130] memchr+0x24/0x74
+ [c0e9dd30] [c00a9e38] msg_print_text+0x124/0x574
+ [c0e9dde0] [c00ab710] console_unlock+0x114/0x4f8
+ [c0e9de40] [c00adc60] vprintk_emit+0x188/0x1c4
+ --- interrupt: c0e9df00 at 0x400f330
+ LR = init_stack+0x1f00/0x2000
+ [c0e9de80] [c00ae3c4] printk+0xa8/0xcc (unreliable)
+ [c0e9df20] [c0c27e44] early_irq_init+0x38/0x108
+ [c0e9df50] [c0c15434] start_kernel+0x310/0x488
+ [c0e9dff0] [00003484] 0x3484
+
+With this patch the trace becomes:
+
+ Call Trace:
+ [c0e9dca0] [c01c42c0] print_address_description+0x64/0x2bc (unreliable)
+ [c0e9dcd0] [c01c46a4] kasan_report+0xfc/0x180
+ [c0e9dd10] [c0895150] memchr+0x24/0x74
+ [c0e9dd30] [c00a9e58] msg_print_text+0x124/0x574
+ [c0e9dde0] [c00ab730] console_unlock+0x114/0x4f8
+ [c0e9de40] [c00adc80] vprintk_emit+0x188/0x1c4
+ [c0e9de80] [c00ae3e4] printk+0xa8/0xcc
+ [c0e9df20] [c0c27e44] early_irq_init+0x38/0x108
+ [c0e9df50] [c0c15434] start_kernel+0x310/0x488
+ [c0e9dff0] [00003484] 0x3484
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/entry_32.S | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -745,6 +745,9 @@ fast_exception_return:
+ mtcr r10
+ lwz r10,_LINK(r11)
+ mtlr r10
++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
++ li r10, 0
++ stw r10, 8(r11)
+ REST_GPR(10, r11)
+ #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+ mtspr SPRN_NRI, r0
+@@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+ mtcrf 0xFF,r10
+ mtlr r11
+
++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
++ li r10, 0
++ stw r10, 8(r1)
+ /*
+ * Once we put values in SRR0 and SRR1, we are in a state
+ * where exceptions are not recoverable, since taking an
+@@ -1021,6 +1027,9 @@ exc_exit_restart_end:
+ mtlr r11
+ lwz r10,_CCR(r1)
+ mtcrf 0xff,r10
++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
++ li r10, 0
++ stw r10, 8(r1)
+ REST_2GPRS(9, r1)
+ .globl exc_exit_restart
+ exc_exit_restart:
--- /dev/null
+From 7104dccfd052fde51eecc9972dad9c40bd3e0d11 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Fri, 15 Feb 2019 20:20:20 +1000
+Subject: powerpc/64s/hash: Fix assert_slb_presence() use of the slbfee. instruction
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 7104dccfd052fde51eecc9972dad9c40bd3e0d11 upstream.
+
+The slbfee. instruction must have bit 24 of RB clear, failure to do
+so can result in false negatives that result in incorrect assertions.
+
+This is not obvious from the ISA v3.0B document, which only says:
+
+ The hardware ignores the contents of RB 36:38 40:63 -- p.1032
+
+This patch fixes the bug and also clears all other bits from PPC bit
+36-63, which is good practice when dealing with reserved or ignored
+bits.
+
+Fixes: e15a4fea4dee ("powerpc/64s/hash: Add some SLB debugging tests")
+Cc: stable@vger.kernel.org # v4.20+
+Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/slb.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/powerpc/mm/slb.c
++++ b/arch/powerpc/mm/slb.c
+@@ -69,6 +69,11 @@ static void assert_slb_presence(bool pre
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
+
++ /*
++ * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
++ * ignores all other bits from 0-27, so just clear them all.
++ */
++ ea &= ~((1UL << 28) - 1);
+ asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
+
+ WARN_ON(present == (tmp == 0));
--- /dev/null
+From 36da5ff0bea2dc67298150ead8d8471575c54c7d Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Fri, 25 Jan 2019 12:03:55 +0000
+Subject: powerpc/83xx: Also save/restore SPRG4-7 during suspend
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 36da5ff0bea2dc67298150ead8d8471575c54c7d upstream.
+
+The 83xx has 8 SPRG registers and uses at least SPRG4
+for DTLB handling LRU.
+
+Fixes: 2319f1239592 ("powerpc/mm: e300c2/c3/c4 TLB errata workaround")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/83xx/suspend-asm.S | 34 +++++++++++++++++++++++-------
+ 1 file changed, 27 insertions(+), 7 deletions(-)
+
+--- a/arch/powerpc/platforms/83xx/suspend-asm.S
++++ b/arch/powerpc/platforms/83xx/suspend-asm.S
+@@ -26,13 +26,13 @@
+ #define SS_MSR 0x74
+ #define SS_SDR1 0x78
+ #define SS_LR 0x7c
+-#define SS_SPRG 0x80 /* 4 SPRGs */
+-#define SS_DBAT 0x90 /* 8 DBATs */
+-#define SS_IBAT 0xd0 /* 8 IBATs */
+-#define SS_TB 0x110
+-#define SS_CR 0x118
+-#define SS_GPREG 0x11c /* r12-r31 */
+-#define STATE_SAVE_SIZE 0x16c
++#define SS_SPRG 0x80 /* 8 SPRGs */
++#define SS_DBAT 0xa0 /* 8 DBATs */
++#define SS_IBAT 0xe0 /* 8 IBATs */
++#define SS_TB 0x120
++#define SS_CR 0x128
++#define SS_GPREG 0x12c /* r12-r31 */
++#define STATE_SAVE_SIZE 0x17c
+
+ .section .data
+ .align 5
+@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
+ stw r7, SS_SPRG+12(r3)
+ stw r8, SS_SDR1(r3)
+
++ mfspr r4, SPRN_SPRG4
++ mfspr r5, SPRN_SPRG5
++ mfspr r6, SPRN_SPRG6
++ mfspr r7, SPRN_SPRG7
++
++ stw r4, SS_SPRG+16(r3)
++ stw r5, SS_SPRG+20(r3)
++ stw r6, SS_SPRG+24(r3)
++ stw r7, SS_SPRG+28(r3)
++
+ mfspr r4, SPRN_DBAT0U
+ mfspr r5, SPRN_DBAT0L
+ mfspr r6, SPRN_DBAT1U
+@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
+ mtspr SPRN_IBAT7U, r6
+ mtspr SPRN_IBAT7L, r7
+
++ lwz r4, SS_SPRG+16(r3)
++ lwz r5, SS_SPRG+20(r3)
++ lwz r6, SS_SPRG+24(r3)
++ lwz r7, SS_SPRG+28(r3)
++
++ mtspr SPRN_SPRG4, r4
++ mtspr SPRN_SPRG5, r5
++ mtspr SPRN_SPRG6, r6
++ mtspr SPRN_SPRG7, r7
++
+ lwz r4, SS_SPRG+0(r3)
+ lwz r5, SS_SPRG+4(r3)
+ lwz r6, SS_SPRG+8(r3)
--- /dev/null
+From fe1ef6bcdb4fca33434256a802a3ed6aacf0bd2f Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Fri, 8 Feb 2019 14:33:19 +0000
+Subject: powerpc: Fix 32-bit KVM-PR lockup and host crash with MacOS guest
+
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+
+commit fe1ef6bcdb4fca33434256a802a3ed6aacf0bd2f upstream.
+
+Commit 8792468da5e1 "powerpc: Add the ability to save FPU without
+giving it up" unexpectedly removed the MSR_FE0 and MSR_FE1 bits from
+the bitmask used to update the MSR of the previous thread in
+__giveup_fpu() causing a KVM-PR MacOS guest to lockup and panic the
+host kernel.
+
+Leaving FE0/1 enabled means unrelated processes might receive FPEs
+when they're not expecting them and crash. In particular if this
+happens to init the host will then panic.
+
+eg (transcribed):
+ qemu-system-ppc[837]: unhandled signal 8 at 12cc9ce4 nip 12cc9ce4 lr 12cc9ca4 code 0
+ systemd[1]: unhandled signal 8 at 202f02e0 nip 202f02e0 lr 001003d4 code 0
+ Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
+
+Reinstate these bits to the MSR bitmask to enable MacOS guests to run
+under 32-bit KVM-PR once again without issue.
+
+Fixes: 8792468da5e1 ("powerpc: Add the ability to save FPU without giving it up")
+Cc: stable@vger.kernel.org # v4.6+
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/process.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -176,7 +176,7 @@ static void __giveup_fpu(struct task_str
+
+ save_fpu(tsk);
+ msr = tsk->thread.regs->msr;
+- msr &= ~MSR_FP;
++ msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
+ #ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr &= ~MSR_VSX;
--- /dev/null
+From 35f2806b481f5b9207f25e1886cba5d1c4d12cc7 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Fri, 22 Feb 2019 22:55:31 +0530
+Subject: powerpc/hugetlb: Don't do runtime allocation of 16G pages in LPAR configuration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 35f2806b481f5b9207f25e1886cba5d1c4d12cc7 upstream.
+
+We added runtime allocation of 16G pages in commit 4ae279c2c96a
+("powerpc/mm/hugetlb: Allow runtime allocation of 16G.") That was done
+to enable 16G allocation on PowerNV and KVM config. In case of KVM
+config, we mostly would have the entire guest RAM backed by 16G
+hugetlb pages for this to work. PAPR do support partial backing of
+guest RAM with hugepages via ibm,expected#pages node of memory node in
+the device tree. This means rest of the guest RAM won't be backed by
+16G contiguous pages in the host and hence a hash page table insertion
+can fail in such case.
+
+An example error message will look like
+
+ hash-mmu: mm: Hashing failure ! EA=0x7efc00000000 access=0x8000000000000006 current=readback
+ hash-mmu: trap=0x300 vsid=0x67af789 ssize=1 base psize=14 psize 14 pte=0xc000000400000386
+ readback[12260]: unhandled signal 7 at 00007efc00000000 nip 00000000100012d0 lr 000000001000127c code 2
+
+This patch address that by preventing runtime allocation of 16G
+hugepages in LPAR config. To allocate 16G hugetlb one need to kernel
+command line hugepagesz=16G hugepages=<number of 16G pages>
+
+With radix translation mode we don't run into this issue.
+
+This change will prevent runtime allocation of 16G hugetlb pages on
+kvm with hash translation mode. However, with the current upstream it
+was observed that 16G hugetlbfs backed guest doesn't boot at all.
+
+We observe boot failure with the below message:
+ [131354.647546] KVM: map_vrma at 0 failed, ret=-4
+
+That means this patch is not resulting in an observable regression.
+Once we fix the boot issue with 16G hugetlb backed memory, we need to
+use ibm,expected#pages memory node attribute to indicate 16G page
+reservation to the guest. This will also enable partial backing of
+guest RAM with 16G pages.
+
+Fixes: 4ae279c2c96a ("powerpc/mm/hugetlb: Allow runtime allocation of 16G.")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/hugetlb.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
++++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
+@@ -35,6 +35,14 @@ static inline int hstate_get_psize(struc
+ #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+ static inline bool gigantic_page_supported(void)
+ {
++ /*
++ * We used gigantic page reservation with hypervisor assist in some case.
++ * We cannot use runtime allocation of gigantic pages in those platforms
++ * This is hash translation mode LPARs.
++ */
++ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
++ return false;
++
+ return true;
+ }
+ #endif
--- /dev/null
+From c3c7470c75566a077c8dc71dcf8f1948b8ddfab4 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Fri, 22 Feb 2019 13:22:08 +1100
+Subject: powerpc/kvm: Save and restore host AMR/IAMR/UAMOR
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit c3c7470c75566a077c8dc71dcf8f1948b8ddfab4 upstream.
+
+When the hash MMU is active the AMR, IAMR and UAMOR are used for
+pkeys. The AMR is directly writable by user space, and the UAMOR masks
+those writes, meaning both registers are effectively user register
+state. The IAMR is used to create an execute only key.
+
+Also we must maintain the value of at least the AMR when running in
+process context, so that any memory accesses done by the kernel on
+behalf of the process are correctly controlled by the AMR.
+
+Although we are correctly switching all registers when going into a
+guest, on returning to the host we just write 0 into all regs, except
+on Power9 where we restore the IAMR correctly.
+
+This could be observed by a user process if it writes the AMR, then
+runs a guest and we then return immediately to it without
+rescheduling. Because we have written 0 to the AMR that would have the
+effect of granting read/write permission to pages that the process was
+trying to protect.
+
+In addition, when using the Radix MMU, the AMR can prevent inadvertent
+kernel access to userspace data, writing 0 to the AMR disables that
+protection.
+
+So save and restore AMR, IAMR and UAMOR.
+
+Fixes: cf43d3b26452 ("powerpc: Enable pkey subsystem")
+Cc: stable@vger.kernel.org # v4.16+
+Signed-off-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ #define STACK_SLOT_DAWR (SFS-56)
+ #define STACK_SLOT_DAWRX (SFS-64)
+ #define STACK_SLOT_HFSCR (SFS-72)
++#define STACK_SLOT_AMR (SFS-80)
++#define STACK_SLOT_UAMOR (SFS-88)
+ /* the following is used by the P9 short path */
+ #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
+
+@@ -726,11 +728,9 @@ BEGIN_FTR_SECTION
+ mfspr r5, SPRN_TIDR
+ mfspr r6, SPRN_PSSCR
+ mfspr r7, SPRN_PID
+- mfspr r8, SPRN_IAMR
+ std r5, STACK_SLOT_TID(r1)
+ std r6, STACK_SLOT_PSSCR(r1)
+ std r7, STACK_SLOT_PID(r1)
+- std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_HFSCR
+ std r5, STACK_SLOT_HFSCR(r1)
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+@@ -738,11 +738,18 @@ BEGIN_FTR_SECTION
+ mfspr r5, SPRN_CIABR
+ mfspr r6, SPRN_DAWR
+ mfspr r7, SPRN_DAWRX
++ mfspr r8, SPRN_IAMR
+ std r5, STACK_SLOT_CIABR(r1)
+ std r6, STACK_SLOT_DAWR(r1)
+ std r7, STACK_SLOT_DAWRX(r1)
++ std r8, STACK_SLOT_IAMR(r1)
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
++ mfspr r5, SPRN_AMR
++ std r5, STACK_SLOT_AMR(r1)
++ mfspr r6, SPRN_UAMOR
++ std r6, STACK_SLOT_UAMOR(r1)
++
+ BEGIN_FTR_SECTION
+ /* Set partition DABR */
+ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+@@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_3
+ mtspr SPRN_PSPB, r0
+ mtspr SPRN_WORT, r0
+ BEGIN_FTR_SECTION
+- mtspr SPRN_IAMR, r0
+ mtspr SPRN_TCSCR, r0
+ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+ li r0, 1
+ sldi r0, r0, 31
+ mtspr SPRN_MMCRS, r0
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+-8:
+
+- /* Save and reset AMR and UAMOR before turning on the MMU */
++ /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
++ ld r8, STACK_SLOT_IAMR(r1)
++ mtspr SPRN_IAMR, r8
++
++8: /* Power7 jumps back in here */
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+- li r6,0
+- mtspr SPRN_AMR,r6
++ ld r5,STACK_SLOT_AMR(r1)
++ ld r6,STACK_SLOT_UAMOR(r1)
++ mtspr SPRN_AMR, r5
+ mtspr SPRN_UAMOR, r6
+
+ /* Switch DSCR back to host value */
+@@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_TID(r1)
+ ld r6, STACK_SLOT_PSSCR(r1)
+ ld r7, STACK_SLOT_PID(r1)
+- ld r8, STACK_SLOT_IAMR(r1)
+ mtspr SPRN_TIDR, r5
+ mtspr SPRN_PSSCR, r6
+ mtspr SPRN_PID, r7
+- mtspr SPRN_IAMR, r8
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ #ifdef CONFIG_PPC_RADIX_MMU
--- /dev/null
+From 19f8a5b5be2898573a5e1dc1db93e8d40117606a Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 12 Feb 2019 11:58:29 +1100
+Subject: powerpc/powernv: Don't reprogram SLW image on every KVM guest entry/exit
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 19f8a5b5be2898573a5e1dc1db93e8d40117606a upstream.
+
+Commit 24be85a23d1f ("powerpc/powernv: Clear PECE1 in LPCR via stop-api
+only on Hotplug", 2017-07-21) added two calls to opal_slw_set_reg()
+inside pnv_cpu_offline(), with the aim of changing the LPCR value in
+the SLW image to disable wakeups from the decrementer while a CPU is
+offline. However, pnv_cpu_offline() gets called each time a secondary
+CPU thread is woken up to participate in running a KVM guest, that is,
+not just when a CPU is offlined.
+
+Since opal_slw_set_reg() is a very slow operation (with observed
+execution times around 20 milliseconds), this means that an offline
+secondary CPU can often be busy doing the opal_slw_set_reg() call
+when the primary CPU wants to grab all the secondary threads so that
+it can run a KVM guest. This leads to messages like "KVM: couldn't
+grab CPU n" being printed and guest execution failing.
+
+There is no need to reprogram the SLW image on every KVM guest entry
+and exit. So that we do it only when a CPU is really transitioning
+between online and offline, this moves the calls to
+pnv_program_cpu_hotplug_lpcr() into pnv_smp_cpu_kill_self().
+
+Fixes: 24be85a23d1f ("powerpc/powernv: Clear PECE1 in LPCR via stop-api only on Hotplug")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/powernv.h | 2 ++
+ arch/powerpc/platforms/powernv/idle.c | 27 ++-------------------------
+ arch/powerpc/platforms/powernv/smp.c | 25 +++++++++++++++++++++++++
+ 3 files changed, 29 insertions(+), 25 deletions(-)
+
+--- a/arch/powerpc/include/asm/powernv.h
++++ b/arch/powerpc/include/asm/powernv.h
+@@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct
+ unsigned long *flags, unsigned long *status,
+ int count);
+
++void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
++
+ void pnv_tm_init(void);
+ #else
+ static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
+--- a/arch/powerpc/platforms/powernv/idle.c
++++ b/arch/powerpc/platforms/powernv/idle.c
+@@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_
+ #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+ #ifdef CONFIG_HOTPLUG_CPU
+-static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
++
++void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
+ {
+ u64 pir = get_hard_smp_processor_id(cpu);
+
+@@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned i
+ {
+ unsigned long srr1;
+ u32 idle_states = pnv_get_supported_cpuidle_states();
+- u64 lpcr_val;
+-
+- /*
+- * We don't want to take decrementer interrupts while we are
+- * offline, so clear LPCR:PECE1. We keep PECE2 (and
+- * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
+- *
+- * If the CPU gets woken up by a special wakeup, ensure that
+- * the SLW engine sets LPCR with decrementer bit cleared, else
+- * the CPU will come back to the kernel due to a spurious
+- * wakeup.
+- */
+- lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
+ __ppc64_runlatch_off();
+
+@@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned i
+
+ __ppc64_runlatch_on();
+
+- /*
+- * Re-enable decrementer interrupts in LPCR.
+- *
+- * Further, we want stop states to be woken up by decrementer
+- * for non-hotplug cases. So program the LPCR via stop api as
+- * well.
+- */
+- lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
+- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+-
+ return srr1;
+ }
+ #endif
+--- a/arch/powerpc/platforms/powernv/smp.c
++++ b/arch/powerpc/platforms/powernv/smp.c
+@@ -39,6 +39,7 @@
+ #include <asm/cpuidle.h>
+ #include <asm/kexec.h>
+ #include <asm/reg.h>
++#include <asm/powernv.h>
+
+ #include "powernv.h"
+
+@@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
+ {
+ unsigned int cpu;
+ unsigned long srr1, wmask;
++ u64 lpcr_val;
+
+ /* Standard hot unplug procedure */
+ /*
+@@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ wmask = SRR1_WAKEMASK_P8;
+
++ /*
++ * We don't want to take decrementer interrupts while we are
++ * offline, so clear LPCR:PECE1. We keep PECE2 (and
++ * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
++ *
++ * If the CPU gets woken up by a special wakeup, ensure that
++ * the SLW engine sets LPCR with decrementer bit cleared, else
++ * the CPU will come back to the kernel due to a spurious
++ * wakeup.
++ */
++ lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
++ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
++
+ while (!generic_check_cpu_restart(cpu)) {
+ /*
+ * Clear IPI flag, since we don't handle IPIs while
+@@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)
+
+ }
+
++ /*
++ * Re-enable decrementer interrupts in LPCR.
++ *
++ * Further, we want stop states to be woken up by decrementer
++ * for non-hotplug cases. So program the LPCR via stop api as
++ * well.
++ */
++ lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
++ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
++
+ DBG("CPU%d coming online...\n", cpu);
+ }
+
--- /dev/null
+From 7b62f9bd2246b7d3d086e571397c14ba52645ef1 Mon Sep 17 00:00:00 2001
+From: Jordan Niethe <jniethe5@gmail.com>
+Date: Wed, 27 Feb 2019 14:02:29 +1100
+Subject: powerpc/powernv: Make opal log only readable by root
+
+From: Jordan Niethe <jniethe5@gmail.com>
+
+commit 7b62f9bd2246b7d3d086e571397c14ba52645ef1 upstream.
+
+Currently the opal log is globally readable. It is kernel policy to
+limit the visibility of physical addresses / kernel pointers to root.
+Given this and the fact the opal log may contain this information it
+would be better to limit the readability to root.
+
+Fixes: bfc36894a48b ("powerpc/powernv: Add OPAL message log interface")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
+Reviewed-by: Stewart Smith <stewart@linux.ibm.com>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal-msglog.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/powernv/opal-msglog.c
++++ b/arch/powerpc/platforms/powernv/opal-msglog.c
+@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct f
+ }
+
+ static struct bin_attribute opal_msglog_attr = {
+- .attr = {.name = "msglog", .mode = 0444},
++ .attr = {.name = "msglog", .mode = 0400},
+ .read = opal_msglog_read
+ };
+
--- /dev/null
+From ca6d5149d2ad0a8d2f9c28cbe379802260a0a5e0 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 14 Feb 2019 11:08:29 +1100
+Subject: powerpc/ptrace: Simplify vr_get/set() to avoid GCC warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit ca6d5149d2ad0a8d2f9c28cbe379802260a0a5e0 upstream.
+
+GCC 8 warns about the logic in vr_get/set(), which with -Werror breaks
+the build:
+
+ In function ‘user_regset_copyin’,
+ inlined from ‘vr_set’ at arch/powerpc/kernel/ptrace.c:628:9:
+ include/linux/regset.h:295:4: error: ‘memcpy’ offset [-527, -529] is
+ out of the bounds [0, 16] of object ‘vrsave’ with type ‘union
+ <anonymous>’ [-Werror=array-bounds]
+ arch/powerpc/kernel/ptrace.c: In function ‘vr_set’:
+ arch/powerpc/kernel/ptrace.c:623:5: note: ‘vrsave’ declared here
+ } vrsave;
+
+This has been identified as a regression in GCC, see GCC bug 88273.
+
+However we can avoid the warning and also simplify the logic and make
+it more robust.
+
+Currently we pass -1 as end_pos to user_regset_copyout(). This says
+"copy up to the end of the regset".
+
+The definition of the regset is:
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .get = vr_get, .set = vr_set
+ },
+
+The end is calculated as (n * size), ie. 34 * sizeof(vector128).
+
+In vr_get/set() we pass start_pos as 33 * sizeof(vector128), meaning
+we can copy up to sizeof(vector128) into/out-of vrsave.
+
+The on-stack vrsave is defined as:
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+
+And elf_vrreg_t is:
+ typedef __vector128 elf_vrreg_t;
+
+So there is no bug, but we rely on all those sizes lining up,
+otherwise we would have a kernel stack exposure/overwrite on our
+hands.
+
+Rather than relying on that we can pass an explict end_pos based on
+the sizeof(vrsave). The result should be exactly the same but it's
+more obviously not over-reading/writing the stack and it avoids the
+compiler warning.
+
+Reported-by: Meelis Roos <mroos@linux.ee>
+Reported-by: Mathieu Malaterre <malat@debian.org>
+Cc: stable@vger.kernel.org
+Tested-by: Mathieu Malaterre <malat@debian.org>
+Tested-by: Meelis Roos <mroos@linux.ee>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/ptrace.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -561,6 +561,7 @@ static int vr_get(struct task_struct *ta
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
++ int start, end;
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+@@ -569,8 +570,10 @@ static int vr_get(struct task_struct *ta
+
+ vrsave.word = target->thread.vrsave;
+
++ start = 33 * sizeof(vector128);
++ end = start + sizeof(vrsave);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+- 33 * sizeof(vector128), -1);
++ start, end);
+ }
+
+ return ret;
+@@ -608,6 +611,7 @@ static int vr_set(struct task_struct *ta
+ /*
+ * We use only the first word of vrsave.
+ */
++ int start, end;
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+@@ -616,8 +620,10 @@ static int vr_set(struct task_struct *ta
+
+ vrsave.word = target->thread.vrsave;
+
++ start = 33 * sizeof(vector128);
++ end = start + sizeof(vrsave);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+- 33 * sizeof(vector128), -1);
++ start, end);
+ if (!ret)
+ target->thread.vrsave = vrsave.word;
+ }
--- /dev/null
+From 1b5fc84aba170bdfe3533396ca9662ceea1609b7 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Mon, 26 Nov 2018 12:01:05 +1000
+Subject: powerpc/smp: Fix NMI IPI timeout
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 1b5fc84aba170bdfe3533396ca9662ceea1609b7 upstream.
+
+The NMI IPI timeout logic is broken, if __smp_send_nmi_ipi() times out
+on the first condition, delay_us will be zero which will send it into
+the second spin loop with no timeout so it will spin forever.
+
+Fixes: 5b73151fff63 ("powerpc: NMI IPI make NMI IPIs fully sychronous")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/smp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -519,7 +519,7 @@ int __smp_send_nmi_ipi(int cpu, void (*f
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+- break;
++ goto timeout;
+ }
+ }
+
+@@ -530,10 +530,11 @@ int __smp_send_nmi_ipi(int cpu, void (*f
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+- break;
++ goto timeout;
+ }
+ }
+
++timeout:
+ if (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
+ ret = 0;
--- /dev/null
+From 88b9a3d1425a436e95c41f09986fdae2daee437a Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Mon, 26 Nov 2018 12:01:06 +1000
+Subject: powerpc/smp: Fix NMI IPI xmon timeout
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 88b9a3d1425a436e95c41f09986fdae2daee437a upstream.
+
+The xmon debugger IPI handler waits in the callback function while
+xmon is still active. This means they don't complete the IPI, and the
+initiator always times out waiting for them.
+
+Things manage to work after the timeout because there is some fallback
+logic to keep NMI IPI state sane in case of the timeout, but this is a
+bit ugly.
+
+This patch changes NMI IPI back to half-asynchronous (i.e., wait for
+everyone to call in, do not wait for IPI function to complete), but
+the complexity is avoided by going one step further and allowing new
+IPIs to be issued before the IPI functions all complete.
+
+If synchronization against that is required, it is left up to the
+caller, but current callers don't require that. In fact with the
+timeout handling, callers must be able to cope with this already.
+
+Fixes: 5b73151fff63 ("powerpc: NMI IPI make NMI IPIs fully sychronous")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/smp.c | 93 ++++++++++++++--------------------------------
+ 1 file changed, 29 insertions(+), 64 deletions(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -358,13 +358,12 @@ void arch_send_call_function_ipi_mask(co
+ * NMI IPIs may not be recoverable, so should not be used as ongoing part of
+ * a running system. They can be used for crash, debug, halt/reboot, etc.
+ *
+- * NMI IPIs are globally single threaded. No more than one in progress at
+- * any time.
+- *
+ * The IPI call waits with interrupts disabled until all targets enter the
+- * NMI handler, then the call returns.
++ * NMI handler, then returns. Subsequent IPIs can be issued before targets
++ * have returned from their handlers, so there is no guarantee about
++ * concurrency or re-entrancy.
+ *
+- * No new NMI can be initiated until targets exit the handler.
++ * A new NMI can be issued before all targets exit the handler.
+ *
+ * The IPI call may time out without all targets entering the NMI handler.
+ * In that case, there is some logic to recover (and ignore subsequent
+@@ -375,7 +374,7 @@ void arch_send_call_function_ipi_mask(co
+
+ static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
+ static struct cpumask nmi_ipi_pending_mask;
+-static int nmi_ipi_busy_count = 0;
++static bool nmi_ipi_busy = false;
+ static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
+
+ static void nmi_ipi_lock_start(unsigned long *flags)
+@@ -414,7 +413,7 @@ static void nmi_ipi_unlock_end(unsigned
+ */
+ int smp_handle_nmi_ipi(struct pt_regs *regs)
+ {
+- void (*fn)(struct pt_regs *);
++ void (*fn)(struct pt_regs *) = NULL;
+ unsigned long flags;
+ int me = raw_smp_processor_id();
+ int ret = 0;
+@@ -425,29 +424,17 @@ int smp_handle_nmi_ipi(struct pt_regs *r
+ * because the caller may have timed out.
+ */
+ nmi_ipi_lock_start(&flags);
+- if (!nmi_ipi_busy_count)
+- goto out;
+- if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
+- goto out;
+-
+- fn = nmi_ipi_function;
+- if (!fn)
+- goto out;
+-
+- cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+- nmi_ipi_busy_count++;
+- nmi_ipi_unlock();
+-
+- ret = 1;
+-
+- fn(regs);
+-
+- nmi_ipi_lock();
+- if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+- nmi_ipi_busy_count--;
+-out:
++ if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
++ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
++ fn = READ_ONCE(nmi_ipi_function);
++ WARN_ON_ONCE(!fn);
++ ret = 1;
++ }
+ nmi_ipi_unlock_end(&flags);
+
++ if (fn)
++ fn(regs);
++
+ return ret;
+ }
+
+@@ -473,7 +460,7 @@ static void do_smp_send_nmi_ipi(int cpu,
+ * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
+ * - fn is the target callback function.
+ * - delay_us > 0 is the delay before giving up waiting for targets to
+- * complete executing the handler, == 0 specifies indefinite delay.
++ * begin executing the handler, == 0 specifies indefinite delay.
+ */
+ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
+ {
+@@ -487,31 +474,33 @@ int __smp_send_nmi_ipi(int cpu, void (*f
+ if (unlikely(!smp_ops))
+ return 0;
+
+- /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
+ nmi_ipi_lock_start(&flags);
+- while (nmi_ipi_busy_count) {
++ while (nmi_ipi_busy) {
+ nmi_ipi_unlock_end(&flags);
+- spin_until_cond(nmi_ipi_busy_count == 0);
++ spin_until_cond(!nmi_ipi_busy);
+ nmi_ipi_lock_start(&flags);
+ }
+-
++ nmi_ipi_busy = true;
+ nmi_ipi_function = fn;
+
++ WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
++
+ if (cpu < 0) {
+ /* ALL_OTHERS */
+ cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
+ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+ } else {
+- /* cpumask starts clear */
+ cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
+ }
+- nmi_ipi_busy_count++;
++
+ nmi_ipi_unlock();
+
++ /* Interrupts remain hard disabled */
++
+ do_smp_send_nmi_ipi(cpu, safe);
+
+ nmi_ipi_lock();
+- /* nmi_ipi_busy_count is held here, so unlock/lock is okay */
++ /* nmi_ipi_busy is set here, so unlock/lock is okay */
+ while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ nmi_ipi_unlock();
+ udelay(1);
+@@ -519,34 +508,19 @@ int __smp_send_nmi_ipi(int cpu, void (*f
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+- goto timeout;
++ break;
+ }
+ }
+
+- while (nmi_ipi_busy_count > 1) {
+- nmi_ipi_unlock();
+- udelay(1);
+- nmi_ipi_lock();
+- if (delay_us) {
+- delay_us--;
+- if (!delay_us)
+- goto timeout;
+- }
+- }
+-
+-timeout:
+ if (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
+ ret = 0;
+ cpumask_clear(&nmi_ipi_pending_mask);
+ }
+- if (nmi_ipi_busy_count > 1) {
+- /* Timeout waiting for CPUs to execute fn */
+- ret = 0;
+- nmi_ipi_busy_count = 1;
+- }
+
+- nmi_ipi_busy_count--;
++ nmi_ipi_function = NULL;
++ nmi_ipi_busy = false;
++
+ nmi_ipi_unlock_end(&flags);
+
+ return ret;
+@@ -614,17 +588,8 @@ void crash_send_ipi(void (*crash_ipi_cal
+ static void nmi_stop_this_cpu(struct pt_regs *regs)
+ {
+ /*
+- * This is a special case because it never returns, so the NMI IPI
+- * handling would never mark it as done, which makes any later
+- * smp_send_nmi_ipi() call spin forever. Mark it done now.
+- *
+ * IRQs are already hard disabled by the smp_handle_nmi_ipi.
+ */
+- nmi_ipi_lock();
+- if (nmi_ipi_busy_count > 1)
+- nmi_ipi_busy_count--;
+- nmi_ipi_unlock();
+-
+ spin_begin();
+ while (1)
+ spin_cpu_relax();
--- /dev/null
+From 0bbea75c476b77fa7d7811d6be911cc7583e640f Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Tue, 22 Jan 2019 14:11:24 +0000
+Subject: powerpc/traps: fix recoverability of machine check handling on book3s/32
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 0bbea75c476b77fa7d7811d6be911cc7583e640f upstream.
+
+Looks like book3s/32 doesn't set RI on machine check, so
+checking RI before calling die() will always be fatal
+although this is not an issue in most cases.
+
+Fixes: b96672dd840f ("powerpc: Machine check interrupt is a non-maskable interrupt")
+Fixes: daf00ae71dad ("powerpc/traps: restore recoverability of machine_check interrupts")
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/traps.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -763,15 +763,15 @@ void machine_check_exception(struct pt_r
+ if (check_io_access(regs))
+ goto bail;
+
+- /* Must die if the interrupt is not recoverable */
+- if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable Machine check");
+-
+ if (!nested)
+ nmi_exit();
+
+ die("Machine check", regs, SIGBUS);
+
++ /* Must die if the interrupt is not recoverable */
++ if (!(regs->msr & MSR_RI))
++ nmi_panic(regs, "Unrecoverable Machine check");
++
+ return;
+
+ bail:
--- /dev/null
+From 9bf3d3c4e4fd82c7174f4856df372ab2a71005b9 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Tue, 29 Jan 2019 16:37:55 +0000
+Subject: powerpc/traps: Fix the message printed when stack overflows
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 9bf3d3c4e4fd82c7174f4856df372ab2a71005b9 upstream.
+
+Today's message is useless:
+
+ [ 42.253267] Kernel stack overflow in process (ptrval), r1=c65500b0
+
+This patch fixes it:
+
+ [ 66.905235] Kernel stack overflow in process sh[356], r1=c65560b0
+
+Fixes: ad67b74d2469 ("printk: hash addresses printed with %p")
+Cc: stable@vger.kernel.org # v4.15+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+[mpe: Use task_pid_nr()]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/traps.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1542,8 +1542,8 @@ bail:
+
+ void StackOverflow(struct pt_regs *regs)
+ {
+- printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
+- current, regs->gpr[1]);
++ pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
++ current->comm, task_pid_nr(current), regs->gpr[1]);
+ debugger(regs);
+ show_regs(regs);
+ panic("kernel stack overflow");
--- /dev/null
+From 6d183ca8baec983dc4208ca45ece3c36763df912 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Thu, 21 Feb 2019 19:08:37 +0000
+Subject: powerpc/wii: properly disable use of BATs when requested.
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 6d183ca8baec983dc4208ca45ece3c36763df912 upstream.
+
+'nobats' kernel parameter or some options like CONFIG_DEBUG_PAGEALLOC
+deny the use of BATS for mapping memory.
+
+This patch makes sure that the specific wii RAM mapping function
+takes it into account as well.
+
+Fixes: de32400dd26e ("wii: use both mem1 and mem2 as ram")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jonathan Neuschafer <j.neuschaefer@gmx.net>
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/embedded6xx/wii.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/platforms/embedded6xx/wii.c
++++ b/arch/powerpc/platforms/embedded6xx/wii.c
+@@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(
+ /* MEM2 64MB@0x10000000 */
+ delta = wii_hole_start + wii_hole_size;
+ size = top - delta;
++
++ if (__map_without_bats)
++ return delta;
++
+ for (bl = 128<<10; bl < max_size; bl <<= 1) {
+ if (bl * 2 > size)
+ break;
--- /dev/null
+From 92da008fa21034c369cdb8ca2b629fe5c196826b Mon Sep 17 00:00:00 2001
+From: Ben Gardon <bgardon@google.com>
+Date: Tue, 12 Mar 2019 11:45:58 -0700
+Subject: Revert "KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()"
+
+From: Ben Gardon <bgardon@google.com>
+
+commit 92da008fa21034c369cdb8ca2b629fe5c196826b upstream.
+
+This reverts commit 71883a62fcd6c70639fa12cda733378b4d997409.
+
+The above commit contains an optimization to kvm_zap_gfn_range which
+uses gfn-limited TLB flushes, if enabled. If using these limited flushes,
+kvm_zap_gfn_range passes lock_flush_tlb=false to slot_handle_level_range
+which creates a race when the function unlocks to call cond_resched.
+See an example of this race below:
+
+CPU 0 CPU 1 CPU 3
+// zap_direct_gfn_range
+mmu_lock()
+// *ptep == pte_1
+*ptep = 0
+if (lock_flush_tlb)
+ flush_tlbs()
+mmu_unlock()
+ // In invalidate range
+ // MMU notifier
+ mmu_lock()
+ if (pte != 0)
+ *ptep = 0
+ flush = true
+ if (flush)
+ flush_remote_tlbs()
+ mmu_unlock()
+ return
+ // Host MM reallocates
+ // page previously
+ // backing guest memory.
+ // Guest accesses
+ // invalid page
+ // through pte_1
+ // in its TLB!!
+
+Tested: Ran all kvm-unit-tests on a Intel Haswell machine with and
+ without this patch. The patch introduced no new failures.
+
+Signed-off-by: Ben Gardon <bgardon@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 16 +++-------------
+ 1 file changed, 3 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5635,13 +5635,8 @@ void kvm_zap_gfn_range(struct kvm *kvm,
+ {
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+- bool flush_tlb = true;
+- bool flush = false;
+ int i;
+
+- if (kvm_available_flush_tlb_with_range())
+- flush_tlb = false;
+-
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+@@ -5653,17 +5648,12 @@ void kvm_zap_gfn_range(struct kvm *kvm,
+ if (start >= end)
+ continue;
+
+- flush |= slot_handle_level_range(kvm, memslot,
+- kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+- PT_MAX_HUGEPAGE_LEVEL, start,
+- end - 1, flush_tlb);
++ slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
++ PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
++ start, end - 1, true);
+ }
+ }
+
+- if (flush)
+- kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+- gfn_end - gfn_start + 1);
+-
+ spin_unlock(&kvm->mmu_lock);
+ }
+
--- /dev/null
+From 3815a245b50124f0865415dcb606a034e97494d4 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Tue, 5 Mar 2019 16:17:58 -0500
+Subject: security/selinux: fix SECURITY_LSM_NATIVE_LABELS on reused superblock
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+commit 3815a245b50124f0865415dcb606a034e97494d4 upstream.
+
+In the case when we're reusing a superblock, selinux_sb_clone_mnt_opts()
+fails to set set_kern_flags, with the result that
+nfs_clone_sb_security() incorrectly clears NFS_CAP_SECURITY_LABEL.
+
+The result is that if you mount the same NFS filesystem twice, NFS
+security labels are turned off, even if they would work fine if you
+mounted the filesystem only once.
+
+("fixes" may not be exactly the right tag,
+"fixed-other-cases-but-missed-this-one".)
+
+Cc: Scott Mayhew <smayhew@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 0b4d3452b8b4 "security/selinux: allow security_sb_clone_mnt_opts..."
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/hooks.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -959,8 +959,11 @@ static int selinux_sb_clone_mnt_opts(con
+ BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
+
+ /* if fs is reusing a sb, make sure that the contexts match */
+- if (newsbsec->flags & SE_SBINITIALIZED)
++ if (newsbsec->flags & SE_SBINITIALIZED) {
++ if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
++ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ return selinux_cmp_sb_context(oldsb, newsb);
++ }
+
+ mutex_lock(&newsbsec->lock);
+
--- /dev/null
+From 292c997a1970f8d1e1dfa354ed770a22f7b5a434 Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Sat, 9 Mar 2019 00:07:34 +0800
+Subject: selinux: add the missing walk_size + len check in selinux_sctp_bind_connect
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit 292c997a1970f8d1e1dfa354ed770a22f7b5a434 upstream.
+
+As does in __sctp_connect(), when checking addrs in a while loop, after
+get the addr len according to sa_family, it's necessary to do the check
+walk_size + af->sockaddr_len > addrs_size to make sure it won't access
+an out-of-bounds addr.
+
+The same thing is needed in selinux_sctp_bind_connect(), otherwise an
+out-of-bounds issue can be triggered:
+
+ [14548.772313] BUG: KASAN: slab-out-of-bounds in selinux_sctp_bind_connect+0x1aa/0x1f0
+ [14548.927083] Call Trace:
+ [14548.938072] dump_stack+0x9a/0xe9
+ [14548.953015] print_address_description+0x65/0x22e
+ [14548.996524] kasan_report.cold.6+0x92/0x1a6
+ [14549.015335] selinux_sctp_bind_connect+0x1aa/0x1f0
+ [14549.036947] security_sctp_bind_connect+0x58/0x90
+ [14549.058142] __sctp_setsockopt_connectx+0x5a/0x150 [sctp]
+ [14549.081650] sctp_setsockopt.part.24+0x1322/0x3ce0 [sctp]
+
+Cc: stable@vger.kernel.org
+Fixes: d452930fd3b9 ("selinux: Add SCTP support")
+Reported-by: Chunyu Hu <chuhu@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/hooks.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -5120,6 +5120,9 @@ static int selinux_sctp_bind_connect(str
+ return -EINVAL;
+ }
+
++ if (walk_size + len > addrlen)
++ return -EINVAL;
++
+ err = -EINVAL;
+ switch (optname) {
+ /* Bind checks */
--- /dev/null
+From f4817843e39ce78aace0195a57d4e8500a65a898 Mon Sep 17 00:00:00 2001
+From: Lubomir Rintel <lkundrak@v3.sk>
+Date: Sun, 24 Feb 2019 13:00:53 +0100
+Subject: serial: 8250_of: assume reg-shift of 2 for mrvl,mmp-uart
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+commit f4817843e39ce78aace0195a57d4e8500a65a898 upstream.
+
+There are two other drivers that bind to mrvl,mmp-uart and both of them
+assume register shift of 2 bits. There are device trees that lack the
+property and rely on that assumption.
+
+If this driver wins the race to bind to those devices, it should behave
+the same as the older deprecated driver.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/8250/8250_of.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -130,6 +130,10 @@ static int of_platform_serial_setup(stru
+ port->flags |= UPF_IOREMAP;
+ }
+
++ /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
++ if (of_device_is_compatible(np, "mrvl,mmp-uart"))
++ port->regshift = 2;
++
+ /* Check for registers offset within the devices address range */
+ if (of_property_read_u32(np, "reg-shift", &prop) == 0)
+ port->regshift = prop;
--- /dev/null
+From b896b03bc7fce43a07012cc6bf5e2ab2fddf3364 Mon Sep 17 00:00:00 2001
+From: Jay Dolan <jay.dolan@accesio.com>
+Date: Tue, 12 Feb 2019 21:43:11 -0800
+Subject: serial: 8250_pci: Fix number of ports for ACCES serial cards
+
+From: Jay Dolan <jay.dolan@accesio.com>
+
+commit b896b03bc7fce43a07012cc6bf5e2ab2fddf3364 upstream.
+
+Have the correct number of ports created for ACCES serial cards. Two port
+cards show up as four ports, and four port cards show up as eight.
+
+Fixes: c8d192428f52 ("serial: 8250: added acces i/o products quad and octal serial cards")
+Signed-off-by: Jay Dolan <jay.dolan@accesio.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/8250/8250_pci.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -4575,10 +4575,10 @@ static const struct pci_device_id serial
+ */
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+@@ -4587,10 +4587,10 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+@@ -4599,10 +4599,10 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+@@ -4611,13 +4611,13 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7951 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+@@ -4626,16 +4626,16 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+@@ -4644,13 +4644,13 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7954 },
++ pbn_pericom_PI7C9X7952 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7958 },
++ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7958 },
++ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+@@ -4659,19 +4659,19 @@ static const struct pci_device_id serial
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7958 },
++ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7958 },
++ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_pericom_PI7C9X7958 },
++ pbn_pericom_PI7C9X7954 },
+ /*
+ * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ */
--- /dev/null
+From 78d3820b9bd39028727c6aab7297b63c093db343 Mon Sep 17 00:00:00 2001
+From: Jay Dolan <jay.dolan@accesio.com>
+Date: Tue, 12 Feb 2019 21:43:12 -0800
+Subject: serial: 8250_pci: Have ACCES cards that use the four port Pericom PI7C9X7954 chip use the pci_pericom_setup()
+
+From: Jay Dolan <jay.dolan@accesio.com>
+
+commit 78d3820b9bd39028727c6aab7297b63c093db343 upstream.
+
+The four port Pericom chips have the fourth port at the wrong address.
+Make use of quirk to fix it.
+
+Fixes: c8d192428f52 ("serial: 8250: added acces i/o products quad and octal serial cards")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Jay Dolan <jay.dolan@accesio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/8250/8250_pci.c | 105 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 105 insertions(+)
+
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_seria
+ .setup = pci_default_setup,
+ .exit = pci_plx9050_exit,
+ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_ACCESIO,
++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_pericom_setup,
++ },
+ /*
+ * SBS Technologies, Inc., PMC-OCTALPRO 232
+ */
--- /dev/null
+From 7abab1605139bc41442864c18f9573440f7ca105 Mon Sep 17 00:00:00 2001
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+Date: Fri, 15 Feb 2019 18:45:08 +0200
+Subject: serial: uartps: Fix stuck ISR if RX disabled with non-empty FIFO
+
+From: Anssi Hannula <anssi.hannula@bitwise.fi>
+
+commit 7abab1605139bc41442864c18f9573440f7ca105 upstream.
+
+If RX is disabled while there are still unprocessed bytes in RX FIFO,
+cdns_uart_handle_rx() called from interrupt handler will get stuck in
+the receive loop as read bytes will not get removed from the RX FIFO
+and CDNS_UART_SR_RXEMPTY bit will never get set.
+
+Avoid the stuck handler by checking first if RX is disabled. port->lock
+protects against race with RX-disabling functions.
+
+This HW behavior was mentioned by Nathan Rossi in 43e98facc4a3 ("tty:
+xuartps: Fix RX hang, and TX corruption in termios call") which fixed a
+similar issue in cdns_uart_set_termios().
+The behavior can also be easily verified by e.g. setting
+CDNS_UART_CR_RX_DIS at the beginning of cdns_uart_handle_rx() - the
+following loop will then get stuck.
+
+Resetting the FIFO using RXRST would not set RXEMPTY either so simply
+issuing a reset after RX-disable would not work.
+
+I observe this frequently on a ZynqMP board during heavy RX load at 1M
+baudrate when the reader process exits and thus RX gets disabled.
+
+Fixes: 61ec9016988f ("tty/serial: add support for Xilinx PS UART")
+Signed-off-by: Anssi Hannula <anssi.hannula@bitwise.fi>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/xilinx_uartps.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -364,7 +364,13 @@ static irqreturn_t cdns_uart_isr(int irq
+ cdns_uart_handle_tx(dev_id);
+ isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
+ }
+- if (isrstatus & CDNS_UART_IXR_RXMASK)
++
++ /*
++ * Skip RX processing if RX is disabled as RXEMPTY will never be set
++ * as read bytes will not be removed from the FIFO.
++ */
++ if (isrstatus & CDNS_UART_IXR_RXMASK &&
++ !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
+ cdns_uart_handle_rx(dev_id, isrstatus);
+
+ spin_unlock(&port->lock);
usb-chipidea-tegra-fix-missed-ci_hdrc_remove_device.patch
usb-typec-tps6598x-handle-block-writes-separately-with-plain-i2c-adapters.patch
dmaengine-usb-dmac-make-dmac-system-sleep-callbacks-explicit.patch
+serial-uartps-fix-stuck-isr-if-rx-disabled-with-non-empty-fifo.patch
+serial-8250_of-assume-reg-shift-of-2-for-mrvl-mmp-uart.patch
+serial-8250_pci-fix-number-of-ports-for-acces-serial-cards.patch
+serial-8250_pci-have-acces-cards-that-use-the-four-port-pericom-pi7c9x7954-chip-use-the-pci_pericom_setup.patch
+jbd2-clear-dirty-flag-when-revoking-a-buffer-from-an-older-transaction.patch
+jbd2-fix-compile-warning-when-using-jbuffer_trace.patch
+selinux-add-the-missing-walk_size-len-check-in-selinux_sctp_bind_connect.patch
+security-selinux-fix-security_lsm_native_labels-on-reused-superblock.patch
+powerpc-32-clear-on-stack-exception-marker-upon-exception-return.patch
+powerpc-wii-properly-disable-use-of-bats-when-requested.patch
+powerpc-powernv-make-opal-log-only-readable-by-root.patch
+powerpc-83xx-also-save-restore-sprg4-7-during-suspend.patch
+powerpc-kvm-save-and-restore-host-amr-iamr-uamor.patch
+powerpc-powernv-don-t-reprogram-slw-image-on-every-kvm-guest-entry-exit.patch
+powerpc-64s-hash-fix-assert_slb_presence-use-of-the-slbfee.-instruction.patch
+powerpc-fix-32-bit-kvm-pr-lockup-and-host-crash-with-macos-guest.patch
+powerpc-ptrace-simplify-vr_get-set-to-avoid-gcc-warning.patch
+powerpc-hugetlb-don-t-do-runtime-allocation-of-16g-pages-in-lpar-configuration.patch
+powerpc-smp-fix-nmi-ipi-timeout.patch
+powerpc-smp-fix-nmi-ipi-xmon-timeout.patch
+powerpc-traps-fix-recoverability-of-machine-check-handling-on-book3s-32.patch
+powerpc-traps-fix-the-message-printed-when-stack-overflows.patch
+arm-s3c24xx-fix-boolean-expressions-in-osiris_dvs_notify.patch
+arm64-fix-hcr.tge-status-for-nmi-contexts.patch
+arm64-debug-don-t-propagate-unknown-far-into-si_code-for-debug-signals.patch
+arm64-debug-ensure-debug-handlers-check-triggering-exception-level.patch
+arm64-kvm-fix-architecturally-invalid-reset-value-for-fpexc32_el2.patch
+revert-kvm-mmu-flush-tlb-directly-in-the-kvm_zap_gfn_range.patch