--- /dev/null
+From 7053f80d96967d8e72e9f2a724bbfc3906ce2b07 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Fri, 20 Mar 2020 14:21:16 +1100
+Subject: powerpc/64: Prevent stack protection in early boot
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 7053f80d96967d8e72e9f2a724bbfc3906ce2b07 upstream.
+
+The previous commit reduced the amount of code that is run before we
+setup a paca. However there are still a few remaining functions that
+run with no paca, or worse, with an arbitrary value in r13 that will
+be used as a paca pointer.
+
+In particular the stack protector canary is stored in the paca, so if
+stack protector is activated for any of these functions we will read
+the stack canary from wherever r13 points. If r13 happens to point
+outside of memory we will get a machine check / checkstop.
+
+For example if we modify initialise_paca() to trigger stack
+protection, and then boot in the mambo simulator with r13 poisoned in
+skiboot before calling the kernel:
+
+ DEBUG: 19952232: (19952232): INSTRUCTION: PC=0xC0000000191FC1E8: [0x3C4C006D]: addis r2,r12,0x6D [fetch]
+ DEBUG: 19952236: (19952236): INSTRUCTION: PC=0xC00000001807EAD8: [0x7D8802A6]: mflr r12 [fetch]
+ FATAL ERROR: 19952276: (19952276): Check Stop for 0:0: Machine Check with ME bit of MSR off
+ DEBUG: 19952276: (19952276): INSTRUCTION: PC=0xC0000000191FCA7C: [0xE90D0CF8]: ld r8,0xCF8(r13) [Instruction Failed]
+ INFO: 19952276: (19952277): ** Execution stopped: Mambo Error, Machine Check Stop, **
+ systemsim % bt
+ pc: 0xC0000000191FCA7C initialise_paca+0x54
+ lr: 0xC0000000191FC22C early_setup+0x44
+ stack:0x00000000198CBED0 0x0 +0x0
+ stack:0x00000000198CBF00 0xC0000000191FC22C early_setup+0x44
+ stack:0x00000000198CBF90 0x1801C968 +0x1801C968
+
+So annotate the relevant functions to ensure stack protection is never
+enabled for them.
+
+Fixes: 06ec27aea9fc ("powerpc/64: add stack protector support")
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200320032116.1024773-2-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/paca.c | 4 ++--
+ arch/powerpc/kernel/setup.h | 6 ++++++
+ arch/powerpc/kernel/setup_64.c | 2 +-
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kernel/paca.c
++++ b/arch/powerpc/kernel/paca.c
+@@ -176,7 +176,7 @@ static struct slb_shadow * __init new_sl
+ struct paca_struct **paca_ptrs __read_mostly;
+ EXPORT_SYMBOL(paca_ptrs);
+
+-void __init initialise_paca(struct paca_struct *new_paca, int cpu)
++void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
+ {
+ #ifdef CONFIG_PPC_PSERIES
+ new_paca->lppaca_ptr = NULL;
+@@ -205,7 +205,7 @@ void __init initialise_paca(struct paca_
+ }
+
+ /* Put the paca pointer into r13 and SPRG_PACA */
+-void setup_paca(struct paca_struct *new_paca)
++void __nostackprotector setup_paca(struct paca_struct *new_paca)
+ {
+ /* Setup r13 */
+ local_paca = new_paca;
+--- a/arch/powerpc/kernel/setup.h
++++ b/arch/powerpc/kernel/setup.h
+@@ -8,6 +8,12 @@
+ #ifndef __ARCH_POWERPC_KERNEL_SETUP_H
+ #define __ARCH_POWERPC_KERNEL_SETUP_H
+
++#ifdef CONFIG_CC_IS_CLANG
++#define __nostackprotector
++#else
++#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
++#endif
++
+ void initialize_cache_info(void);
+ void irqstack_early_init(void);
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -279,7 +279,7 @@ void __init record_spr_defaults(void)
+ * device-tree is not accessible via normal means at this point.
+ */
+
+-void __init early_setup(unsigned long dt_ptr)
++void __init __nostackprotector early_setup(unsigned long dt_ptr)
+ {
+ static __initdata struct paca_struct boot_paca;
+
--- /dev/null
+From d4a8e98621543d5798421eed177978bf2b3cdd11 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 20 Mar 2020 14:21:15 +1100
+Subject: powerpc/64: Setup a paca before parsing device tree etc.
+
+From: Daniel Axtens <dja@axtens.net>
+
+commit d4a8e98621543d5798421eed177978bf2b3cdd11 upstream.
+
+Currently we set up the paca after parsing the device tree for CPU
+features. Prior to that, r13 contains random data, which means there
+is random data in r13 while we're running the generic dt parsing code.
+
+This random data varies depending on whether we boot through a vmlinux
+or a zImage: for the vmlinux case it's usually around zero, but for
+zImages we see random values like 912a72603d420015.
+
+This is poor practice, and can also lead to difficult-to-debug
+crashes. For example, when kcov is enabled, the kcov instrumentation
+attempts to read preempt_count out of the current task, which goes via
+the paca. This then crashes in the zImage case.
+
+Similarly stack protector can cause crashes if r13 is bogus, by
+reading from the stack canary in the paca.
+
+To resolve this:
+
+ - move the paca setup to before the CPU feature parsing.
+
+ - because we no longer have access to CPU feature flags in paca
+ setup, change the HV feature test in the paca setup path to consider
+ the actual value of the MSR rather than the CPU feature.
+
+Translations get switched on once we leave early_setup, so I think
+we'd already catch any other cases where the paca or task aren't set
+up.
+
+Boot tested on a P9 guest and host.
+
+Fixes: fb0b0a73b223 ("powerpc: Enable kcov")
+Fixes: 06ec27aea9fc ("powerpc/64: add stack protector support")
+Cc: stable@vger.kernel.org # v4.20+
+Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
+Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+[mpe: Reword comments & change log a bit to mention stack protector]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200320032116.1024773-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/dt_cpu_ftrs.c | 1 -
+ arch/powerpc/kernel/paca.c | 10 +++++++---
+ arch/powerpc/kernel/setup_64.c | 30 ++++++++++++++++++++++++------
+ 3 files changed, 31 insertions(+), 10 deletions(-)
+
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu
+ /* Initialize the base environment -- clear FSCR/HFSCR. */
+ hv_mode = !!(mfmsr() & MSR_HV);
+ if (hv_mode) {
+- /* CPU_FTR_HVMODE is used early in PACA setup */
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ mtspr(SPRN_HFSCR, 0);
+ }
+--- a/arch/powerpc/kernel/paca.c
++++ b/arch/powerpc/kernel/paca.c
+@@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_
+ /* On Book3E, initialize the TLB miss exception frames */
+ mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+ #else
+- /* In HV mode, we setup both HPACA and PACA to avoid problems
++ /*
++ * In HV mode, we setup both HPACA and PACA to avoid problems
+ * if we do a GET_PACA() before the feature fixups have been
+- * applied
++ * applied.
++ *
++ * Normally you should test against CPU_FTR_HVMODE, but CPU features
++ * are not yet set up when we first reach here.
+ */
+- if (early_cpu_has_feature(CPU_FTR_HVMODE))
++ if (mfmsr() & MSR_HV)
+ mtspr(SPRN_SPRG_HPACA, local_paca);
+ #endif
+ mtspr(SPRN_SPRG_PACA, local_paca);
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -285,18 +285,36 @@ void __init early_setup(unsigned long dt
+
+ /* -------- printk is _NOT_ safe to use here ! ------- */
+
+- /* Try new device tree based feature discovery ... */
+- if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+- /* Otherwise use the old style CPU table */
+- identify_cpu(0, mfspr(SPRN_PVR));
+-
+- /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
++ /*
++ * Assume we're on cpu 0 for now.
++ *
++ * We need to load a PACA very early for a few reasons.
++ *
++ * The stack protector canary is stored in the paca, so as soon as we
++ * call any stack protected code we need r13 pointing somewhere valid.
++ *
++ * If we are using kcov it will call in_task() in its instrumentation,
++ * which relies on the current task from the PACA.
++ *
++ * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
++ * printk(), which can trigger both stack protector and kcov.
++ *
++ * percpu variables and spin locks also use the paca.
++ *
++ * So set up a temporary paca. It will be replaced below once we know
++ * what CPU we are on.
++ */
+ initialise_paca(&boot_paca, 0);
+ setup_paca(&boot_paca);
+ fixup_boot_paca();
+
+ /* -------- printk is now safe to use ------- */
+
++ /* Try new device tree based feature discovery ... */
++ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
++ /* Otherwise use the old style CPU table */
++ identify_cpu(0, mfspr(SPRN_PVR));
++
+ /* Enable early debugging if any specified (see udbg.h) */
+ udbg_early_init();
+
--- /dev/null
+From c7def7fbdeaa25feaa19caf4a27c5d10bd8789e4 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Tue, 31 Mar 2020 22:47:19 +1100
+Subject: powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit c7def7fbdeaa25feaa19caf4a27c5d10bd8789e4 upstream.
+
+In restore_tm_sigcontexts() we take the trap value directly from the
+user sigcontext with no checking:
+
+ err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+
+This means we can be in the kernel with an arbitrary regs->trap value.
+
+Although that's not immediately problematic, there is a risk we could
+trigger one of the uses of CHECK_FULL_REGS():
+
+ #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
+
+It can also cause us to unnecessarily save non-volatile GPRs again in
+save_nvgprs(), which shouldn't be problematic but is still wrong.
+
+It's also possible it could trick the syscall restart machinery, which
+relies on regs->trap not being == 0xc00 (see 9a81c16b5275 ("powerpc:
+fix double syscall restarts")), though I haven't been able to make
+that happen.
+
+Finally it doesn't match the behaviour of the non-TM case, in
+restore_sigcontext() which zeroes regs->trap.
+
+So change restore_tm_sigcontexts() to zero regs->trap.
+
+This was discovered while testing Nick's upcoming rewrite of the
+syscall entry path. In that series the call to save_nvgprs() prior to
+signal handling (do_notify_resume()) is removed, which leaves the
+low-bit of regs->trap uncleared which can then trigger the FULL_REGS()
+WARNs in setup_tm_sigcontexts().
+
+Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
+Cc: stable@vger.kernel.org # v3.9+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200401023836.3286664-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_64.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struc
+ err |= __get_user(tsk->thread.ckpt_regs.ccr,
+ &sc->gp_regs[PT_CCR]);
+
++ /* Don't allow userspace to set the trap value */
++ regs->trap = 0;
++
+ /* These regs are not checkpointed; they can go in 'regs'. */
+- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+ err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
+ err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
+ err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
--- /dev/null
+From aa4113340ae6c2811e046f08c2bc21011d20a072 Mon Sep 17 00:00:00 2001
+From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Date: Thu, 23 Jan 2020 11:19:25 +0000
+Subject: powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
+
+From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+
+commit aa4113340ae6c2811e046f08c2bc21011d20a072 upstream.
+
+In the current implementation, the call to loadcam_multi() is wrapped
+between switch_to_as1() and restore_to_as0() calls so, when it tries
+to create its own temporary AS=1 TLB1 entry, it ends up duplicating
+the existing one created by switch_to_as1(). Add a check to skip
+creating the temporary entry if already running in AS=1.
+
+Fixes: d9e1831a4202 ("powerpc/85xx: Load all early TLB entries at once")
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Acked-by: Scott Wood <oss@buserror.net>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200123111914.2565-1-laurentiu.tudor@nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/nohash/tlb_low.S | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/nohash/tlb_low.S
++++ b/arch/powerpc/mm/nohash/tlb_low.S
+@@ -397,7 +397,7 @@ _GLOBAL(set_context)
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+- * Must preserve r7, r8, r9, and r10
++ * Must preserve r7, r8, r9, r10 and r11
+ */
+ _GLOBAL(loadcam_entry)
+ mflr r5
+@@ -433,6 +433,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PH
+ */
+ _GLOBAL(loadcam_multi)
+ mflr r8
++ /* Don't switch to AS=1 if already there */
++ mfmsr r11
++ andi. r11,r11,MSR_IS
++ bne 10f
+
+ /*
+ * Set up temporary TLB entry that is the same as what we're
+@@ -458,6 +462,7 @@ _GLOBAL(loadcam_multi)
+ mtmsr r6
+ isync
+
++10:
+ mr r9,r3
+ add r10,r3,r4
+ 2: bl loadcam_entry
+@@ -466,6 +471,10 @@ _GLOBAL(loadcam_multi)
+ mr r3,r9
+ blt 2b
+
++ /* Don't return to AS=0 if we were in AS=1 at function start */
++ andi. r11,r11,MSR_IS
++ bne 3f
++
+ /* Return to AS=0 and clear the temporary entry */
+ mfmsr r6
+ rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
+@@ -481,6 +490,7 @@ _GLOBAL(loadcam_multi)
+ tlbwe
+ isync
+
++3:
+ mtlr r8
+ blr
+ #endif
--- /dev/null
+From 36b78402d97a3b9aeab136feb9b00d8647ec2c20 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Fri, 13 Mar 2020 15:18:42 +0530
+Subject: powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap PTE entries
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 36b78402d97a3b9aeab136feb9b00d8647ec2c20 upstream.
+
+H_PAGE_THP_HUGE is used to differentiate between a THP hugepage and
+hugetlb hugepage entries. The difference is WRT how we handle hash
+fault on these address. THP address enables MPSS in segments. We want
+to manage devmap hugepage entries similar to THP pt entries. Hence use
+H_PAGE_THP_HUGE for devmap huge PTE entries.
+
+With current code while handling hash PTE fault, we do set is_thp =
+true when finding devmap PTE huge PTE entries.
+
+Current code also does the below sequence when setting up huge devmap
+entries.
+
+ entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
+ if (pfn_t_devmap(pfn))
+ entry = pmd_mkdevmap(entry);
+
+In that case we would find both H_PAGE_THP_HUGE and _PAGE_DEVMAP set
+for huge devmap PTE entries. This results in false positive error like
+below.
+
+ kernel BUG at /home/kvaneesh/src/linux/mm/memory.c:4321!
+ Oops: Exception in kernel mode, sig: 5 [#1]
+ LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
+ Modules linked in:
+ CPU: 56 PID: 67996 Comm: t_mmap_dio Not tainted 5.6.0-rc4-59640-g371c804dedbc #128
+ ....
+ NIP [c00000000044c9e4] __follow_pte_pmd+0x264/0x900
+ LR [c0000000005d45f8] dax_writeback_one+0x1a8/0x740
+ Call Trace:
+ str_spec.74809+0x22ffb4/0x2d116c (unreliable)
+ dax_writeback_one+0x1a8/0x740
+ dax_writeback_mapping_range+0x26c/0x700
+ ext4_dax_writepages+0x150/0x5a0
+ do_writepages+0x68/0x180
+ __filemap_fdatawrite_range+0x138/0x180
+ file_write_and_wait_range+0xa4/0x110
+ ext4_sync_file+0x370/0x6e0
+ vfs_fsync_range+0x70/0xf0
+ sys_msync+0x220/0x2e0
+ system_call+0x5c/0x68
+
+This is because our pmd_trans_huge check doesn't exclude _PAGE_DEVMAP.
+
+To make this all consistent, update pmd_mkdevmap to set
+H_PAGE_THP_HUGE and pmd_trans_huge check now excludes _PAGE_DEVMAP
+correctly.
+
+Fixes: ebd31197931d ("powerpc/mm: Add devmap support for ppc64")
+Cc: stable@vger.kernel.org # v4.13+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200313094842.351830-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/hash-4k.h | 6 ++++++
+ arch/powerpc/include/asm/book3s/64/hash-64k.h | 8 +++++++-
+ arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +++-
+ arch/powerpc/include/asm/book3s/64/radix.h | 5 +++++
+ 4 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_cle
+ extern int hash__has_transparent_hugepage(void);
+ #endif
+
++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
++{
++ BUG();
++ return pmd;
++}
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
+--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
+@@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(
+ */
+ static inline int hash__pmd_trans_huge(pmd_t pmd)
+ {
+- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
++ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
+ (_PAGE_PTE | H_PAGE_THP_HUGE));
+ }
+
+@@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_cle
+ unsigned long addr, pmd_t *pmdp);
+ extern int hash__has_transparent_hugepage(void);
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
++static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
++{
++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup
+
+ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+ {
+- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
++ if (radix_enabled())
++ return radix__pmd_mkdevmap(pmd);
++ return hash__pmd_mkdevmap(pmd);
+ }
+
+ static inline int pmd_devmap(pmd_t pmd)
+--- a/arch/powerpc/include/asm/book3s/64/radix.h
++++ b/arch/powerpc/include/asm/book3s/64/radix.h
+@@ -263,6 +263,11 @@ static inline int radix__has_transparent
+ }
+ #endif
+
++static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
++{
++ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
++}
++
+ extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys);
--- /dev/null
+From 21f8b2fa3ca5b01f7a2b51b89ce97a3705a15aa0 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Tue, 18 Feb 2020 19:38:27 +0000
+Subject: powerpc/kprobes: Ignore traps that happened in real mode
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 21f8b2fa3ca5b01f7a2b51b89ce97a3705a15aa0 upstream.
+
+When a program check exception happens while MMU translation is
+disabled, following Oops happens in kprobe_handler() in the following
+code:
+
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+
+ BUG: Unable to handle kernel data access on read at 0x0000e268
+ Faulting instruction address: 0xc000ec34
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ BE PAGE_SIZE=16K PREEMPT CMPC885
+ Modules linked in:
+ CPU: 0 PID: 429 Comm: cat Not tainted 5.6.0-rc1-s3k-dev-00824-g84195dc6c58a #3267
+ NIP: c000ec34 LR: c000ecd8 CTR: c019cab8
+ REGS: ca4d3b58 TRAP: 0300 Not tainted (5.6.0-rc1-s3k-dev-00824-g84195dc6c58a)
+ MSR: 00001032 <ME,IR,DR,RI> CR: 2a4d3c52 XER: 00000000
+ DAR: 0000e268 DSISR: c0000000
+ GPR00: c000b09c ca4d3c10 c66d0620 00000000 ca4d3c60 00000000 00009032 00000000
+ GPR08: 00020000 00000000 c087de44 c000afe0 c66d0ad0 100d3dd6 fffffff3 00000000
+ GPR16: 00000000 00000041 00000000 ca4d3d70 00000000 00000000 0000416d 00000000
+ GPR24: 00000004 c53b6128 00000000 0000e268 00000000 c07c0000 c07bb6fc ca4d3c60
+ NIP [c000ec34] kprobe_handler+0x128/0x290
+ LR [c000ecd8] kprobe_handler+0x1cc/0x290
+ Call Trace:
+ [ca4d3c30] [c000b09c] program_check_exception+0xbc/0x6fc
+ [ca4d3c50] [c000e43c] ret_from_except_full+0x0/0x4
+ --- interrupt: 700 at 0xe268
+ Instruction dump:
+ 913e0008 81220000 38600001 3929ffff 91220000 80010024 bb410008 7c0803a6
+ 38210020 4e800020 38600000 4e800020 <813b0000> 6d2a7fe0 2f8a0008 419e0154
+ ---[ end trace 5b9152d4cdadd06d ]---
+
+kprobe is not prepared to handle events in real mode and functions
+running in real mode should have been blacklisted, so kprobe_handler()
+can safely bail out telling 'this trap is not mine' for any trap that
+happened while in real-mode.
+
+If the trap happened with MSR_IR or MSR_DR cleared, return 0
+immediately.
+
+Reported-by: Larry Finger <Larry.Finger@lwfinger.net>
+Fixes: 6cc89bad60a6 ("powerpc/kprobes: Invoke handlers directly")
+Cc: stable@vger.kernel.org # v4.10+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/424331e2006e7291a1bfe40e7f3fa58825f565e1.1582054578.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/kprobes.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs)
+ if (user_mode(regs))
+ return 0;
+
++ if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
++ return 0;
++
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
--- /dev/null
+From c17eb4dca5a353a9dbbb8ad6934fe57af7165e91 Mon Sep 17 00:00:00 2001
+From: Clement Courbet <courbet@google.com>
+Date: Mon, 30 Mar 2020 10:03:56 +0200
+Subject: powerpc: Make setjmp/longjmp signature standard
+
+From: Clement Courbet <courbet@google.com>
+
+commit c17eb4dca5a353a9dbbb8ad6934fe57af7165e91 upstream.
+
+Declaring setjmp()/longjmp() as taking longs makes the signature
+non-standard, and makes clang complain. In the past, this has been
+worked around by adding -ffreestanding to the compile flags.
+
+The implementation looks like it only ever propagates the value
+(in longjmp) or sets it to 1 (in setjmp), and we only call longjmp
+with integer parameters.
+
+This allows removing -ffreestanding from the compilation flags.
+
+Fixes: c9029ef9c957 ("powerpc: Avoid clang warnings around setjmp and longjmp")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Clement Courbet <courbet@google.com>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200330080400.124803-1-courbet@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/setjmp.h | 6 ++++--
+ arch/powerpc/kexec/Makefile | 3 ---
+ arch/powerpc/xmon/Makefile | 3 ---
+ 3 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/setjmp.h
++++ b/arch/powerpc/include/asm/setjmp.h
+@@ -7,7 +7,9 @@
+
+ #define JMP_BUF_LEN 23
+
+-extern long setjmp(long *) __attribute__((returns_twice));
+-extern void longjmp(long *, long) __attribute__((noreturn));
++typedef long jmp_buf[JMP_BUF_LEN];
++
++extern int setjmp(jmp_buf env) __attribute__((returns_twice));
++extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
+
+ #endif /* _ASM_POWERPC_SETJMP_H */
+--- a/arch/powerpc/kexec/Makefile
++++ b/arch/powerpc/kexec/Makefile
+@@ -3,9 +3,6 @@
+ # Makefile for the linux kernel.
+ #
+
+-# Avoid clang warnings around longjmp/setjmp declarations
+-CFLAGS_crash.o += -ffreestanding
+-
+ obj-y += core.o crash.o core_$(BITS).o
+
+ obj-$(CONFIG_PPC32) += relocate_32.o
+--- a/arch/powerpc/xmon/Makefile
++++ b/arch/powerpc/xmon/Makefile
+@@ -1,9 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for xmon
+
+-# Avoid clang warnings around longjmp/setjmp declarations
+-subdir-ccflags-y := -ffreestanding
+-
+ GCOV_PROFILE := n
+ KCOV_INSTRUMENT := n
+ UBSAN_SANITIZE := n
--- /dev/null
+From 97ef275077932c65b1b8ec5022abd737a9fbf3e0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= <clg@kaod.org>
+Date: Fri, 6 Mar 2020 16:01:41 +0100
+Subject: powerpc/xive: Fix xmon support on the PowerNV platform
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cédric Le Goater <clg@kaod.org>
+
+commit 97ef275077932c65b1b8ec5022abd737a9fbf3e0 upstream.
+
+The PowerNV platform has multiple IRQ chips and the xmon command
+dumping the state of the XIVE interrupt should only operate on the
+XIVE IRQ chip.
+
+Fixes: 5896163f7f91 ("powerpc/xmon: Improve output of XIVE interrupts")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200306150143.5551-3-clg@kaod.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/sysdev/xive/common.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -258,11 +258,15 @@ notrace void xmon_xive_do_dump(int cpu)
+
+ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+ {
++ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ int rc;
+ u32 target;
+ u8 prio;
+ u32 lirq;
+
++ if (!is_xive_irq(chip))
++ return -EINVAL;
++
+ rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ if (rc) {
+ xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
--- /dev/null
+From b1a504a6500df50e83b701b7946b34fce27ad8a3 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= <clg@kaod.org>
+Date: Fri, 6 Mar 2020 16:01:40 +0100
+Subject: powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured IPIs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Cédric Le Goater <clg@kaod.org>
+
+commit b1a504a6500df50e83b701b7946b34fce27ad8a3 upstream.
+
+When a CPU is brought up, an IPI number is allocated and recorded
+under the XIVE CPU structure. Invalid IPI numbers are tracked with
+interrupt number 0x0.
+
+On the PowerNV platform, the interrupt number space starts at 0x10 and
+this works fine. However, on the sPAPR platform, it is possible to
+allocate the interrupt number 0x0 and this raises an issue when CPU 0
+is unplugged. The XIVE spapr driver tracks allocated interrupt numbers
+in a bitmask and it is not correctly updated when interrupt number 0x0
+is freed. It stays allocated and it is then impossible to reallocate.
+
+Fix by using the XIVE_BAD_IRQ value instead of zero on both platforms.
+
+Reported-by: David Gibson <david@gibson.dropbear.id.au>
+Fixes: eac1e731b59e ("powerpc/xive: guest exploitation of the XIVE interrupt controller")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Cédric Le Goater <clg@kaod.org>
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Tested-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200306150143.5551-2-clg@kaod.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/sysdev/xive/common.c | 12 +++---------
+ arch/powerpc/sysdev/xive/native.c | 4 ++--
+ arch/powerpc/sysdev/xive/spapr.c | 4 ++--
+ arch/powerpc/sysdev/xive/xive-internal.h | 7 +++++++
+ 4 files changed, 14 insertions(+), 13 deletions(-)
+
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -68,13 +68,6 @@ static u32 xive_ipi_irq;
+ /* Xive state for each CPU */
+ static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
+
+-/*
+- * A "disabled" interrupt should never fire, to catch problems
+- * we set its logical number to this
+- */
+-#define XIVE_BAD_IRQ 0x7fffffff
+-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+-
+ /* An invalid CPU target */
+ #define XIVE_INVALID_TARGET (-1)
+
+@@ -1150,7 +1143,7 @@ static int xive_setup_cpu_ipi(unsigned i
+ xc = per_cpu(xive_cpu, cpu);
+
+ /* Check if we are already setup */
+- if (xc->hw_ipi != 0)
++ if (xc->hw_ipi != XIVE_BAD_IRQ)
+ return 0;
+
+ /* Grab an IPI from the backend, this will populate xc->hw_ipi */
+@@ -1187,7 +1180,7 @@ static void xive_cleanup_cpu_ipi(unsigne
+ /* Disable the IPI and free the IRQ data */
+
+ /* Already cleaned up ? */
+- if (xc->hw_ipi == 0)
++ if (xc->hw_ipi == XIVE_BAD_IRQ)
+ return;
+
+ /* Mask the IPI */
+@@ -1343,6 +1336,7 @@ static int xive_prepare_cpu(unsigned int
+ if (np)
+ xc->chip_id = of_get_ibm_chip_id(np);
+ of_node_put(np);
++ xc->hw_ipi = XIVE_BAD_IRQ;
+
+ per_cpu(xive_cpu, cpu) = xc;
+ }
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -312,7 +312,7 @@ static void xive_native_put_ipi(unsigned
+ s64 rc;
+
+ /* Free the IPI */
+- if (!xc->hw_ipi)
++ if (xc->hw_ipi == XIVE_BAD_IRQ)
+ return;
+ for (;;) {
+ rc = opal_xive_free_irq(xc->hw_ipi);
+@@ -320,7 +320,7 @@ static void xive_native_put_ipi(unsigned
+ msleep(OPAL_BUSY_DELAY_MS);
+ continue;
+ }
+- xc->hw_ipi = 0;
++ xc->hw_ipi = XIVE_BAD_IRQ;
+ break;
+ }
+ }
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -560,11 +560,11 @@ static int xive_spapr_get_ipi(unsigned i
+
+ static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
+ {
+- if (!xc->hw_ipi)
++ if (xc->hw_ipi == XIVE_BAD_IRQ)
+ return;
+
+ xive_irq_bitmap_free(xc->hw_ipi);
+- xc->hw_ipi = 0;
++ xc->hw_ipi = XIVE_BAD_IRQ;
+ }
+ #endif /* CONFIG_SMP */
+
+--- a/arch/powerpc/sysdev/xive/xive-internal.h
++++ b/arch/powerpc/sysdev/xive/xive-internal.h
+@@ -5,6 +5,13 @@
+ #ifndef __XIVE_INTERNAL_H
+ #define __XIVE_INTERNAL_H
+
++/*
++ * A "disabled" interrupt should never fire, to catch problems
++ * we set its logical number to this
++ */
++#define XIVE_BAD_IRQ 0x7fffffff
++#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
++
+ /* Each CPU carry one of these with various per-CPU state */
+ struct xive_cpu {
+ #ifdef CONFIG_SMP
--- /dev/null
+From 72655c0ebd1d941d80f47bf614b02d563a1e61ae Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Sun, 29 Mar 2020 19:51:51 -0700
+Subject: scsi: sr: Fix sr_block_release()
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 72655c0ebd1d941d80f47bf614b02d563a1e61ae upstream.
+
+This patch fixes the following two complaints:
+
+WARNING: CPU: 3 PID: 1326 at kernel/locking/mutex-debug.c:103 mutex_destroy+0x74/0x80
+Modules linked in: scsi_debug sd_mod t10_pi brd scsi_transport_iscsi af_packet crct10dif_pclmul sg aesni_intel glue_helper virtio_balloon button crypto_simd cryptd intel_agp intel_gtt agpgart ip_tables x_tables ipv6 nf_defrag_ipv6 autofs4 ext4 crc16 mbcache jbd2 hid_generic usbhid hid sr_mod cdrom ata_generic pata_acpi virtio_blk virtio_net net_failover failover ata_piix xhci_pci ahci libahci xhci_hcd i2c_piix4 libata virtio_pci usbcore i2c_core virtio_ring scsi_mod usb_common virtio [last unloaded: scsi_debug]
+CPU: 3 PID: 1326 Comm: systemd-udevd Not tainted 5.6.0-rc1-dbg+ #1
+Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+RIP: 0010:mutex_destroy+0x74/0x80
+Call Trace:
+ sr_kref_release+0xb9/0xd0 [sr_mod]
+ scsi_cd_put+0x79/0x90 [sr_mod]
+ sr_block_release+0x54/0x70 [sr_mod]
+ __blkdev_put+0x2ce/0x3c0
+ blkdev_put+0x68/0x220
+ blkdev_close+0x4d/0x60
+ __fput+0x170/0x3b0
+ ____fput+0x12/0x20
+ task_work_run+0xa2/0xf0
+ exit_to_usermode_loop+0xeb/0xf0
+ do_syscall_64+0x2be/0x300
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7fa16d40aab7
+
+BUG: KASAN: use-after-free in __mutex_unlock_slowpath+0x98/0x420
+Read of size 8 at addr ffff8881c6e4f4b0 by task systemd-udevd/1326
+
+CPU: 3 PID: 1326 Comm: systemd-udevd Tainted: G W 5.6.0-rc1-dbg+ #1
+Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+Call Trace:
+ dump_stack+0xa5/0xe6
+ print_address_description.constprop.0+0x46/0x60
+ __kasan_report.cold+0x7b/0x94
+ kasan_report+0x16/0x20
+ check_memory_region+0x140/0x1b0
+ __kasan_check_read+0x15/0x20
+ __mutex_unlock_slowpath+0x98/0x420
+ mutex_unlock+0x16/0x20
+ sr_block_release+0x5c/0x70 [sr_mod]
+ __blkdev_put+0x2ce/0x3c0
+hardirqs last enabled at (1875522): [<ffffffff81bb0696>] _raw_spin_unlock_irqrestore+0x56/0x70
+ blkdev_put+0x68/0x220
+ blkdev_close+0x4d/0x60
+ __fput+0x170/0x3b0
+ ____fput+0x12/0x20
+ task_work_run+0xa2/0xf0
+ exit_to_usermode_loop+0xeb/0xf0
+ do_syscall_64+0x2be/0x300
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7fa16d40aab7
+
+Allocated by task 3201:
+ save_stack+0x23/0x90
+ __kasan_kmalloc.constprop.0+0xcf/0xe0
+ kasan_kmalloc+0xd/0x10
+ kmem_cache_alloc_trace+0x161/0x3c0
+ sr_probe+0x12f/0xb60 [sr_mod]
+ really_probe+0x183/0x5d0
+ driver_probe_device+0x13f/0x1a0
+ __device_attach_driver+0xe6/0x150
+ bus_for_each_drv+0x101/0x160
+ __device_attach+0x183/0x230
+ device_initial_probe+0x17/0x20
+ bus_probe_device+0x110/0x130
+ device_add+0xb7b/0xd40
+ scsi_sysfs_add_sdev+0xe8/0x360 [scsi_mod]
+ scsi_probe_and_add_lun+0xdc4/0x14c0 [scsi_mod]
+ __scsi_scan_target+0x12d/0x850 [scsi_mod]
+ scsi_scan_channel+0xcd/0xe0 [scsi_mod]
+ scsi_scan_host_selected+0x182/0x190 [scsi_mod]
+ store_scan+0x1e9/0x200 [scsi_mod]
+ dev_attr_store+0x42/0x60
+ sysfs_kf_write+0x8b/0xb0
+ kernfs_fop_write+0x158/0x250
+ __vfs_write+0x4c/0x90
+ vfs_write+0x145/0x2c0
+ ksys_write+0xd7/0x180
+ __x64_sys_write+0x47/0x50
+ do_syscall_64+0x6f/0x300
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Freed by task 1326:
+ save_stack+0x23/0x90
+ __kasan_slab_free+0x13a/0x190
+ kasan_slab_free+0x12/0x20
+ kfree+0x109/0x410
+ sr_kref_release+0xc1/0xd0 [sr_mod]
+ scsi_cd_put+0x79/0x90 [sr_mod]
+ sr_block_release+0x54/0x70 [sr_mod]
+ __blkdev_put+0x2ce/0x3c0
+ blkdev_put+0x68/0x220
+ blkdev_close+0x4d/0x60
+ __fput+0x170/0x3b0
+ ____fput+0x12/0x20
+ task_work_run+0xa2/0xf0
+ exit_to_usermode_loop+0xeb/0xf0
+ do_syscall_64+0x2be/0x300
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Link: https://lore.kernel.org/r/20200330025151.10535-1-bvanassche@acm.org
+Fixes: 51a858817dcd ("scsi: sr: get rid of sr global mutex")
+Cc: Merlijn Wajer <merlijn@archive.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: <stable@kernel.org>
+Acked-by: Merlijn Wajer <merlijn@archive.org>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -550,10 +550,12 @@ out:
+ static void sr_block_release(struct gendisk *disk, fmode_t mode)
+ {
+ struct scsi_cd *cd = scsi_cd(disk);
++
+ mutex_lock(&cd->lock);
+ cdrom_release(&cd->cdi, mode);
+- scsi_cd_put(cd);
+ mutex_unlock(&cd->lock);
++
++ scsi_cd_put(cd);
+ }
+
+ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
--- /dev/null
+From 51a858817dcdbbdee22cb54b0b2b26eb145ca5b6 Mon Sep 17 00:00:00 2001
+From: Merlijn Wajer <merlijn@archive.org>
+Date: Tue, 18 Feb 2020 15:39:17 +0100
+Subject: scsi: sr: get rid of sr global mutex
+
+From: Merlijn Wajer <merlijn@archive.org>
+
+commit 51a858817dcdbbdee22cb54b0b2b26eb145ca5b6 upstream.
+
+When replacing the Big Kernel Lock in commit 2a48fc0ab242 ("block:
+autoconvert trivial BKL users to private mutex"), the lock was replaced
+with a sr-wide lock.
+
+This causes very poor performance when using multiple sr devices, as the sr
+driver was not able to execute more than one command to one drive at any
+given time, even when there were many CD drives available.
+
+Replace the global mutex with per-sr-device mutex.
+
+Someone tried this patch at the time, but it never made it upstream, due to
+possible concerns with race conditions, but it's not clear the patch
+actually caused those:
+
+https://www.spinics.net/lists/linux-scsi/msg63706.html
+https://www.spinics.net/lists/linux-scsi/msg63750.html
+
+Also see
+
+http://lists.xiph.org/pipermail/paranoia/2019-December/001647.html
+
+Link: https://lore.kernel.org/r/20200218143918.30267-1-merlijn@archive.org
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Merlijn Wajer <merlijn@archive.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sr.c | 20 +++++++++++---------
+ drivers/scsi/sr.h | 2 ++
+ 2 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -79,7 +79,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
+ CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
+ CDC_MRW|CDC_MRW_W|CDC_RAM)
+
+-static DEFINE_MUTEX(sr_mutex);
+ static int sr_probe(struct device *);
+ static int sr_remove(struct device *);
+ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
+@@ -536,9 +535,9 @@ static int sr_block_open(struct block_de
+ scsi_autopm_get_device(sdev);
+ check_disk_change(bdev);
+
+- mutex_lock(&sr_mutex);
++ mutex_lock(&cd->lock);
+ ret = cdrom_open(&cd->cdi, bdev, mode);
+- mutex_unlock(&sr_mutex);
++ mutex_unlock(&cd->lock);
+
+ scsi_autopm_put_device(sdev);
+ if (ret)
+@@ -551,10 +550,10 @@ out:
+ static void sr_block_release(struct gendisk *disk, fmode_t mode)
+ {
+ struct scsi_cd *cd = scsi_cd(disk);
+- mutex_lock(&sr_mutex);
++ mutex_lock(&cd->lock);
+ cdrom_release(&cd->cdi, mode);
+ scsi_cd_put(cd);
+- mutex_unlock(&sr_mutex);
++ mutex_unlock(&cd->lock);
+ }
+
+ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+@@ -565,7 +564,7 @@ static int sr_block_ioctl(struct block_d
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+- mutex_lock(&sr_mutex);
++ mutex_lock(&cd->lock);
+
+ ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+@@ -595,7 +594,7 @@ put:
+ scsi_autopm_put_device(sdev);
+
+ out:
+- mutex_unlock(&sr_mutex);
++ mutex_unlock(&cd->lock);
+ return ret;
+ }
+
+@@ -608,7 +607,7 @@ static int sr_block_compat_ioctl(struct
+ void __user *argp = compat_ptr(arg);
+ int ret;
+
+- mutex_lock(&sr_mutex);
++ mutex_lock(&cd->lock);
+
+ ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+@@ -638,7 +637,7 @@ put:
+ scsi_autopm_put_device(sdev);
+
+ out:
+- mutex_unlock(&sr_mutex);
++ mutex_unlock(&cd->lock);
+ return ret;
+
+ }
+@@ -745,6 +744,7 @@ static int sr_probe(struct device *dev)
+ disk = alloc_disk(1);
+ if (!disk)
+ goto fail_free;
++ mutex_init(&cd->lock);
+
+ spin_lock(&sr_index_lock);
+ minor = find_first_zero_bit(sr_index_bits, SR_DISKS);
+@@ -1055,6 +1055,8 @@ static void sr_kref_release(struct kref
+
+ put_disk(disk);
+
++ mutex_destroy(&cd->lock);
++
+ kfree(cd);
+ }
+
+--- a/drivers/scsi/sr.h
++++ b/drivers/scsi/sr.h
+@@ -20,6 +20,7 @@
+
+ #include <linux/genhd.h>
+ #include <linux/kref.h>
++#include <linux/mutex.h>
+
+ #define MAX_RETRIES 3
+ #define SR_TIMEOUT (30 * HZ)
+@@ -51,6 +52,7 @@ typedef struct scsi_cd {
+ bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
+
+ struct cdrom_device_info cdi;
++ struct mutex lock;
+ /* We hold gendisk and scsi_device references on probe and use
+ * the refs on this kref to decide when to release them */
+ struct kref kref;
libata-return-correct-status-in-sata_pmp_eh_recover_pm-when-ata_dflag_detach-is-set.patch
ipmi-fix-hung-processes-in-__get_guid.patch
xen-blkfront-fix-memory-allocation-flags-in-blkfront_setup_indirect.patch
+scsi-sr-get-rid-of-sr-global-mutex.patch
+scsi-sr-fix-sr_block_release.patch
+powerpc-make-setjmp-longjmp-signature-standard.patch
+powerpc-64-tm-don-t-let-userspace-set-regs-trap-via-sigreturn.patch
+powerpc-fsl_booke-avoid-creating-duplicate-tlb1-entry.patch
+powerpc-hash64-devmap-use-h_page_thp_huge-when-setting-up-huge-devmap-pte-entries.patch
+powerpc-xive-use-xive_bad_irq-instead-of-zero-to-catch-non-configured-ipis.patch
+powerpc-64-setup-a-paca-before-parsing-device-tree-etc.patch
+powerpc-xive-fix-xmon-support-on-the-powernv-platform.patch
+powerpc-kprobes-ignore-traps-that-happened-in-real-mode.patch
+powerpc-64-prevent-stack-protection-in-early-boot.patch