git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Jul 2023 05:40:59 +0000 (07:40 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Jul 2023 05:40:59 +0000 (07:40 +0200)
added patches:
mips-kvm-fix-null-pointer-dereference.patch
mips-loongson-fix-cpu_probe_loongson-again.patch
misc-fastrpc-create-fastrpc-scalar-with-correct-buffer-count.patch
powerpc-64s-fix-native_hpte_remove-to-be-irq-safe.patch
powerpc-fail-build-if-using-recordmcount-with-binutils-v2.37.patch
powerpc-security-fix-speculation_store_bypass-reporting-on-power10.patch

queue-6.1/mips-kvm-fix-null-pointer-dereference.patch [new file with mode: 0644]
queue-6.1/mips-loongson-fix-cpu_probe_loongson-again.patch [new file with mode: 0644]
queue-6.1/misc-fastrpc-create-fastrpc-scalar-with-correct-buffer-count.patch [new file with mode: 0644]
queue-6.1/powerpc-64s-fix-native_hpte_remove-to-be-irq-safe.patch [new file with mode: 0644]
queue-6.1/powerpc-fail-build-if-using-recordmcount-with-binutils-v2.37.patch [new file with mode: 0644]
queue-6.1/powerpc-security-fix-speculation_store_bypass-reporting-on-power10.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/mips-kvm-fix-null-pointer-dereference.patch b/queue-6.1/mips-kvm-fix-null-pointer-dereference.patch
new file mode 100644 (file)
index 0000000..94ec3f8
--- /dev/null
@@ -0,0 +1,390 @@
+From e4de2057698636c0ee709e545d19b169d2069fa3 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Wed, 28 Jun 2023 19:08:17 +0800
+Subject: MIPS: KVM: Fix NULL pointer dereference
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit e4de2057698636c0ee709e545d19b169d2069fa3 upstream.
+
+After commit 45c7e8af4a5e3f0bea4ac209 ("MIPS: Remove KVM_TE support") we
+get a NULL pointer dereference when creating a KVM guest:
+
+[  146.243409] Starting KVM with MIPS VZ extensions
+[  149.849151] CPU 3 Unable to handle kernel paging request at virtual address 0000000000000300, epc == ffffffffc06356ec, ra == ffffffffc063568c
+[  149.849177] Oops[#1]:
+[  149.849182] CPU: 3 PID: 2265 Comm: qemu-system-mip Not tainted 6.4.0-rc3+ #1671
+[  149.849188] Hardware name: THTF CX TL630 Series/THTF-LS3A4000-7A1000-ML4A, BIOS KL4.1F.TF.D.166.201225.R 12/25/2020
+[  149.849192] $ 0   : 0000000000000000 000000007400cce0 0000000000400004 ffffffff8119c740
+[  149.849209] $ 4   : 000000007400cce1 000000007400cce1 0000000000000000 0000000000000000
+[  149.849221] $ 8   : 000000240058bb36 ffffffff81421ac0 0000000000000000 0000000000400dc0
+[  149.849233] $12   : 9800000102a07cc8 ffffffff80e40e38 0000000000000001 0000000000400dc0
+[  149.849245] $16   : 0000000000000000 9800000106cd0000 9800000106cd0000 9800000100cce000
+[  149.849257] $20   : ffffffffc0632b28 ffffffffc05b31b0 9800000100ccca00 0000000000400000
+[  149.849269] $24   : 9800000106cd09ce ffffffff802f69d0
+[  149.849281] $28   : 9800000102a04000 9800000102a07cd0 98000001106a8000 ffffffffc063568c
+[  149.849293] Hi    : 00000335b2111e66
+[  149.849295] Lo    : 6668d90061ae0ae9
+[  149.849298] epc   : ffffffffc06356ec kvm_vz_vcpu_setup+0xc4/0x328 [kvm]
+[  149.849324] ra    : ffffffffc063568c kvm_vz_vcpu_setup+0x64/0x328 [kvm]
+[  149.849336] Status: 7400cce3 KX SX UX KERNEL EXL IE
+[  149.849351] Cause : 1000000c (ExcCode 03)
+[  149.849354] BadVA : 0000000000000300
+[  149.849357] PrId  : 0014c004 (ICT Loongson-3)
+[  149.849360] Modules linked in: kvm nfnetlink_queue nfnetlink_log nfnetlink fuse sha256_generic libsha256 cfg80211 rfkill binfmt_misc vfat fat snd_hda_codec_hdmi input_leds led_class snd_hda_intel snd_intel_dspcfg snd_hda_codec snd_hda_core snd_pcm snd_timer snd serio_raw xhci_pci radeon drm_suballoc_helper drm_display_helper xhci_hcd ip_tables x_tables
+[  149.849432] Process qemu-system-mip (pid: 2265, threadinfo=00000000ae2982d2, task=0000000038e09ad4, tls=000000ffeba16030)
+[  149.849439] Stack : 9800000000000003 9800000100ccca00 9800000100ccc000 ffffffffc062cef4
+[  149.849453]         9800000102a07d18 c89b63a7ab338e00 0000000000000000 ffffffff811a0000
+[  149.849465]         0000000000000000 9800000106cd0000 ffffffff80e59938 98000001106a8920
+[  149.849476]         ffffffff80e57f30 ffffffffc062854c ffffffff811a0000 9800000102bf4240
+[  149.849488]         ffffffffc05b0000 ffffffff80e3a798 000000ff78000000 000000ff78000010
+[  149.849500]         0000000000000255 98000001021f7de0 98000001023f0078 ffffffff81434000
+[  149.849511]         0000000000000000 0000000000000000 9800000102ae0000 980000025e92ae28
+[  149.849523]         0000000000000000 c89b63a7ab338e00 0000000000000001 ffffffff8119dce0
+[  149.849535]         000000ff78000010 ffffffff804f3d3c 9800000102a07eb0 0000000000000255
+[  149.849546]         0000000000000000 ffffffff8049460c 000000ff78000010 0000000000000255
+[  149.849558]         ...
+[  149.849565] Call Trace:
+[  149.849567] [<ffffffffc06356ec>] kvm_vz_vcpu_setup+0xc4/0x328 [kvm]
+[  149.849586] [<ffffffffc062cef4>] kvm_arch_vcpu_create+0x184/0x228 [kvm]
+[  149.849605] [<ffffffffc062854c>] kvm_vm_ioctl+0x64c/0xf28 [kvm]
+[  149.849623] [<ffffffff805209c0>] sys_ioctl+0xc8/0x118
+[  149.849631] [<ffffffff80219eb0>] syscall_common+0x34/0x58
+
+The root cause is the deletion of kvm_mips_commpage_init() leaves vcpu
+->arch.cop0 NULL. So fix it by making cop0 from a pointer to an embedded
+object.
+
+Fixes: 45c7e8af4a5e3f0bea4ac209 ("MIPS: Remove KVM_TE support")
+Cc: stable@vger.kernel.org
+Reported-by: Yu Zhao <yuzhao@google.com>
+Suggested-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/kvm_host.h |    6 +++---
+ arch/mips/kvm/emulate.c          |   22 +++++++++++-----------
+ arch/mips/kvm/mips.c             |   16 ++++++++--------
+ arch/mips/kvm/trace.h            |    8 ++++----
+ arch/mips/kvm/vz.c               |   20 ++++++++++----------
+ 5 files changed, 36 insertions(+), 36 deletions(-)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -317,7 +317,7 @@ struct kvm_vcpu_arch {
+       unsigned int aux_inuse;
+       /* COP0 State */
+-      struct mips_coproc *cop0;
++      struct mips_coproc cop0;
+       /* Resume PC after MMIO completion */
+       unsigned long io_pc;
+@@ -698,7 +698,7 @@ static inline bool kvm_mips_guest_can_ha
+ static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+ {
+       return kvm_mips_guest_can_have_fpu(vcpu) &&
+-              kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
++              kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
+ }
+ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+@@ -710,7 +710,7 @@ static inline bool kvm_mips_guest_can_ha
+ static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+ {
+       return kvm_mips_guest_can_have_msa(vcpu) &&
+-              kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
++              kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
+ }
+ struct kvm_mips_callbacks {
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct k
+  */
+ int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+               (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_tim
+  */
+ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       ktime_t expires, threshold;
+       u32 count, compare;
+       int running;
+@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(s
+  */
+ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       /* If count disabled just read static copy of count */
+       if (kvm_mips_count_disabled(vcpu))
+@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct k
+ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+                                   ktime_t now, u32 count)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       u32 compare;
+       u64 delta;
+       ktime_t expire;
+@@ -603,7 +603,7 @@ resume:
+  */
+ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       ktime_t now;
+       /* Calculate bias */
+@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu
+  */
+ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       int dc;
+       ktime_t now;
+       u32 count;
+@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcp
+  */
+ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       int dc;
+       u32 old_compare = kvm_read_c0_guest_compare(cop0);
+       s32 delta = compare - old_compare;
+@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_v
+  */
+ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       u32 count;
+       ktime_t now;
+@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(st
+  */
+ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+       if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct
+  */
+ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       u32 count;
+       kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct
+  */
+ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+       s64 delta;
+       ktime_t expire, now;
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -659,7 +659,7 @@ static int kvm_mips_copy_reg_indices(str
+ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+       int ret;
+       s64 v;
+@@ -771,7 +771,7 @@ static int kvm_mips_get_reg(struct kvm_v
+ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+       s64 v;
+       s64 vs[2];
+@@ -1111,7 +1111,7 @@ int kvm_vm_ioctl_check_extension(struct
+ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+ {
+       return kvm_mips_pending_timer(vcpu) ||
+-              kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
++              kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
+ }
+ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+@@ -1135,7 +1135,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_v
+       kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+       kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+-      cop0 = vcpu->arch.cop0;
++      cop0 = &vcpu->arch.cop0;
+       kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
+                 kvm_read_c0_guest_status(cop0),
+                 kvm_read_c0_guest_cause(cop0));
+@@ -1257,7 +1257,7 @@ static int __kvm_mips_handle_exit(struct
+       case EXCCODE_TLBS:
+               kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
+-                        cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
++                        cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
+                         badvaddr);
+               ++vcpu->stat.tlbmiss_st_exits;
+@@ -1329,7 +1329,7 @@ static int __kvm_mips_handle_exit(struct
+               kvm_get_badinstr(opc, vcpu, &inst);
+               kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
+                       exccode, opc, inst, badvaddr,
+-                      kvm_read_c0_guest_status(vcpu->arch.cop0));
++                      kvm_read_c0_guest_status(&vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+@@ -1402,7 +1402,7 @@ int noinstr kvm_mips_handle_exit(struct
+ /* Enable FPU for guest and restore context */
+ void kvm_own_fpu(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       unsigned int sr, cfg5;
+       preempt_disable();
+@@ -1446,7 +1446,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
+ /* Enable MSA for guest and restore context */
+ void kvm_own_msa(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       unsigned int sr, cfg5;
+       preempt_disable();
+--- a/arch/mips/kvm/trace.h
++++ b/arch/mips/kvm/trace.h
+@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
+           ),
+           TP_fast_assign(
+-                      __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
++                      __entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
+                       __entry->pc = vcpu->arch.pc;
+-                      __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
+-                      __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
+-                      __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
++                      __entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
++                      __entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
++                      __entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
+           ),
+           TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+--- a/arch/mips/kvm/vz.c
++++ b/arch/mips/kvm/vz.c
+@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struc
+  */
+ static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       u32 cause, compare;
+       compare = kvm_read_sw_gc0_compare(cop0);
+@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct k
+  */
+ static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       u32 gctl0, compare, cause;
+       gctl0 = read_c0_guestctl0();
+@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(u
+ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       val &= MIPS_MAARI_INDEX;
+       if (val == MIPS_MAARI_INDEX)
+@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi
+                                             u32 *opc, u32 cause,
+                                             struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       u32 rt, rd, sel;
+       unsigned long curr_pc;
+@@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm
+                             const struct kvm_one_reg *reg,
+                             s64 *v)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       unsigned int idx;
+       switch (reg->id) {
+@@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm
+       case KVM_REG_MIPS_CP0_MAARI:
+               if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+                       return -EINVAL;
+-              *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
++              *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
+               break;
+ #ifdef CONFIG_64BIT
+       case KVM_REG_MIPS_CP0_XCONTEXT:
+@@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm
+                             const struct kvm_one_reg *reg,
+                             s64 v)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       unsigned int idx;
+       int ret = 0;
+       unsigned int cur, change;
+@@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct
+ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       bool migrated, all;
+       /*
+@@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_v
+ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       if (current->flags & PF_VCPU)
+               kvm_vz_vcpu_save_wired(vcpu);
+@@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kv
+ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
+-      struct mips_coproc *cop0 = vcpu->arch.cop0;
++      struct mips_coproc *cop0 = &vcpu->arch.cop0;
+       unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
+       /*
diff --git a/queue-6.1/mips-loongson-fix-cpu_probe_loongson-again.patch b/queue-6.1/mips-loongson-fix-cpu_probe_loongson-again.patch
new file mode 100644 (file)
index 0000000..fb1a8bb
--- /dev/null
@@ -0,0 +1,85 @@
+From 65fee014dc41a774bcd94896f3fb380bc39d8dda Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Mon, 26 Jun 2023 15:50:14 +0800
+Subject: MIPS: Loongson: Fix cpu_probe_loongson() again
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 65fee014dc41a774bcd94896f3fb380bc39d8dda upstream.
+
+Commit 7db5e9e9e5e6c10d7d ("MIPS: loongson64: fix FTLB configuration")
+move decode_configs() from the beginning of cpu_probe_loongson() to the
+end in order to fix FTLB configuration. However, it breaks the CPUCFG
+decoding because decode_configs() use "c->options = xxxx" rather than
+"c->options |= xxxx", all information get from CPUCFG by decode_cpucfg()
+is lost.
+
+This causes error when creating a KVM guest on Loongson-3A4000:
+Exception Code: 4 not handled @ PC: 0000000087ad5981, inst: 0xcb7a1898 BadVaddr: 0x0 Status: 0x0
+
+Fix this by moving the c->cputype setting to the beginning and moving
+decode_configs() after that.
+
+Fixes: 7db5e9e9e5e6c10d7d ("MIPS: loongson64: fix FTLB configuration")
+Cc: stable@vger.kernel.org
+Cc: Huang Pei <huangpei@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/kernel/cpu-probe.c |    9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1675,7 +1675,10 @@ static inline void decode_cpucfg(struct
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ {
++      c->cputype = CPU_LOONGSON64;
++
+       /* All Loongson processors covered here define ExcCode 16 as GSExc. */
++      decode_configs(c);
+       c->options |= MIPS_CPU_GSEXCEX;
+       switch (c->processor_id & PRID_IMP_MASK) {
+@@ -1685,7 +1688,6 @@ static inline void cpu_probe_loongson(st
+               case PRID_REV_LOONGSON2K_R1_1:
+               case PRID_REV_LOONGSON2K_R1_2:
+               case PRID_REV_LOONGSON2K_R1_3:
+-                      c->cputype = CPU_LOONGSON64;
+                       __cpu_name[cpu] = "Loongson-2K";
+                       set_elf_platform(cpu, "gs264e");
+                       set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1698,14 +1700,12 @@ static inline void cpu_probe_loongson(st
+               switch (c->processor_id & PRID_REV_MASK) {
+               case PRID_REV_LOONGSON3A_R2_0:
+               case PRID_REV_LOONGSON3A_R2_1:
+-                      c->cputype = CPU_LOONGSON64;
+                       __cpu_name[cpu] = "ICT Loongson-3";
+                       set_elf_platform(cpu, "loongson3a");
+                       set_isa(c, MIPS_CPU_ISA_M64R2);
+                       break;
+               case PRID_REV_LOONGSON3A_R3_0:
+               case PRID_REV_LOONGSON3A_R3_1:
+-                      c->cputype = CPU_LOONGSON64;
+                       __cpu_name[cpu] = "ICT Loongson-3";
+                       set_elf_platform(cpu, "loongson3a");
+                       set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1725,7 +1725,6 @@ static inline void cpu_probe_loongson(st
+               c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+               break;
+       case PRID_IMP_LOONGSON_64G:
+-              c->cputype = CPU_LOONGSON64;
+               __cpu_name[cpu] = "ICT Loongson-3";
+               set_elf_platform(cpu, "loongson3a");
+               set_isa(c, MIPS_CPU_ISA_M64R2);
+@@ -1735,8 +1734,6 @@ static inline void cpu_probe_loongson(st
+               panic("Unknown Loongson Processor ID!");
+               break;
+       }
+-
+-      decode_configs(c);
+ }
+ #else
+ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
diff --git a/queue-6.1/misc-fastrpc-create-fastrpc-scalar-with-correct-buffer-count.patch b/queue-6.1/misc-fastrpc-create-fastrpc-scalar-with-correct-buffer-count.patch
new file mode 100644 (file)
index 0000000..7d5cf60
--- /dev/null
@@ -0,0 +1,37 @@
+From 0b4e32df3e09406b835d8230b9331273f2805058 Mon Sep 17 00:00:00 2001
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Date: Wed, 14 Jun 2023 17:24:45 +0530
+Subject: misc: fastrpc: Create fastrpc scalar with correct buffer count
+
+From: Ekansh Gupta <quic_ekangupt@quicinc.com>
+
+commit 0b4e32df3e09406b835d8230b9331273f2805058 upstream.
+
+A process can spawn a PD on DSP with some attributes that can be
+associated with the PD during spawn and run. The invocation
+corresponding to the create request with attributes has total
+4 buffers at the DSP side implementation. If this number is not
+correct, the invocation is expected to fail on DSP. Added change
+to use correct number of buffer count for creating fastrpc scalar.
+
+Fixes: d73f71c7c6ee ("misc: fastrpc: Add support for create remote init process")
+Cc: stable <stable@kernel.org>
+Tested-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Signed-off-by: Ekansh Gupta <quic_ekangupt@quicinc.com>
+Message-ID: <1686743685-21715-1-git-send-email-quic_ekangupt@quicinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/fastrpc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1260,7 +1260,7 @@ static int fastrpc_init_create_process(s
+       sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
+       if (init.attrs)
+-              sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
++              sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
+       err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
+                                     sc, args);
diff --git a/queue-6.1/powerpc-64s-fix-native_hpte_remove-to-be-irq-safe.patch b/queue-6.1/powerpc-64s-fix-native_hpte_remove-to-be-irq-safe.patch
new file mode 100644 (file)
index 0000000..ff2223c
--- /dev/null
@@ -0,0 +1,121 @@
+From 8bbe9fee5848371d4af101be445303cac8d880c5 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 17 May 2023 22:30:33 +1000
+Subject: powerpc/64s: Fix native_hpte_remove() to be irq-safe
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 8bbe9fee5848371d4af101be445303cac8d880c5 upstream.
+
+Lockdep warns that the use of the hpte_lock in native_hpte_remove() is
+not safe against an IRQ coming in:
+
+  ================================
+  WARNING: inconsistent lock state
+  6.4.0-rc2-g0c54f4d30ecc #1 Not tainted
+  --------------------------------
+  inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+  qemu-system-ppc/93865 [HC0[0]:SC0[0]:HE1:SE1] takes:
+  c0000000021f5180 (hpte_lock){+.?.}-{0:0}, at: native_lock_hpte+0x8/0xd0
+  {IN-SOFTIRQ-W} state was registered at:
+    lock_acquire+0x134/0x3f0
+    native_lock_hpte+0x44/0xd0
+    native_hpte_insert+0xd4/0x2a0
+    __hash_page_64K+0x218/0x4f0
+    hash_page_mm+0x464/0x840
+    do_hash_fault+0x11c/0x260
+    data_access_common_virt+0x210/0x220
+    __ip_select_ident+0x140/0x150
+    ...
+    net_rx_action+0x3bc/0x440
+    __do_softirq+0x180/0x534
+    ...
+    sys_sendmmsg+0x34/0x50
+    system_call_exception+0x128/0x320
+    system_call_common+0x160/0x2e4
+  ...
+   Possible unsafe locking scenario:
+
+         CPU0
+         ----
+    lock(hpte_lock);
+    <Interrupt>
+      lock(hpte_lock);
+
+   *** DEADLOCK ***
+  ...
+  Call Trace:
+    dump_stack_lvl+0x98/0xe0 (unreliable)
+    print_usage_bug.part.0+0x250/0x278
+    mark_lock+0xc9c/0xd30
+    __lock_acquire+0x440/0x1ca0
+    lock_acquire+0x134/0x3f0
+    native_lock_hpte+0x44/0xd0
+    native_hpte_remove+0xb0/0x190
+    kvmppc_mmu_map_page+0x650/0x698 [kvm_pr]
+    kvmppc_handle_pagefault+0x534/0x6e8 [kvm_pr]
+    kvmppc_handle_exit_pr+0x6d8/0xe90 [kvm_pr]
+    after_sprg3_load+0x80/0x90 [kvm_pr]
+    kvmppc_vcpu_run_pr+0x108/0x270 [kvm_pr]
+    kvmppc_vcpu_run+0x34/0x48 [kvm]
+    kvm_arch_vcpu_ioctl_run+0x340/0x470 [kvm]
+    kvm_vcpu_ioctl+0x338/0x8b8 [kvm]
+    sys_ioctl+0x7c4/0x13e0
+    system_call_exception+0x128/0x320
+    system_call_common+0x160/0x2e4
+
+I suspect kvm_pr is the only caller that doesn't already have IRQs
+disabled, which is why this hasn't been reported previously.
+
+Fix it by disabling IRQs in native_hpte_remove().
+
+Fixes: 35159b5717fa ("powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe")
+Cc: stable@vger.kernel.org # v6.1+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230517123033.18430-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/mm/book3s64/hash_native.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index 9342e79870df..430d1d935a7c 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
+ static long native_hpte_remove(unsigned long hpte_group)
+ {
++      unsigned long hpte_v, flags;
+       struct hash_pte *hptep;
+       int i;
+       int slot_offset;
+-      unsigned long hpte_v;
++
++      local_irq_save(flags);
+       DBG_LOW("    remove(group=%lx)\n", hpte_group);
+@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
+               slot_offset &= 0x7;
+       }
+-      if (i == HPTES_PER_GROUP)
+-              return -1;
++      if (i == HPTES_PER_GROUP) {
++              i = -1;
++              goto out;
++      }
+       /* Invalidate the hpte. NOTE: this also unlocks it */
+       release_hpte_lock();
+       hptep->v = 0;
+-
++out:
++      local_irq_restore(flags);
+       return i;
+ }
+-- 
+2.41.0
+
diff --git a/queue-6.1/powerpc-fail-build-if-using-recordmcount-with-binutils-v2.37.patch b/queue-6.1/powerpc-fail-build-if-using-recordmcount-with-binutils-v2.37.patch
new file mode 100644 (file)
index 0000000..df13a68
--- /dev/null
@@ -0,0 +1,49 @@
+From 25ea739ea1d4d3de41acc4f4eb2d1a97eee0eb75 Mon Sep 17 00:00:00 2001
+From: Naveen N Rao <naveen@kernel.org>
+Date: Tue, 30 May 2023 11:44:36 +0530
+Subject: powerpc: Fail build if using recordmcount with binutils v2.37
+
+From: Naveen N Rao <naveen@kernel.org>
+
+commit 25ea739ea1d4d3de41acc4f4eb2d1a97eee0eb75 upstream.
+
+binutils v2.37 drops unused section symbols, which prevents recordmcount
+from capturing mcount locations in sections that have no non-weak
+symbols. This results in a build failure with a message such as:
+       Cannot find symbol for section 12: .text.perf_callchain_kernel.
+       kernel/events/callchain.o: failed
+
+The change to binutils was reverted for v2.38, so this behavior is
+specific to binutils v2.37:
+https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=c09c8b42021180eee9495bd50d8b35e683d3901b
+
+Objtool is able to cope with such sections, so this issue is specific to
+recordmcount.
+
+Fail the build and print a warning if binutils v2.37 is detected and if
+we are using recordmcount.
+
+Cc: stable@vger.kernel.org
+Suggested-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Naveen N Rao <naveen@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230530061436.56925-1-naveen@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/Makefile |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -402,3 +402,11 @@ checkbin:
+               echo -n '*** Please use a different binutils version.' ; \
+               false ; \
+       fi
++      @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
++              "x${CONFIG_LD_IS_BFD}" = "xy" -a \
++              "${CONFIG_LD_VERSION}" = "23700" ; then \
++              echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \
++              echo 'is unable to handle.' ; \
++              echo '*** Please use a different binutils version.' ; \
++              false ; \
++      fi
diff --git a/queue-6.1/powerpc-security-fix-speculation_store_bypass-reporting-on-power10.patch b/queue-6.1/powerpc-security-fix-speculation_store_bypass-reporting-on-power10.patch
new file mode 100644 (file)
index 0000000..dab3be8
--- /dev/null
@@ -0,0 +1,89 @@
+From 5bcedc5931e7bd6928a2d8207078d4cb476b3b55 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 17 May 2023 17:49:45 +1000
+Subject: powerpc/security: Fix Speculation_Store_Bypass reporting on Power10
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 5bcedc5931e7bd6928a2d8207078d4cb476b3b55 upstream.
+
+Nageswara reported that /proc/self/status was showing "vulnerable" for
+the Speculation_Store_Bypass feature on Power10, eg:
+
+  $ grep Speculation_Store_Bypass: /proc/self/status
+  Speculation_Store_Bypass:       vulnerable
+
+But at the same time the sysfs files, and lscpu, were showing "Not
+affected".
+
+This turns out to simply be a bug in the reporting of the
+Speculation_Store_Bypass, aka. PR_SPEC_STORE_BYPASS, case.
+
+When SEC_FTR_STF_BARRIER was added, so that firmware could communicate
+the vulnerability was not present, the code in ssb_prctl_get() was not
+updated to check the new flag.
+
+So add the check for SEC_FTR_STF_BARRIER being disabled. Rather than
+adding the new check to the existing if block and expanding the comment
+to cover both cases, rewrite the three cases to be separate so they can
+be commented separately for clarity.
+
+Fixes: 84ed26fd00c5 ("powerpc/security: Add a security feature for STF barrier")
+Cc: stable@vger.kernel.org # v5.14+
+Reported-by: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Tested-by: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Reviewed-by: Russell Currey <ruscur@russell.cc>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230517074945.53188-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/security.c |   35 ++++++++++++++++++-----------------
+ 1 file changed, 18 insertions(+), 17 deletions(-)
+
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struc
+ static int ssb_prctl_get(struct task_struct *task)
+ {
++      /*
++       * The STF_BARRIER feature is on by default, so if it's off that means
++       * firmware has explicitly said the CPU is not vulnerable via either
++       * the hypercall or device tree.
++       */
++      if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
++              return PR_SPEC_NOT_AFFECTED;
++
++      /*
++       * If the system's CPU has no known barrier (see setup_stf_barrier())
++       * then assume that the CPU is not vulnerable.
++       */
+       if (stf_enabled_flush_types == STF_BARRIER_NONE)
+-              /*
+-               * We don't have an explicit signal from firmware that we're
+-               * vulnerable or not, we only have certain CPU revisions that
+-               * are known to be vulnerable.
+-               *
+-               * We assume that if we're on another CPU, where the barrier is
+-               * NONE, then we are not vulnerable.
+-               */
+               return PR_SPEC_NOT_AFFECTED;
+-      else
+-              /*
+-               * If we do have a barrier type then we are vulnerable. The
+-               * barrier is not a global or per-process mitigation, so the
+-               * only value we can report here is PR_SPEC_ENABLE, which
+-               * appears as "vulnerable" in /proc.
+-               */
+-              return PR_SPEC_ENABLE;
+-      return -EINVAL;
++      /*
++       * Otherwise the CPU is vulnerable. The barrier is not a global or
++       * per-process mitigation, so the only value that can be reported here
++       * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
++       */
++      return PR_SPEC_ENABLE;
+ }
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
index d2253cd8552e8b719920c252e2b7da911412952d..43086406debbd2c904b124c68d0fc90e9deb5633 100644 (file)
@@ -95,3 +95,9 @@ net-bcmgenet-ensure-mdio-unregistration-has-clocks-enabled.patch
 net-phy-dp83td510-fix-kernel-stall-during-netboot-in-dp83td510e-phy-driver.patch
 kasan-add-kasan_tag_mismatch-prototype.patch
 tracing-user_events-fix-incorrect-return-value-for-writing-operation-when-events-are-disabled.patch
+powerpc-fail-build-if-using-recordmcount-with-binutils-v2.37.patch
+misc-fastrpc-create-fastrpc-scalar-with-correct-buffer-count.patch
+powerpc-security-fix-speculation_store_bypass-reporting-on-power10.patch
+powerpc-64s-fix-native_hpte_remove-to-be-irq-safe.patch
+mips-loongson-fix-cpu_probe_loongson-again.patch
+mips-kvm-fix-null-pointer-dereference.patch