From: Greg Kroah-Hartman
Date: Mon, 25 Apr 2022 10:39:24 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v4.9.312~39
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4261ece2bb36dc4af2fcf5a8775fb3a30d898145;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
      arc-entry-fix-syscall_trace_exit-argument.patch
      asoc-soc-dapm-fix-two-incorrect-uses-of-list-iterator.patch
      e1000e-fix-possible-overflow-in-ltr-decoding.patch
      gpio-request-interrupts-after-irq-is-initialized.patch
      openvswitch-fix-oob-access-in-reserve_sfa_size.patch
      xtensa-fix-a7-clobbering-in-coprocessor-context-load-store.patch
      xtensa-patch_text-fixup-last-cpu-should-be-master.patch
---

diff --git a/queue-5.15/arc-entry-fix-syscall_trace_exit-argument.patch b/queue-5.15/arc-entry-fix-syscall_trace_exit-argument.patch
new file mode 100644
index 00000000000..c23458d848e
--- /dev/null
+++ b/queue-5.15/arc-entry-fix-syscall_trace_exit-argument.patch
@@ -0,0 +1,31 @@
+From b1c6ecfdd06907554518ec384ce8e99889d15193 Mon Sep 17 00:00:00 2001
+From: Sergey Matyukevich
+Date: Thu, 14 Apr 2022 11:17:22 +0300
+Subject: ARC: entry: fix syscall_trace_exit argument
+
+From: Sergey Matyukevich
+
+commit b1c6ecfdd06907554518ec384ce8e99889d15193 upstream.
+
+Function syscall_trace_exit expects a pointer to pt_regs. However,
+r0 is also used to keep the syscall return value. Restore the pointer
+to pt_regs before calling syscall_trace_exit.
+
+Cc:
+Signed-off-by: Sergey Matyukevich
+Signed-off-by: Vineet Gupta
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arc/kernel/entry.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -196,6 +196,7 @@ tracesys_exit:
+ 	st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
+
+ 	;POST Sys Call Ptrace Hook
++	mov r0, sp		; pt_regs needed
+ 	bl  @syscall_trace_exit
+ 	b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
+ 	;   we'd done before calling post hook above
diff --git a/queue-5.15/asoc-soc-dapm-fix-two-incorrect-uses-of-list-iterator.patch b/queue-5.15/asoc-soc-dapm-fix-two-incorrect-uses-of-list-iterator.patch
new file mode 100644
index 00000000000..801aab45512
--- /dev/null
+++ b/queue-5.15/asoc-soc-dapm-fix-two-incorrect-uses-of-list-iterator.patch
@@ -0,0 +1,59 @@
+From f730a46b931d894816af34a0ff8e4ad51565b39f Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong
+Date: Tue, 29 Mar 2022 09:21:34 +0800
+Subject: ASoC: soc-dapm: fix two incorrect uses of list iterator
+
+From: Xiaomeng Tong
+
+commit f730a46b931d894816af34a0ff8e4ad51565b39f upstream.
+
+These two bugs are here:
+	list_for_each_entry_safe_continue(w, n, list,
+					power_list);
+	list_for_each_entry_safe_continue(w, n, list,
+					power_list);
+
+After the list_for_each_entry_safe_continue() exits, the list iterator
+will always be a bogus pointer which points to an invalid struct object
+containing the HEAD member. The function pointer 'w->event' will be an
+invalid value, which can lead to a control-flow hijack if 'w' can be
+controlled.
+
+The original intention was to continue the outer list_for_each_entry_safe()
+loop with the same entry if w->event is NULL, but this misunderstands the
+meaning of list_for_each_entry_safe_continue().
+
+So just add a 'continue;' to fix the bug.
+
+Cc: stable@vger.kernel.org
+Fixes: 163cac061c973 ("ASoC: Factor out DAPM sequence execution")
+Signed-off-by: Xiaomeng Tong
+Link: https://lore.kernel.org/r/20220329012134.9375-1-xiam0nd.tong@gmail.com
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+---
+ sound/soc/soc-dapm.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1685,8 +1685,7 @@ static void dapm_seq_run(struct snd_soc_
+ 		switch (w->id) {
+ 		case snd_soc_dapm_pre:
+ 			if (!w->event)
+-				list_for_each_entry_safe_continue(w, n, list,
+-								  power_list);
++				continue;
+
+ 			if (event == SND_SOC_DAPM_STREAM_START)
+ 				ret = w->event(w,
+@@ -1698,8 +1697,7 @@ static void dapm_seq_run(struct snd_soc_
+
+ 		case snd_soc_dapm_post:
+ 			if (!w->event)
+-				list_for_each_entry_safe_continue(w, n, list,
+-								  power_list);
++				continue;
+
+ 			if (event == SND_SOC_DAPM_STREAM_START)
+ 				ret = w->event(w,
diff --git a/queue-5.15/e1000e-fix-possible-overflow-in-ltr-decoding.patch b/queue-5.15/e1000e-fix-possible-overflow-in-ltr-decoding.patch
new file mode 100644
index 00000000000..634a2c8ade6
--- /dev/null
+++ b/queue-5.15/e1000e-fix-possible-overflow-in-ltr-decoding.patch
@@ -0,0 +1,52 @@
+From 04ebaa1cfddae5f240cc7404f009133bb0389a47 Mon Sep 17 00:00:00 2001
+From: Sasha Neftin
+Date: Tue, 5 Apr 2022 18:56:01 +0300
+Subject: e1000e: Fix possible overflow in LTR decoding
+
+From: Sasha Neftin
+
+commit 04ebaa1cfddae5f240cc7404f009133bb0389a47 upstream.
+
+When we decode the latency and the max_latency, a u16 value may not fit
+the required size and could lead to the wrong LTR representation.
+
+Scaling is represented as:
+scale 0 - 1        (2^(5*0)) = 2^0
+scale 1 - 32       (2^(5*1)) = 2^5
+scale 2 - 1024     (2^(5*2)) = 2^10
+scale 3 - 32768    (2^(5*3)) = 2^15
+scale 4 - 1048576  (2^(5*4)) = 2^20
+scale 5 - 33554432 (2^(5*5)) = 2^25
+scale 4 and scale 5 require 20 and 25 bits respectively.
+scale 6 reserved.
+
+Replace the u16 type with the u32 type and allow a correct LTR
+representation.
+
+Cc: stable@vger.kernel.org
+Fixes: 44a13a5d99c7 ("e1000e: Fix the max snoop/no-snoop latency for 10M")
+Reported-by: James Hutchinson
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215689
+Suggested-by: Dima Ruinskiy
+Signed-off-by: Sasha Neftin
+Tested-by: Naama Meir
+Tested-by: James Hutchinson
+Signed-off-by: Tony Nguyen
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/ethernet/intel/e1000e/ich8lan.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(str
+ {
+ 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ 		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+-	u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
+-	u16 lat_enc_d = 0;	/* latency decoded */
++	u32 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
++	u32 lat_enc_d = 0;	/* latency decoded */
+ 	u16 lat_enc = 0;	/* latency encoded */
+
+ 	if (link) {
diff --git a/queue-5.15/gpio-request-interrupts-after-irq-is-initialized.patch b/queue-5.15/gpio-request-interrupts-after-irq-is-initialized.patch
new file mode 100644
index 00000000000..49723c9f9e8
--- /dev/null
+++ b/queue-5.15/gpio-request-interrupts-after-irq-is-initialized.patch
@@ -0,0 +1,73 @@
+From 06fb4ecfeac7e00d6704fa5ed19299f2fefb3cc9 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello
+Date: Fri, 22 Apr 2022 08:14:52 -0500
+Subject: gpio: Request interrupts after IRQ is initialized
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello
+
+commit 06fb4ecfeac7e00d6704fa5ed19299f2fefb3cc9 upstream.
+
+Commit 5467801f1fcb ("gpio: Restrict usage of GPIO chip irq members
+before initialization") attempted to fix a race condition that led to a
+NULL pointer, but in the process caused a regression for _AEI/_EVT
+declared GPIOs.
+
+This manifests in messages showing deferred probing while trying to
+allocate IRQs like so:
+
+ amd_gpio AMDI0030:00: Failed to translate GPIO pin 0x0000 to IRQ, err -517
+ amd_gpio AMDI0030:00: Failed to translate GPIO pin 0x002C to IRQ, err -517
+ amd_gpio AMDI0030:00: Failed to translate GPIO pin 0x003D to IRQ, err -517
+ [ .. more of the same .. ]
+
+The code for walking _AEI doesn't handle deferred probing and so this
+leads to non-functional GPIO interrupts.
+
+Fix this issue by moving the call to `acpi_gpiochip_request_interrupts`
+to occur after gc->irq.initialized is set.
+
+Fixes: 5467801f1fcb ("gpio: Restrict usage of GPIO chip irq members before initialization")
+Link: https://lore.kernel.org/linux-gpio/BL1PR12MB51577A77F000A008AA694675E2EF9@BL1PR12MB5157.namprd12.prod.outlook.com/
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1198697
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215850
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1979
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1976
+Reported-by: Mario Limonciello
+Signed-off-by: Mario Limonciello
+Reviewed-by: Shreeya Patel
+Tested-By: Samuel Čavoj
+Tested-By: lukeluk498@gmail.com Link:
+Reviewed-by: Andy Shevchenko
+Acked-by: Linus Walleij
+Reviewed-and-tested-by: Takashi Iwai
+Cc: Shreeya Patel
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/gpio/gpiolib.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1560,8 +1560,6 @@ static int gpiochip_add_irqchip(struct g
+
+ 	gpiochip_set_irq_hooks(gc);
+
+-	acpi_gpiochip_request_interrupts(gc);
+-
+ 	/*
+ 	 * Using barrier() here to prevent compiler from reordering
+ 	 * gc->irq.initialized before initialization of above
+@@ -1571,6 +1569,8 @@ static int gpiochip_add_irqchip(struct g
+
+ 	gc->irq.initialized = true;
+
++	acpi_gpiochip_request_interrupts(gc);
++
+ 	return 0;
+ }
+
diff --git a/queue-5.15/openvswitch-fix-oob-access-in-reserve_sfa_size.patch b/queue-5.15/openvswitch-fix-oob-access-in-reserve_sfa_size.patch
new file mode 100644
index 00000000000..fdcb56120be
--- /dev/null
+++ b/queue-5.15/openvswitch-fix-oob-access-in-reserve_sfa_size.patch
@@ -0,0 +1,83 @@
+From cefa91b2332d7009bc0be5d951d6cbbf349f90f8 Mon Sep 17 00:00:00 2001
+From: Paolo Valerio
+Date: Fri, 15 Apr 2022 10:08:41 +0200
+Subject: openvswitch: fix OOB access in reserve_sfa_size()
+
+From: Paolo Valerio
+
+commit cefa91b2332d7009bc0be5d951d6cbbf349f90f8 upstream.
+
+Given a sufficiently large number of actions, while copying and
+reserving memory for a new action of a new flow, if next_offset is
+greater than MAX_ACTIONS_BUFSIZE, the function reserve_sfa_size() does
+not return -EMSGSIZE as expected, but it allocates MAX_ACTIONS_BUFSIZE
+bytes increasing actions_len by req_size. This can then lead to an OOB
+write access, especially when further actions need to be copied.
+
+Fix it by rearranging the flow action size check.
+
+KASAN splat below:
+
+==================================================================
+BUG: KASAN: slab-out-of-bounds in reserve_sfa_size+0x1ba/0x380 [openvswitch]
+Write of size 65360 at addr ffff888147e4001c by task handler15/836
+
+CPU: 1 PID: 836 Comm: handler15 Not tainted 5.18.0-rc1+ #27
+...
+Call Trace:
+
+ dump_stack_lvl+0x45/0x5a
+ print_report.cold+0x5e/0x5db
+ ? __lock_text_start+0x8/0x8
+ ? reserve_sfa_size+0x1ba/0x380 [openvswitch]
+ kasan_report+0xb5/0x130
+ ? reserve_sfa_size+0x1ba/0x380 [openvswitch]
+ kasan_check_range+0xf5/0x1d0
+ memcpy+0x39/0x60
+ reserve_sfa_size+0x1ba/0x380 [openvswitch]
+ __add_action+0x24/0x120 [openvswitch]
+ ovs_nla_add_action+0xe/0x20 [openvswitch]
+ ovs_ct_copy_action+0x29d/0x1130 [openvswitch]
+ ? __kernel_text_address+0xe/0x30
+ ? unwind_get_return_address+0x56/0xa0
+ ? create_prof_cpu_mask+0x20/0x20
+ ? ovs_ct_verify+0xf0/0xf0 [openvswitch]
+ ? prep_compound_page+0x198/0x2a0
+ ? __kasan_check_byte+0x10/0x40
+ ? kasan_unpoison+0x40/0x70
+ ? ksize+0x44/0x60
+ ? reserve_sfa_size+0x75/0x380 [openvswitch]
+ __ovs_nla_copy_actions+0xc26/0x2070 [openvswitch]
+ ? __zone_watermark_ok+0x420/0x420
+ ? validate_set.constprop.0+0xc90/0xc90 [openvswitch]
+ ? __alloc_pages+0x1a9/0x3e0
+ ? __alloc_pages_slowpath.constprop.0+0x1da0/0x1da0
+ ? unwind_next_frame+0x991/0x1e40
+ ? __mod_node_page_state+0x99/0x120
+ ? __mod_lruvec_page_state+0x2e3/0x470
+ ? __kasan_kmalloc_large+0x90/0xe0
+ ovs_nla_copy_actions+0x1b4/0x2c0 [openvswitch]
+ ovs_flow_cmd_new+0x3cd/0xb10 [openvswitch]
+ ...
+
+Cc: stable@vger.kernel.org
+Fixes: f28cd2af22a0 ("openvswitch: fix flow actions reallocation")
+Signed-off-by: Paolo Valerio
+Acked-by: Eelco Chaudron
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/openvswitch/flow_netlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2436,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(s
+ 	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+
+ 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
++		if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
+ 			OVS_NLERR(log, "Flow action size exceeds max %u",
+ 				  MAX_ACTIONS_BUFSIZE);
+ 			return ERR_PTR(-EMSGSIZE);
diff --git a/queue-5.15/series b/queue-5.15/series
index aafdb539e6e..bfe7e291c34 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -96,3 +96,10 @@ powerpc-perf-fix-power9-event-alternatives.patch
 powerpc-perf-fix-power10-event-alternatives.patch
 perf-script-always-allow-field-data_src-for-auxtrace.patch
 perf-report-set-perf_sample_data_src-bit-for-arm-spe.patch
+xtensa-patch_text-fixup-last-cpu-should-be-master.patch
+xtensa-fix-a7-clobbering-in-coprocessor-context-load-store.patch
+openvswitch-fix-oob-access-in-reserve_sfa_size.patch
+gpio-request-interrupts-after-irq-is-initialized.patch
+asoc-soc-dapm-fix-two-incorrect-uses-of-list-iterator.patch
+e1000e-fix-possible-overflow-in-ltr-decoding.patch
+arc-entry-fix-syscall_trace_exit-argument.patch
diff --git a/queue-5.15/xtensa-fix-a7-clobbering-in-coprocessor-context-load-store.patch b/queue-5.15/xtensa-fix-a7-clobbering-in-coprocessor-context-load-store.patch
new file mode 100644
index 00000000000..9b1fc8af72a
--- /dev/null
+++ b/queue-5.15/xtensa-fix-a7-clobbering-in-coprocessor-context-load-store.patch
@@ -0,0 +1,43 @@
+From 839769c35477d4acc2369e45000ca7b0b6af39a7 Mon Sep 17 00:00:00 2001
+From: Max Filippov
+Date: Wed, 13 Apr 2022 22:44:36 -0700
+Subject: xtensa: fix a7 clobbering in coprocessor context load/store
+
+From: Max Filippov
+
+commit 839769c35477d4acc2369e45000ca7b0b6af39a7 upstream.
+
+Fast coprocessor exception handler saves a3..a6, but coprocessor context
+load/store code uses a4..a7 as temporaries, potentially clobbering a7.
+'Potentially' because coprocessor state load/store macros may not use
+all four temporary registers (and neither FPU nor HiFi macros do).
+Use a3..a6 as intended.
+
+Cc: stable@vger.kernel.org
+Fixes: c658eac628aa ("[XTENSA] Add support for configurable registers and coprocessors")
+Signed-off-by: Max Filippov
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/xtensa/kernel/coprocessor.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/xtensa/kernel/coprocessor.S
++++ b/arch/xtensa/kernel/coprocessor.S
+@@ -29,7 +29,7 @@
+ 	.if XTENSA_HAVE_COPROCESSOR(x);					\
+ 		.align 4;						\
+ 	.Lsave_cp_regs_cp##x:						\
+-		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
++		xchal_cp##x##_store a2 a3 a4 a5 a6;			\
+ 		jx	a0;						\
+ 	.endif
+
+@@ -46,7 +46,7 @@
+ 	.if XTENSA_HAVE_COPROCESSOR(x);					\
+ 		.align 4;						\
+ 	.Lload_cp_regs_cp##x:						\
+-		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
++		xchal_cp##x##_load a2 a3 a4 a5 a6;			\
+ 		jx	a0;						\
+ 	.endif
+
diff --git a/queue-5.15/xtensa-patch_text-fixup-last-cpu-should-be-master.patch b/queue-5.15/xtensa-patch_text-fixup-last-cpu-should-be-master.patch
new file mode 100644
index 00000000000..64d28111def
--- /dev/null
+++ b/queue-5.15/xtensa-patch_text-fixup-last-cpu-should-be-master.patch
@@ -0,0 +1,40 @@
+From ee69d4be8fd064cd08270b4808d2dfece3614ee0 Mon Sep 17 00:00:00 2001
+From: Guo Ren
+Date: Thu, 7 Apr 2022 15:33:22 +0800
+Subject: xtensa: patch_text: Fixup last cpu should be master
+
+From: Guo Ren
+
+commit ee69d4be8fd064cd08270b4808d2dfece3614ee0 upstream.
+
+These patch_text implementations use the stop_machine_cpuslocked
+infrastructure with an atomic cpu_count. The original idea: when the
+master CPU runs patch_text, the others should wait for it. But the
+current implementation uses the first CPU as the master, which cannot
+guarantee that the remaining CPUs are waiting. This patch makes the
+last CPU the master to eliminate the potential risk.
+
+Fixes: 64711f9a47d4 ("xtensa: implement jump_label support")
+Signed-off-by: Guo Ren
+Signed-off-by: Guo Ren
+Reviewed-by: Max Filippov
+Reviewed-by: Masami Hiramatsu
+Cc:
+Message-Id: <20220407073323.743224-4-guoren@kernel.org>
+Signed-off-by: Max Filippov
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/xtensa/kernel/jump_label.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/xtensa/kernel/jump_label.c
++++ b/arch/xtensa/kernel/jump_label.c
+@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void
+ {
+ 	struct patch *patch = data;
+
+-	if (atomic_inc_return(&patch->cpu_count) == 1) {
++	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+ 		local_patch_text(patch->addr, patch->data, patch->sz);
+ 		atomic_inc(&patch->cpu_count);
+ 	} else {