--- /dev/null
+From bc84fc504a03d21eebe1104aa2ab50556d540e5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 13:32:52 -0600
+Subject: accel/qaic: Fix the for loop used to walk SG table
+
+From: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
+
+[ Upstream commit c5e8e93897b7bb0a336bf3332f82f8d9f2b33f14 ]
+
+Only for_each_sgtable_dma_sg() should be used to walk through a SG table
+to grab correct bus address and length pair after calling DMA MAP API on
+a SG table as DMA MAP APIs updates the SG table and for_each_sgtable_sg()
+walks through the original SG table.
+
+Fixes: ff13be830333 ("accel/qaic: Add datapath")
+Fixes: 129776ac2e38 ("accel/qaic: Add control path")
+Signed-off-by: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Signed-off-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241004193252.3888544-1-quic_jhugo@quicinc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/qaic/qaic_control.c | 2 +-
+ drivers/accel/qaic/qaic_data.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
+index 9e8a8cbadf6bb..d8bdab69f8009 100644
+--- a/drivers/accel/qaic/qaic_control.c
++++ b/drivers/accel/qaic/qaic_control.c
+@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
+ nents = sgt->nents;
+ nents_dma = nents;
+ *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+- for_each_sgtable_sg(sgt, sg, i) {
++ for_each_sgtable_dma_sg(sgt, sg, i) {
+ *size -= sizeof(*asp);
+ /* Save 1K for possible follow-up transactions. */
+ if (*size < SZ_1K) {
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index e86e71c1cdd86..c20eb63750f51 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -184,7 +184,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
+ nents = 0;
+
+ size = size ? size : PAGE_SIZE;
+- for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
++ for_each_sgtable_dma_sg(sgt_in, sg, j) {
+ len = sg_dma_len(sg);
+
+ if (!len)
+@@ -221,7 +221,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
+
+ /* copy relevant sg node and fix page and length */
+ sgn = sgf;
+- for_each_sgtable_sg(sgt, sg, j) {
++ for_each_sgtable_dma_sg(sgt, sg, j) {
+ memcpy(sg, sgn, sizeof(*sg));
+ if (sgn == sgf) {
+ sg_dma_address(sg) += offf;
+@@ -301,7 +301,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
+ * fence.
+ */
+ dev_addr = req->dev_addr;
+- for_each_sgtable_sg(slice->sgt, sg, i) {
++ for_each_sgtable_dma_sg(slice->sgt, sg, i) {
+ slice->reqs[i].cmd = cmd;
+ slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
+ sg_dma_address(sg) : dev_addr);
+--
+2.43.0
+
--- /dev/null
+From 3059a6b8b886a6aac94cd4a9b9a925c4a3c6a724 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 01:16:45 +0300
+Subject: ALSA: hda/cs8409: Fix possible NULL dereference
+
+From: Murad Masimov <m.masimov@maxima.ru>
+
+[ Upstream commit c9bd4a82b4ed32c6d1c90500a52063e6e341517f ]
+
+If snd_hda_gen_add_kctl fails to allocate memory and returns NULL, then
+NULL pointer dereference will occur in the next line.
+
+Since dolphin_fixups function is a hda_fixup function which is not supposed
+to return any errors, add simple check before dereference, ignore the fail.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 20e507724113 ("ALSA: hda/cs8409: Add support for dolphin")
+Signed-off-by: Murad Masimov <m.masimov@maxima.ru>
+Link: https://patch.msgid.link/20241010221649.1305-1-m.masimov@maxima.ru
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_cs8409.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 26f3c31600d7b..614327218634c 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -1403,8 +1403,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
+ kctrl = snd_hda_gen_add_kctl(&spec->gen, "Line Out Playback Volume",
+ &cs42l42_dac_volume_mixer);
+ /* Update Line Out kcontrol template */
+- kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
+- HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
++ if (kctrl)
++ kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
++ HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
+ cs8409_enable_ur(codec, 0);
+ snd_hda_codec_set_name(codec, "CS8409/CS42L42");
+ break;
+--
+2.43.0
+
--- /dev/null
+From 20c9385ef1475176849129f263e14a780d3b55c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jul 2024 02:03:11 +0300
+Subject: ARM: dts: bcm2837-rpi-cm3-io3: Fix HDMI hpd-gpio pin
+
+From: Florian Klink <flokli@flokli.de>
+
+[ Upstream commit dc7785e4723510616d776862ddb4c08857a1bdb2 ]
+
+HDMI_HPD_N_1V8 is connected to GPIO pin 0, not 1.
+
+This fixes HDMI hotplug/output detection.
+
+See https://datasheets.raspberrypi.com/cm/cm3-schematics.pdf
+
+Signed-off-by: Florian Klink <flokli@flokli.de>
+Reviewed-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://lore.kernel.org/r/20240715230311.685641-1-flokli@flokli.de
+Reviewed-by: Stefan Wahren <wahrenst@gmx.net>
+Fixes: a54fe8a6cf66 ("ARM: dts: add Raspberry Pi Compute Module 3 and IO board")
+Signed-off-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
+index 72d26d130efaa..85f54fa595aa8 100644
+--- a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
++++ b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
+@@ -77,7 +77,7 @@
+ };
+
+ &hdmi {
+- hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
++ hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
+ power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
+ status = "okay";
+ };
+--
+2.43.0
+
--- /dev/null
+From 779fb1e6d02333838f4cd637583476ced5803d9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 17:07:08 +0800
+Subject: Bluetooth: bnep: fix wild-memory-access in proto_unregister
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 64a90991ba8d4e32e3173ddd83d0b24167a5668c ]
+
+There's issue as follows:
+ KASAN: maybe wild-memory-access in range [0xdead...108-0xdead...10f]
+ CPU: 3 UID: 0 PID: 2805 Comm: rmmod Tainted: G W
+ RIP: 0010:proto_unregister+0xee/0x400
+ Call Trace:
+ <TASK>
+ __do_sys_delete_module+0x318/0x580
+ do_syscall_64+0xc1/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+As bnep_init() ignores bnep_sock_init()'s return value, and bnep_sock_init()
+cleans up all of its resources on failure, removing the bnep module will
+then call bnep_sock_cleanup() to clean up the sock's resources again.
+To solve the above issue, just return bnep_sock_init()'s return value in
+bnep_init().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/bnep/core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index ec45f77fce218..344e2e063be68 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -745,8 +745,7 @@ static int __init bnep_init(void)
+ if (flt[0])
+ BT_INFO("BNEP filters: %s", flt);
+
+- bnep_sock_init();
+- return 0;
++ return bnep_sock_init();
+ }
+
+ static void __exit bnep_exit(void)
+--
+2.43.0
+
--- /dev/null
+From 5d6df866a454ac2c4a0b3353b77e2616a172c337 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 15:11:13 +0800
+Subject: bpf: Check the remaining info_cnt before repeating btf fields
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit 797d73ee232dd1833dec4824bc53a22032e97c1c ]
+
+When trying to repeat the btf fields for array of nested struct, it
+doesn't check the remaining info_cnt. The following splat will be
+reported when the value of ret * nelems is greater than BTF_FIELDS_MAX:
+
+ ------------[ cut here ]------------
+ UBSAN: array-index-out-of-bounds in ../kernel/bpf/btf.c:3951:49
+ index 11 is out of range for type 'btf_field_info [11]'
+ CPU: 6 UID: 0 PID: 411 Comm: test_progs ...... 6.11.0-rc4+ #1
+ Tainted: [O]=OOT_MODULE
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ...
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x57/0x70
+ dump_stack+0x10/0x20
+ ubsan_epilogue+0x9/0x40
+ __ubsan_handle_out_of_bounds+0x6f/0x80
+ ? kallsyms_lookup_name+0x48/0xb0
+ btf_parse_fields+0x992/0xce0
+ map_create+0x591/0x770
+ __sys_bpf+0x229/0x2410
+ __x64_sys_bpf+0x1f/0x30
+ x64_sys_call+0x199/0x9f0
+ do_syscall_64+0x3b/0xc0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+ RIP: 0033:0x7fea56f2cc5d
+ ......
+ </TASK>
+ ---[ end trace ]---
+
+Fix it by checking the remaining info_cnt in btf_repeat_fields() before
+repeating the btf fields.
+
+Fixes: 64e8ee814819 ("bpf: look into the types of the fields of a struct type recursively.")
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20241008071114.3718177-2-houtao@huaweicloud.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/btf.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 9b068afd17953..5f4f1d0bc23a4 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -3528,7 +3528,7 @@ static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_
+ * (i + 1) * elem_size
+ * where i is the repeat index and elem_size is the size of an element.
+ */
+-static int btf_repeat_fields(struct btf_field_info *info,
++static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
+ u32 field_cnt, u32 repeat_cnt, u32 elem_size)
+ {
+ u32 i, j;
+@@ -3548,6 +3548,12 @@ static int btf_repeat_fields(struct btf_field_info *info,
+ }
+ }
+
++ /* The type of struct size or variable size is u32,
++ * so the multiplication will not overflow.
++ */
++ if (field_cnt * (repeat_cnt + 1) > info_cnt)
++ return -E2BIG;
++
+ cur = field_cnt;
+ for (i = 0; i < repeat_cnt; i++) {
+ memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
+@@ -3592,7 +3598,7 @@ static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *
+ info[i].off += off;
+
+ if (nelems > 1) {
+- err = btf_repeat_fields(info, ret, nelems - 1, t->size);
++ err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
+ if (err == 0)
+ ret *= nelems;
+ else
+@@ -3686,10 +3692,10 @@ static int btf_find_field_one(const struct btf *btf,
+
+ if (ret == BTF_FIELD_IGNORE)
+ return 0;
+- if (nelems > info_cnt)
++ if (!info_cnt)
+ return -E2BIG;
+ if (nelems > 1) {
+- ret = btf_repeat_fields(info, 1, nelems - 1, sz);
++ ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
+ if (ret < 0)
+ return ret;
+ }
+--
+2.43.0
+
--- /dev/null
+From 8a062d1f775d23c6a4b0c4fc49960bd97075cb9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Sep 2024 10:41:18 +0200
+Subject: bpf: devmap: provide rxq after redirect
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Florian Kauer <florian.kauer@linutronix.de>
+
+[ Upstream commit ca9984c5f0ab3690d98b13937b2485a978c8dd73 ]
+
+rxq contains a pointer to the device from where
+the redirect happened. Currently, the BPF program
+that was executed after a redirect via BPF_MAP_TYPE_DEVMAP*
+does not have it set.
+
+This is particularly bad since accessing ingress_ifindex, e.g.
+
+SEC("xdp")
+int prog(struct xdp_md *pkt)
+{
+ return bpf_redirect_map(&dev_redirect_map, 0, 0);
+}
+
+SEC("xdp/devmap")
+int prog_after_redirect(struct xdp_md *pkt)
+{
+ bpf_printk("ifindex %i", pkt->ingress_ifindex);
+ return XDP_PASS;
+}
+
+depends on access to rxq, so a NULL pointer gets dereferenced:
+
+<1>[ 574.475170] BUG: kernel NULL pointer dereference, address: 0000000000000000
+<1>[ 574.475188] #PF: supervisor read access in kernel mode
+<1>[ 574.475194] #PF: error_code(0x0000) - not-present page
+<6>[ 574.475199] PGD 0 P4D 0
+<4>[ 574.475207] Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+<4>[ 574.475217] CPU: 4 UID: 0 PID: 217 Comm: kworker/4:1 Not tainted 6.11.0-rc5-reduced-00859-g780801200300 #23
+<4>[ 574.475226] Hardware name: Intel(R) Client Systems NUC13ANHi7/NUC13ANBi7, BIOS ANRPL357.0026.2023.0314.1458 03/14/2023
+<4>[ 574.475231] Workqueue: mld mld_ifc_work
+<4>[ 574.475247] RIP: 0010:bpf_prog_5e13354d9cf5018a_prog_after_redirect+0x17/0x3c
+<4>[ 574.475257] Code: cc cc cc cc cc cc cc 80 00 00 00 cc cc cc cc cc cc cc cc f3 0f 1e fa 0f 1f 44 00 00 66 90 55 48 89 e5 f3 0f 1e fa 48 8b 57 20 <48> 8b 52 00 8b 92 e0 00 00 00 48 bf f8 a6 d5 c4 5d a0 ff ff be 0b
+<4>[ 574.475263] RSP: 0018:ffffa62440280c98 EFLAGS: 00010206
+<4>[ 574.475269] RAX: ffffa62440280cd8 RBX: 0000000000000001 RCX: 0000000000000000
+<4>[ 574.475274] RDX: 0000000000000000 RSI: ffffa62440549048 RDI: ffffa62440280ce0
+<4>[ 574.475278] RBP: ffffa62440280c98 R08: 0000000000000002 R09: 0000000000000001
+<4>[ 574.475281] R10: ffffa05dc8b98000 R11: ffffa05f577fca40 R12: ffffa05dcab24000
+<4>[ 574.475285] R13: ffffa62440280ce0 R14: ffffa62440549048 R15: ffffa62440549000
+<4>[ 574.475289] FS: 0000000000000000(0000) GS:ffffa05f4f700000(0000) knlGS:0000000000000000
+<4>[ 574.475294] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+<4>[ 574.475298] CR2: 0000000000000000 CR3: 000000025522e000 CR4: 0000000000f50ef0
+<4>[ 574.475303] PKRU: 55555554
+<4>[ 574.475306] Call Trace:
+<4>[ 574.475313] <IRQ>
+<4>[ 574.475318] ? __die+0x23/0x70
+<4>[ 574.475329] ? page_fault_oops+0x180/0x4c0
+<4>[ 574.475339] ? skb_pp_cow_data+0x34c/0x490
+<4>[ 574.475346] ? kmem_cache_free+0x257/0x280
+<4>[ 574.475357] ? exc_page_fault+0x67/0x150
+<4>[ 574.475368] ? asm_exc_page_fault+0x26/0x30
+<4>[ 574.475381] ? bpf_prog_5e13354d9cf5018a_prog_after_redirect+0x17/0x3c
+<4>[ 574.475386] bq_xmit_all+0x158/0x420
+<4>[ 574.475397] __dev_flush+0x30/0x90
+<4>[ 574.475407] veth_poll+0x216/0x250 [veth]
+<4>[ 574.475421] __napi_poll+0x28/0x1c0
+<4>[ 574.475430] net_rx_action+0x32d/0x3a0
+<4>[ 574.475441] handle_softirqs+0xcb/0x2c0
+<4>[ 574.475451] do_softirq+0x40/0x60
+<4>[ 574.475458] </IRQ>
+<4>[ 574.475461] <TASK>
+<4>[ 574.475464] __local_bh_enable_ip+0x66/0x70
+<4>[ 574.475471] __dev_queue_xmit+0x268/0xe40
+<4>[ 574.475480] ? selinux_ip_postroute+0x213/0x420
+<4>[ 574.475491] ? alloc_skb_with_frags+0x4a/0x1d0
+<4>[ 574.475502] ip6_finish_output2+0x2be/0x640
+<4>[ 574.475512] ? nf_hook_slow+0x42/0xf0
+<4>[ 574.475521] ip6_finish_output+0x194/0x300
+<4>[ 574.475529] ? __pfx_ip6_finish_output+0x10/0x10
+<4>[ 574.475538] mld_sendpack+0x17c/0x240
+<4>[ 574.475548] mld_ifc_work+0x192/0x410
+<4>[ 574.475557] process_one_work+0x15d/0x380
+<4>[ 574.475566] worker_thread+0x29d/0x3a0
+<4>[ 574.475573] ? __pfx_worker_thread+0x10/0x10
+<4>[ 574.475580] ? __pfx_worker_thread+0x10/0x10
+<4>[ 574.475587] kthread+0xcd/0x100
+<4>[ 574.475597] ? __pfx_kthread+0x10/0x10
+<4>[ 574.475606] ret_from_fork+0x31/0x50
+<4>[ 574.475615] ? __pfx_kthread+0x10/0x10
+<4>[ 574.475623] ret_from_fork_asm+0x1a/0x30
+<4>[ 574.475635] </TASK>
+<4>[ 574.475637] Modules linked in: veth br_netfilter bridge stp llc iwlmvm x86_pkg_temp_thermal iwlwifi efivarfs nvme nvme_core
+<4>[ 574.475662] CR2: 0000000000000000
+<4>[ 574.475668] ---[ end trace 0000000000000000 ]---
+
+Therefore, provide it to the program by setting rxq properly.
+
+Fixes: cb261b594b41 ("bpf: Run devmap xdp_prog on flush instead of bulk enqueue")
+Reviewed-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Signed-off-by: Florian Kauer <florian.kauer@linutronix.de>
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/r/20240911-devel-koalo-fix-ingress-ifindex-v4-1-5c643ae10258@linutronix.de
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 9e0e3b0a18e40..7878be18e9d26 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -333,9 +333,11 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
+
+ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+ struct xdp_frame **frames, int n,
+- struct net_device *dev)
++ struct net_device *tx_dev,
++ struct net_device *rx_dev)
+ {
+- struct xdp_txq_info txq = { .dev = dev };
++ struct xdp_txq_info txq = { .dev = tx_dev };
++ struct xdp_rxq_info rxq = { .dev = rx_dev };
+ struct xdp_buff xdp;
+ int i, nframes = 0;
+
+@@ -346,6 +348,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+
+ xdp_convert_frame_to_buff(xdpf, &xdp);
+ xdp.txq = &txq;
++ xdp.rxq = &rxq;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+@@ -360,7 +363,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+ bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+- trace_xdp_exception(dev, xdp_prog, act);
++ trace_xdp_exception(tx_dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xdp_return_frame_rx_napi(xdpf);
+@@ -388,7 +391,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+ }
+
+ if (bq->xdp_prog) {
+- to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
++ to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
+ if (!to_send)
+ goto out;
+ }
+--
+2.43.0
+
--- /dev/null
+From 6da093ffea5f2c012912f0de40eeb879399a22f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 15:49:11 +0200
+Subject: bpf: Fix incorrect delta propagation between linked registers
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 3878ae04e9fc24dacb77a1d32bd87e7d8108599e ]
+
+Nathaniel reported a bug in the linked scalar delta tracking, which can lead
+to accepting a program with OOB access. The specific code is related to the
+sync_linked_regs() function and the BPF_ADD_CONST flag, which signifies a
+constant offset between two scalar registers tracked by the same register id.
+
+The verifier attempts to track "similar" scalars in order to propagate bounds
+information learned about one scalar to others. For instance, if r1 and r2
+are known to contain the same value, then upon encountering 'if (r1 != 0x1234)
+goto xyz', not only does it know that r1 is equal to 0x1234 on the path where
+that conditional jump is not taken, it also knows that r2 is.
+
+Additionally, with env->bpf_capable set, the verifier will track scalars
+which should be a constant delta apart (if r1 is known to be one greater than
+r2, then if r1 is known to be equal to 0x1234, r2 must be equal to 0x1233.)
+The code path for the latter in adjust_reg_min_max_vals() is reached when
+processing both 32 and 64-bit addition operations. While adjust_reg_min_max_vals()
+knows whether dst_reg was produced by a 32 or a 64-bit addition (based on the
+alu32 bool), the only information saved in dst_reg is the id of the source
+register (reg->id, or'ed by BPF_ADD_CONST) and the value of the constant
+offset (reg->off).
+
+Later, the function sync_linked_regs() will attempt to use this information
+to propagate bounds information from one register (known_reg) to others,
+meaning, for all R in linked_regs, it copies known_reg range (and possibly
+adjusting delta) into R for the case of R->id == known_reg->id.
+
+For the delta adjustment, meaning, matching reg->id with BPF_ADD_CONST, the
+verifier adjusts the register as reg = known_reg; reg += delta where delta
+is computed as (s32)reg->off - (s32)known_reg->off and placed as a scalar
+into a fake_reg to then simulate the addition of reg += fake_reg. This is
+only correct, however, if the value in reg was created by a 64-bit addition.
+When reg contains the result of a 32-bit addition operation, its upper 32
+bits will always be zero. sync_linked_regs() on the other hand, may cause
+the verifier to believe that the addition between fake_reg and reg overflows
+into those upper bits. For example, if reg was generated by adding the
+constant 1 to known_reg using a 32-bit alu operation, then reg->off is 1
+and known_reg->off is 0. If known_reg is known to be the constant 0xFFFFFFFF,
+sync_linked_regs() will tell the verifier that reg is equal to the constant
+0x100000000. This is incorrect as the actual value of reg will be 0, as the
+32-bit addition will wrap around.
+
+Example:
+
+ 0: (b7) r0 = 0; R0_w=0
+ 1: (18) r1 = 0x80000001; R1_w=0x80000001
+ 3: (37) r1 /= 1; R1_w=scalar()
+ 4: (bf) r2 = r1; R1_w=scalar(id=1) R2_w=scalar(id=1)
+ 5: (bf) r4 = r1; R1_w=scalar(id=1) R4_w=scalar(id=1)
+ 6: (04) w2 += 2147483647; R2_w=scalar(id=1+2147483647,smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))
+ 7: (04) w4 += 0 ; R4_w=scalar(id=1+0,smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))
+ 8: (15) if r2 == 0x0 goto pc+1
+ 10: R0=0 R1=0xffffffff80000001 R2=0x7fffffff R4=0xffffffff80000001 R10=fp0
+
+What can be seen here is that r1 is copied to r2 and r4, such that {r1,r2,r4}.id
+are all the same which later lets sync_linked_regs() to be invoked. Then, in
+a next step constants are added with alu32 to r2 and r4, setting their ->off,
+as well as id |= BPF_ADD_CONST. Next, the conditional will bind r2 and
+propagate ranges to its linked registers. The verifier now believes the upper
+32 bits of r4 are r4=0xffffffff80000001, while actually r4=r1=0x80000001.
+
+One approach for a simple fix suitable also for stable is to limit the constant
+delta tracking to only 64-bit alu addition. If necessary at some later point,
+BPF_ADD_CONST could be split into BPF_ADD_CONST64 and BPF_ADD_CONST32 to avoid
+mixing the two under the tradeoff to further complicate sync_linked_regs().
+However, none of the added tests from dedf56d775c0 ("selftests/bpf: Add tests
+for add_const") make this necessary at this point, meaning, BPF CI also passes
+with just limiting tracking to 64-bit alu addition.
+
+Fixes: 98d7ca374ba4 ("bpf: Track delta between "linked" registers.")
+Reported-by: Nathaniel Theis <nathaniel.theis@nccgroup.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/bpf/20241016134913.32249-1-daniel@iogearbox.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 42d6bc5392757..5b8b1d0e76cf1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14136,12 +14136,13 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ * r1 += 0x1
+ * if r2 < 1000 goto ...
+ * use r1 in memory access
+- * So remember constant delta between r2 and r1 and update r1 after
+- * 'if' condition.
++ * So for 64-bit alu remember constant delta between r2 and r1 and
++ * update r1 after 'if' condition.
+ */
+- if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD &&
+- dst_reg->id && is_reg_const(src_reg, alu32)) {
+- u64 val = reg_const_value(src_reg, alu32);
++ if (env->bpf_capable &&
++ BPF_OP(insn->code) == BPF_ADD && !alu32 &&
++ dst_reg->id && is_reg_const(src_reg, false)) {
++ u64 val = reg_const_value(src_reg, false);
+
+ if ((dst_reg->id & BPF_ADD_CONST) ||
+ /* prevent overflow in find_equal_scalars() later */
+--
+2.43.0
+
--- /dev/null
+From 044684dcd5a3998c97542b44eff2a8226b9b1a03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 14:00:47 -0700
+Subject: bpf: Fix iter/task tid filtering
+
+From: Jordan Rome <linux@jordanrome.com>
+
+[ Upstream commit 9495a5b731fcaf580448a3438d63601c88367661 ]
+
+In userspace, you can add a tid filter by setting
+the "task.tid" field for "bpf_iter_link_info".
+However, `get_pid_task` when called for the
+`BPF_TASK_ITER_TID` type should have been using
+`PIDTYPE_PID` (tid) instead of `PIDTYPE_TGID` (pid).
+
+Fixes: f0d74c4da1f0 ("bpf: Parameterize task iterators.")
+Signed-off-by: Jordan Rome <linux@jordanrome.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20241016210048.1213935-1-linux@jordanrome.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/task_iter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
+index 02aa9db8d7961..5af9e130e500f 100644
+--- a/kernel/bpf/task_iter.c
++++ b/kernel/bpf/task_iter.c
+@@ -99,7 +99,7 @@ static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *co
+ rcu_read_lock();
+ pid = find_pid_ns(common->pid, common->ns);
+ if (pid) {
+- task = get_pid_task(pid, PIDTYPE_TGID);
++ task = get_pid_task(pid, PIDTYPE_PID);
+ *tid = common->pid;
+ }
+ rcu_read_unlock();
+--
+2.43.0
+
--- /dev/null
+From 2cf822c5a1530459071ddb8a5d6a32c05d57e3d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 15:27:07 +0200
+Subject: bpf: fix kfunc btf caching for modules
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+
+[ Upstream commit 6cb86a0fdece87e126323ec1bb19deb16a52aedf ]
+
+The verifier contains a cache for looking up module BTF objects when
+calling kfuncs defined in modules. This cache uses a 'struct
+bpf_kfunc_btf_tab', which contains a sorted list of BTF objects that
+were already seen in the current verifier run, and the BTF objects are
+looked up by the offset stored in the relocated call instruction using
+bsearch().
+
+The first time a given offset is seen, the module BTF is loaded from the
+file descriptor passed in by libbpf, and stored into the cache. However,
+there's a bug in the code storing the new entry: it stores a pointer to
+the new cache entry, then calls sort() to keep the cache sorted for the
+next lookup using bsearch(), and then returns the entry that was just
+stored through the stored pointer. However, because sort() modifies the
+list of entries in place *by value*, the stored pointer may no longer
+point to the right entry, in which case the wrong BTF object will be
+returned.
+
+The end result of this is an intermittent bug where, if a BPF program
+calls two functions with the same signature in two different modules,
+the function from the wrong module may sometimes end up being called.
+Whether this happens depends on the order of the calls in the BPF
+program (as that affects whether sort() reorders the array of BTF
+objects), making it especially hard to track down. Simon, credited as
+reporter below, spent significant effort analysing and creating a
+reproducer for this issue. The reproducer is added as a selftest in a
+subsequent patch.
+
+The fix is straight forward: simply don't use the stored pointer after
+calling sort(). Since we already have an on-stack pointer to the BTF
+object itself at the point where the function return, just use that, and
+populate it from the cache entry in the branch where the lookup
+succeeds.
+
+Fixes: 2357672c54c3 ("bpf: Introduce BPF support for kernel module function calls")
+Reported-by: Simon Sundberg <simon.sundberg@kau.se>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Signed-off-by: Toke HĂžiland-JĂžrgensen <toke@redhat.com>
+Link: https://lore.kernel.org/r/20241010-fix-kfunc-btf-caching-for-modules-v2-1-745af6c1af98@redhat.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 5c5dea5e137e7..a8d49b2b58e1e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2715,10 +2715,16 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
+ b->module = mod;
+ b->offset = offset;
+
++ /* sort() reorders entries by value, so b may no longer point
++ * to the right entry after this
++ */
+ sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
+ kfunc_btf_cmp_by_off, NULL);
++ } else {
++ btf = b->btf;
+ }
+- return b->btf;
++
++ return btf;
+ }
+
+ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
+--
+2.43.0
+
--- /dev/null
+From 547f3ac4a0f509ac9c0977c5fed8da30dd141427 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 19:32:51 +0000
+Subject: bpf: Fix link info netfilter flags to populate defrag flag
+
+From: Tyrone Wu <wudevelops@gmail.com>
+
+[ Upstream commit 92f3715e1eba1d41e55be06159dc3d856b18326d ]
+
+This fix correctly populates the `bpf_link_info.netfilter.flags` field
+when user passes the `BPF_F_NETFILTER_IP_DEFRAG` flag.
+
+Fixes: 91721c2d02d3 ("netfilter: bpf: Support BPF_F_NETFILTER_IP_DEFRAG in netfilter link")
+Signed-off-by: Tyrone Wu <wudevelops@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Florian Westphal <fw@strlen.de>
+Cc: Daniel Xu <dxu@dxuuu.xyz>
+Link: https://lore.kernel.org/bpf/20241011193252.178997-1-wudevelops@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_bpf_link.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
+index 5257d5e7eb09d..797fe8a9971e7 100644
+--- a/net/netfilter/nf_bpf_link.c
++++ b/net/netfilter/nf_bpf_link.c
+@@ -150,11 +150,12 @@ static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+ {
+ struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
++ const struct nf_defrag_hook *hook = nf_link->defrag_hook;
+
+ info->netfilter.pf = nf_link->hook_ops.pf;
+ info->netfilter.hooknum = nf_link->hook_ops.hooknum;
+ info->netfilter.priority = nf_link->hook_ops.priority;
+- info->netfilter.flags = 0;
++ info->netfilter.flags = hook ? BPF_F_NETFILTER_IP_DEFRAG : 0;
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 1ef4c7cc9f30db21d34ab8126764c58682e0a103 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 18:09:58 +0200
+Subject: bpf: Fix memory leak in bpf_core_apply
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit 45126b155e3b5201179cdc038504bf93a8ccd921 ]
+
+We need to free specs properly.
+
+Fixes: 3d2786d65aaa ("bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos")
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/bpf/20241007160958.607434-1-jolsa@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/btf.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 7783b16b87cfe..9b068afd17953 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -8905,6 +8905,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ if (!type) {
+ bpf_log(ctx->log, "relo #%u: bad type id %u\n",
+ relo_idx, relo->type_id);
++ kfree(specs);
+ return -EINVAL;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 87076f4a0662631bcf9d3cda08f1c65be8708e7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 15:49:12 +0200
+Subject: bpf: Fix print_reg_state's constant scalar dump
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 3e9e708757ca3b7eb65a820031d62fea1a265709 ]
+
+print_reg_state() should not consider adding reg->off to reg->var_off.value
+when dumping scalars. Scalars can be produced with reg->off != 0 through
+BPF_ADD_CONST, and thus as-is this can skew the register log dump.
+
+Fixes: 98d7ca374ba4 ("bpf: Track delta between "linked" registers.")
+Reported-by: Nathaniel Theis <nathaniel.theis@nccgroup.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20241016134913.32249-2-daniel@iogearbox.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/log.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
+index 5aebfc3051e3a..4a858fdb6476f 100644
+--- a/kernel/bpf/log.c
++++ b/kernel/bpf/log.c
+@@ -688,8 +688,7 @@ static void print_reg_state(struct bpf_verifier_env *env,
+ if (t == SCALAR_VALUE && reg->precise)
+ verbose(env, "P");
+ if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
+- /* reg->off should be 0 for SCALAR_VALUE */
+- verbose_snum(env, reg->var_off.value + reg->off);
++ verbose_snum(env, reg->var_off.value);
+ return;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 21cfd4e2edc6152d0c1638c154e300e280356f3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 15:11:53 +0300
+Subject: bpf: Fix truncation bug in coerce_reg_to_size_sx()
+
+From: Dimitar Kanaliev <dimitar.kanaliev@siteground.com>
+
+[ Upstream commit ae67b9fb8c4e981e929a665dcaa070f4b05ebdb4 ]
+
+coerce_reg_to_size_sx() updates the register state after a sign-extension
+operation. However, there's a bug in the assignment order of the unsigned
+min/max values, leading to incorrect truncation:
+
+ 0: (85) call bpf_get_prandom_u32#7 ; R0_w=scalar()
+ 1: (57) r0 &= 1 ; R0_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=1,var_off=(0x0; 0x1))
+ 2: (07) r0 += 254 ; R0_w=scalar(smin=umin=smin32=umin32=254,smax=umax=smax32=umax32=255,var_off=(0xfe; 0x1))
+ 3: (bf) r0 = (s8)r0 ; R0_w=scalar(smin=smin32=-2,smax=smax32=-1,umin=umin32=0xfffffffe,umax=0xffffffff,var_off=(0xfffffffffffffffe; 0x1))
+
+In the current implementation, the unsigned 32-bit min/max values
+(u32_min_value and u32_max_value) are assigned directly from the 64-bit
+signed min/max values (s64_min and s64_max):
+
+ reg->umin_value = reg->u32_min_value = s64_min;
+ reg->umax_value = reg->u32_max_value = s64_max;
+
+Due to the chain assignment, this is equivalent to:
+
+ reg->u32_min_value = s64_min; // Unintended truncation
+ reg->umin_value = reg->u32_min_value;
+ reg->u32_max_value = s64_max; // Unintended truncation
+ reg->umax_value = reg->u32_max_value;
+
+Fixes: 1f9a1ea821ff ("bpf: Support new sign-extension load insns")
+Reported-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Reported-by: Zac Ecob <zacecob@protonmail.com>
+Signed-off-by: Dimitar Kanaliev <dimitar.kanaliev@siteground.com>
+Acked-by: Yonghong Song <yonghong.song@linux.dev>
+Reviewed-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Link: https://lore.kernel.org/r/20241014121155.92887-2-dimitar.kanaliev@siteground.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a8d49b2b58e1e..42d6bc5392757 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6255,10 +6255,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
+
+ /* both of s64_max/s64_min positive or negative */
+ if ((s64_max >= 0) == (s64_min >= 0)) {
+- reg->smin_value = reg->s32_min_value = s64_min;
+- reg->smax_value = reg->s32_max_value = s64_max;
+- reg->umin_value = reg->u32_min_value = s64_min;
+- reg->umax_value = reg->u32_max_value = s64_max;
++ reg->s32_min_value = reg->smin_value = s64_min;
++ reg->s32_max_value = reg->smax_value = s64_max;
++ reg->u32_min_value = reg->umin_value = s64_min;
++ reg->u32_max_value = reg->umax_value = s64_max;
+ reg->var_off = tnum_range(s64_min, s64_max);
+ return;
+ }
+--
+2.43.0
+
--- /dev/null
+From 6093d88d160ce418a9758ea0df282202142e12b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 16:43:11 +0000
+Subject: bpf: fix unpopulated name_len field in perf_event link info
+
+From: Tyrone Wu <wudevelops@gmail.com>
+
+[ Upstream commit 4deecdd29cf29844c7bd164d72dc38d2e672f64e ]
+
+Previously when retrieving `bpf_link_info.perf_event` for
+kprobe/uprobe/tracepoint, the `name_len` field was not populated by the
+kernel, leaving it to reflect the value initially set by the user. This
+behavior was inconsistent with how other input/output string buffer
+fields function (e.g. `raw_tracepoint.tp_name_len`).
+
+This patch fills `name_len` with the actual size of the string name.
+
+Fixes: 1b715e1b0ec5 ("bpf: Support ->fill_link_info for perf_event")
+Signed-off-by: Tyrone Wu <wudevelops@gmail.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Yafang Shao <laoar.shao@gmail.com>
+Link: https://lore.kernel.org/r/20241008164312.46269-1-wudevelops@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/syscall.c | 29 ++++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 21fb9c4d498fb..26e69d4fc3dad 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3636,15 +3636,16 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
+ }
+
+ static int bpf_perf_link_fill_common(const struct perf_event *event,
+- char __user *uname, u32 ulen,
++ char __user *uname, u32 *ulenp,
+ u64 *probe_offset, u64 *probe_addr,
+ u32 *fd_type, unsigned long *missed)
+ {
+ const char *buf;
+- u32 prog_id;
++ u32 prog_id, ulen;
+ size_t len;
+ int err;
+
++ ulen = *ulenp;
+ if (!ulen ^ !uname)
+ return -EINVAL;
+
+@@ -3652,10 +3653,17 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
+ probe_offset, probe_addr, missed);
+ if (err)
+ return err;
++
++ if (buf) {
++ len = strlen(buf);
++ *ulenp = len + 1;
++ } else {
++ *ulenp = 1;
++ }
+ if (!uname)
+ return 0;
++
+ if (buf) {
+- len = strlen(buf);
+ err = bpf_copy_to_user(uname, buf, ulen, len);
+ if (err)
+ return err;
+@@ -3680,7 +3688,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
+
+ uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
+ ulen = info->perf_event.kprobe.name_len;
+- err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
++ err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
+ &type, &missed);
+ if (err)
+ return err;
+@@ -3688,7 +3696,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
+ info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
+ else
+ info->perf_event.type = BPF_PERF_EVENT_KPROBE;
+-
++ info->perf_event.kprobe.name_len = ulen;
+ info->perf_event.kprobe.offset = offset;
+ info->perf_event.kprobe.missed = missed;
+ if (!kallsyms_show_value(current_cred()))
+@@ -3710,7 +3718,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
+
+ uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
+ ulen = info->perf_event.uprobe.name_len;
+- err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
++ err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
+ &type, NULL);
+ if (err)
+ return err;
+@@ -3719,6 +3727,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
+ info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
+ else
+ info->perf_event.type = BPF_PERF_EVENT_UPROBE;
++ info->perf_event.uprobe.name_len = ulen;
+ info->perf_event.uprobe.offset = offset;
+ info->perf_event.uprobe.cookie = event->bpf_cookie;
+ return 0;
+@@ -3744,12 +3753,18 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
+ {
+ char __user *uname;
+ u32 ulen;
++ int err;
+
+ uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
+ ulen = info->perf_event.tracepoint.name_len;
++ err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
++ if (err)
++ return err;
++
+ info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
++ info->perf_event.tracepoint.name_len = ulen;
+ info->perf_event.tracepoint.cookie = event->bpf_cookie;
+- return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
++ return 0;
+ }
+
+ static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
+--
+2.43.0
+
--- /dev/null
+From e37e857740314751fbcb4bf5218e6966bc46b19c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 00:08:02 +0000
+Subject: bpf: Fix unpopulated path_size when uprobe_multi fields unset
+
+From: Tyrone Wu <wudevelops@gmail.com>
+
+[ Upstream commit ad6b5b6ea9b764018249285a4fe0a2226bef4caa ]
+
+Previously when retrieving `bpf_link_info.uprobe_multi` with `path` and
+`path_size` fields unset, the `path_size` field is not populated
+(remains 0). This behavior was inconsistent with how other input/output
+string buffer fields work, as the field should be populated in cases
+when:
+- both buffer and length are set (currently works as expected)
+- both buffer and length are unset (not working as expected)
+
+This patch now fills the `path_size` field when `path` and `path_size`
+are unset.
+
+Fixes: e56fdbfb06e2 ("bpf: Add link_info support for uprobe multi link")
+Signed-off-by: Tyrone Wu <wudevelops@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20241011000803.681190-1-wudevelops@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/bpf_trace.c | 36 +++++++++++++++++-------------------
+ 1 file changed, 17 insertions(+), 19 deletions(-)
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index add26dc27d7e3..d9bc5ef1cafc3 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3222,7 +3222,8 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_uprobe_multi_link *umulti_link;
+ u32 ucount = info->uprobe_multi.count;
+ int err = 0, i;
+- long left;
++ char *p, *buf;
++ long left = 0;
+
+ if (!upath ^ !upath_size)
+ return -EINVAL;
+@@ -3236,26 +3237,23 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+ info->uprobe_multi.pid = umulti_link->task ?
+ task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
+
+- if (upath) {
+- char *p, *buf;
+-
+- upath_size = min_t(u32, upath_size, PATH_MAX);
+-
+- buf = kmalloc(upath_size, GFP_KERNEL);
+- if (!buf)
+- return -ENOMEM;
+- p = d_path(&umulti_link->path, buf, upath_size);
+- if (IS_ERR(p)) {
+- kfree(buf);
+- return PTR_ERR(p);
+- }
+- upath_size = buf + upath_size - p;
+- left = copy_to_user(upath, p, upath_size);
++ upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
++ buf = kmalloc(upath_size, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++ p = d_path(&umulti_link->path, buf, upath_size);
++ if (IS_ERR(p)) {
+ kfree(buf);
+- if (left)
+- return -EFAULT;
+- info->uprobe_multi.path_size = upath_size;
++ return PTR_ERR(p);
+ }
++ upath_size = buf + upath_size - p;
++
++ if (upath)
++ left = copy_to_user(upath, p, upath_size);
++ kfree(buf);
++ if (left)
++ return -EFAULT;
++ info->uprobe_multi.path_size = upath_size;
+
+ if (!uoffsets && !ucookies && !uref_ctr_offsets)
+ return 0;
+--
+2.43.0
+
--- /dev/null
+From 9c555d2aae4d28bc57fd2743b6013581e1d7c0b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Sep 2024 14:56:24 +0200
+Subject: bpf: Make sure internal and UAPI bpf_redirect flags don't overlap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 09d88791c7cd888d5195c84733caf9183dcfbd16 ]
+
+The bpf_redirect_info is shared between the SKB and XDP redirect paths,
+and the two paths use the same numeric flag values in the ri->flags
+field (specifically, BPF_F_BROADCAST == BPF_F_NEXTHOP). This means that
+if skb bpf_redirect_neigh() is used with a non-NULL params argument and,
+subsequently, an XDP redirect is performed using the same
+bpf_redirect_info struct, the XDP path will get confused and end up
+crashing, which syzbot managed to trigger.
+
+With the stack-allocated bpf_redirect_info, the structure is no longer
+shared between the SKB and XDP paths, so the crash doesn't happen
+anymore. However, different code paths using identically-numbered flag
+values in the same struct field still seems like a bit of a mess, so
+this patch cleans that up by moving the flag definitions together and
+redefining the three flags in BPF_F_REDIRECT_INTERNAL to not overlap
+with the flags used for XDP. It also adds a BUILD_BUG_ON() check to make
+sure the overlap is not re-introduced by mistake.
+
+Fixes: e624d4ed4aa8 ("xdp: Extend xdp_redirect_map with broadcast support")
+Reported-by: syzbot+cca39e6e84a367a7e6f6@syzkaller.appspotmail.com
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Closes: https://syzkaller.appspot.com/bug?extid=cca39e6e84a367a7e6f6
+Link: https://lore.kernel.org/bpf/20240920125625.59465-1-toke@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/bpf.h | 13 +++++--------
+ net/core/filter.c | 8 +++++---
+ 2 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 35bcf52dbc652..fea4bca4066c1 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -6046,11 +6046,6 @@ enum {
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+ };
+
+-/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
+-enum {
+- BPF_F_INGRESS = (1ULL << 0),
+-};
+-
+ /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
+ enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+@@ -6197,10 +6192,12 @@ enum {
+ BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
+ };
+
+-/* Flags for bpf_redirect_map helper */
++/* Flags for bpf_redirect and bpf_redirect_map helpers */
+ enum {
+- BPF_F_BROADCAST = (1ULL << 3),
+- BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
++ BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
++ BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
++ BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
++#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
+ };
+
+ #define __bpf_md_ptr(type, name) \
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 0e719c7c43bb7..b7a5f525e65b8 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2437,9 +2437,9 @@ static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
+
+ /* Internal, non-exposed redirect flags. */
+ enum {
+- BPF_F_NEIGH = (1ULL << 1),
+- BPF_F_PEER = (1ULL << 2),
+- BPF_F_NEXTHOP = (1ULL << 3),
++ BPF_F_NEIGH = (1ULL << 16),
++ BPF_F_PEER = (1ULL << 17),
++ BPF_F_NEXTHOP = (1ULL << 18),
+ #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
+ };
+
+@@ -2449,6 +2449,8 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
+ struct sk_buff *clone;
+ int ret;
+
++ BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS);
++
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+ return -EINVAL;
+
+--
+2.43.0
+
--- /dev/null
+From af58c2a5c6b69e2d5004375ea41fb85daf0579d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Oct 2024 18:26:39 +0200
+Subject: bpf, sockmap: SK_DROP on attempted redirects of unsupported af_vsock
+
+From: Michal Luczaj <mhal@rbox.co>
+
+[ Upstream commit 9c5bd93edf7b8834aecaa7c340b852d5990d7c78 ]
+
+Don't mislead the callers of bpf_{sk,msg}_redirect_{map,hash}(): make sure
+to immediately and visibly fail the forwarding of unsupported af_vsock
+packets.
+
+Fixes: 634f1a7110b4 ("vsock: support sockmap")
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20241013-vsock-fixes-for-redir-v2-1-d6577bbfe742@rbox.co
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 5 +++++
+ net/core/sock_map.c | 8 ++++++++
+ 2 files changed, 13 insertions(+)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 2d4149075091b..f127fc268a5ef 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2715,6 +2715,11 @@ static inline bool sk_is_stream_unix(const struct sock *sk)
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ }
+
++static inline bool sk_is_vsock(const struct sock *sk)
++{
++ return sk->sk_family == AF_VSOCK;
++}
++
+ /**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk: socket to eat this skb from
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 724b6856fcc3e..219fd8f1ca2a4 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -656,6 +656,8 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+ sk = __sock_map_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ return SK_DROP;
++ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
++ return SK_DROP;
+
+ skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
+ return SK_PASS;
+@@ -684,6 +686,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
+ return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
++ if (sk_is_vsock(sk))
++ return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
+@@ -1258,6 +1262,8 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
+ sk = __sock_hash_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ return SK_DROP;
++ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
++ return SK_DROP;
+
+ skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
+ return SK_PASS;
+@@ -1286,6 +1292,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
+ return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
++ if (sk_is_vsock(sk))
++ return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
+--
+2.43.0
+
--- /dev/null
+From cdc3e4354d579cac65f644e226a8e79efe53cfb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Sep 2024 14:08:43 -0700
+Subject: bpf: sync_linked_regs() must preserve subreg_def
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+[ Upstream commit e9bd9c498cb0f5843996dbe5cbce7a1836a83c70 ]
+
+Range propagation must not affect subreg_def marks, otherwise the
+following example is rewritten by verifier incorrectly when
+BPF_F_TEST_RND_HI32 flag is set:
+
+ 0: call bpf_ktime_get_ns call bpf_ktime_get_ns
+ 1: r0 &= 0x7fffffff after verifier r0 &= 0x7fffffff
+ 2: w1 = w0 rewrites w1 = w0
+ 3: if w0 < 10 goto +0 --------------> r11 = 0x2f5674a6 (r)
+ 4: r1 >>= 32 r11 <<= 32 (r)
+ 5: r0 = r1 r1 |= r11 (r)
+ 6: exit; if w0 < 0xa goto pc+0
+ r1 >>= 32
+ r0 = r1
+ exit
+
+(or zero extension of w1 at (2) is missing for architectures that
+ require zero extension for upper register half).
+
+The following happens w/o this patch:
+- r0 is marked as not a subreg at (0);
+- w1 is marked as subreg at (2);
+- w1 subreg_def is overridden at (3) by copy_register_state();
+- w1 is read at (5) but mark_insn_zext() does not mark (2)
+ for zero extension, because w1 subreg_def is not set;
+- because of BPF_F_TEST_RND_HI32 flag verifier inserts random
+ value for hi32 bits of (2) (marked (r));
+- this random value is read at (5).
+
+Fixes: 75748837b7e5 ("bpf: Propagate scalar ranges through register assignments.")
+Reported-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Lonial Con <kongln9170@gmail.com>
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Closes: https://lore.kernel.org/bpf/7e2aa30a62d740db182c170fdd8f81c596df280d.camel@gmail.com
+Link: https://lore.kernel.org/bpf/20240924210844.1758441-1-eddyz87@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d5215cb1747f1..5c5dea5e137e7 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -15140,8 +15140,12 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
+ continue;
+ if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
+ reg->off == known_reg->off) {
++ s32 saved_subreg_def = reg->subreg_def;
++
+ copy_register_state(reg, known_reg);
++ reg->subreg_def = saved_subreg_def;
+ } else {
++ s32 saved_subreg_def = reg->subreg_def;
+ s32 saved_off = reg->off;
+
+ fake_reg.type = SCALAR_VALUE;
+@@ -15154,6 +15158,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
+ * otherwise another find_equal_scalars() will be incorrect.
+ */
+ reg->off = saved_off;
++ reg->subreg_def = saved_subreg_def;
+
+ scalar32_min_max_add(reg, &fake_reg);
+ scalar_min_max_add(reg, &fake_reg);
+--
+2.43.0
+
--- /dev/null
+From 6e42950105ee79c8fb3c5042982980302b840177 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Sep 2024 16:06:59 -0300
+Subject: bpf: Use raw_spinlock_t in ringbuf
+
+From: Wander Lairson Costa <wander.lairson@gmail.com>
+
+[ Upstream commit 8b62645b09f870d70c7910e7550289d444239a46 ]
+
+The function __bpf_ringbuf_reserve is invoked from a tracepoint, which
+disables preemption. Using spinlock_t in this context can lead to a
+"sleep in atomic" warning in the RT variant. This issue is illustrated
+in the example below:
+
+BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
+in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 556208, name: test_progs
+preempt_count: 1, expected: 0
+RCU nest depth: 1, expected: 1
+INFO: lockdep is turned off.
+Preemption disabled at:
+[<ffffd33a5c88ea44>] migrate_enable+0xc0/0x39c
+CPU: 7 PID: 556208 Comm: test_progs Tainted: G
+Hardware name: Qualcomm SA8775P Ride (DT)
+Call trace:
+ dump_backtrace+0xac/0x130
+ show_stack+0x1c/0x30
+ dump_stack_lvl+0xac/0xe8
+ dump_stack+0x18/0x30
+ __might_resched+0x3bc/0x4fc
+ rt_spin_lock+0x8c/0x1a4
+ __bpf_ringbuf_reserve+0xc4/0x254
+ bpf_ringbuf_reserve_dynptr+0x5c/0xdc
+ bpf_prog_ac3d15160d62622a_test_read_write+0x104/0x238
+ trace_call_bpf+0x238/0x774
+ perf_call_bpf_enter.isra.0+0x104/0x194
+ perf_syscall_enter+0x2f8/0x510
+ trace_sys_enter+0x39c/0x564
+ syscall_trace_enter+0x220/0x3c0
+ do_el0_svc+0x138/0x1dc
+ el0_svc+0x54/0x130
+ el0t_64_sync_handler+0x134/0x150
+ el0t_64_sync+0x17c/0x180
+
+Switch the spinlock to raw_spinlock_t to avoid this error.
+
+Fixes: 457f44363a88 ("bpf: Implement BPF ring buffer and verifier support for it")
+Reported-by: Brian Grech <bgrech@redhat.com>
+Signed-off-by: Wander Lairson Costa <wander.lairson@gmail.com>
+Signed-off-by: Wander Lairson Costa <wander@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/r/20240920190700.617253-1-wander@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/ringbuf.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index e20b90c361316..de3b681d1d13d 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -29,7 +29,7 @@ struct bpf_ringbuf {
+ u64 mask;
+ struct page **pages;
+ int nr_pages;
+- spinlock_t spinlock ____cacheline_aligned_in_smp;
++ raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
+ /* For user-space producer ring buffers, an atomic_t busy bit is used
+ * to synchronize access to the ring buffers in the kernel, rather than
+ * the spinlock that is used for kernel-producer ring buffers. This is
+@@ -173,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
+ if (!rb)
+ return NULL;
+
+- spin_lock_init(&rb->spinlock);
++ raw_spin_lock_init(&rb->spinlock);
+ atomic_set(&rb->busy, 0);
+ init_waitqueue_head(&rb->waitq);
+ init_irq_work(&rb->work, bpf_ringbuf_notify);
+@@ -421,10 +421,10 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ cons_pos = smp_load_acquire(&rb->consumer_pos);
+
+ if (in_nmi()) {
+- if (!spin_trylock_irqsave(&rb->spinlock, flags))
++ if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
+ return NULL;
+ } else {
+- spin_lock_irqsave(&rb->spinlock, flags);
++ raw_spin_lock_irqsave(&rb->spinlock, flags);
+ }
+
+ pend_pos = rb->pending_pos;
+@@ -450,7 +450,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ */
+ if (new_prod_pos - cons_pos > rb->mask ||
+ new_prod_pos - pend_pos > rb->mask) {
+- spin_unlock_irqrestore(&rb->spinlock, flags);
++ raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+ return NULL;
+ }
+
+@@ -462,7 +462,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ /* pairs with consumer's smp_load_acquire() */
+ smp_store_release(&rb->producer_pos, new_prod_pos);
+
+- spin_unlock_irqrestore(&rb->spinlock, flags);
++ raw_spin_unlock_irqrestore(&rb->spinlock, flags);
+
+ return (void *)hdr + BPF_RINGBUF_HDR_SZ;
+ }
+--
+2.43.0
+
--- /dev/null
+From d9f38f348aecdbc97d431c17bd43bd543b60c6da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Oct 2024 18:26:42 +0200
+Subject: bpf, vsock: Drop static vsock_bpf_prot initialization
+
+From: Michal Luczaj <mhal@rbox.co>
+
+[ Upstream commit 19039f279797efbe044cae41ee216c5fe481fc33 ]
+
+vsock_bpf_prot is set up at runtime. Remove the superfluous init.
+
+No functional change intended.
+
+Fixes: 634f1a7110b4 ("vsock: support sockmap")
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20241013-vsock-fixes-for-redir-v2-4-d6577bbfe742@rbox.co
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/vmw_vsock/vsock_bpf.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index c42c5cc18f324..4aa6e74ec2957 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -114,14 +114,6 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ return copied;
+ }
+
+-/* Copy of original proto with updated sock_map methods */
+-static struct proto vsock_bpf_prot = {
+- .close = sock_map_close,
+- .recvmsg = vsock_bpf_recvmsg,
+- .sock_is_readable = sk_msg_is_readable,
+- .unhash = sock_map_unhash,
+-};
+-
+ static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
+ {
+ *prot = *base;
+--
+2.43.0
+
--- /dev/null
+From 0c198c5cf7f64fb0ba332c5a0f2317425a00d53e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 15:09:02 -0700
+Subject: cdrom: Avoid barrier_nospec() in cdrom_ioctl_media_changed()
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+[ Upstream commit b0bf1afde7c34698cf61422fa8ee60e690dc25c3 ]
+
+The barrier_nospec() after the array bounds check is overkill and
+painfully slow for arches which implement it.
+
+Furthermore, most arches don't implement it, so they remain exposed to
+Spectre v1 (which can affect pretty much any CPU with branch
+prediction).
+
+Instead, clamp the user pointer to a valid range so it's guaranteed to
+be a valid array index even when the bounds check mispredicts.
+
+Fixes: 8270cb10c068 ("cdrom: Fix spectre-v1 gadget")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lore.kernel.org/r/1d86f4d9d8fba68e5ca64cdeac2451b95a8bf872.1729202937.git.jpoimboe@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cdrom/cdrom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 9b0f37d4b9d49..6a99a459b80b2 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ return -EINVAL;
+
+ /* Prevent arg from speculatively bypassing the length check */
+- barrier_nospec();
++ arg = array_index_nospec(arg, cdi->capacity);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+--
+2.43.0
+
--- /dev/null
+From 817e55746382df4cb09ce77167e6fb78a93fe33d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2024 13:32:05 +0000
+Subject: clk: rockchip: fix finding of maximum clock ID
+
+From: Yao Zi <ziyao@disroot.org>
+
+[ Upstream commit ad1081a0da2744141d12e94ff816ac91feb871ca ]
+
+If an ID of a branch's child is greater than current maximum, we should
+set new maximum to the child's ID, instead of its parent's.
+
+Fixes: 2dc66a5ab2c6 ("clk: rockchip: rk3588: fix CLK_NR_CLKS usage")
+Signed-off-by: Yao Zi <ziyao@disroot.org>
+Link: https://lore.kernel.org/r/20240912133204.29089-2-ziyao@disroot.org
+Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Reviewed-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/rockchip/clk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index 2fa7253c73b2c..88629a9abc9c9 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -439,7 +439,7 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
+ if (list->id > max)
+ max = list->id;
+ if (list->child && list->child->id > max)
+- max = list->id;
++ max = list->child->id;
+ }
+
+ return max;
+--
+2.43.0
+
--- /dev/null
+From 70b4bf77e09ee31d6cb7e03bab4e9447c6030b72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 12:23:04 +0000
+Subject: cpufreq/amd-pstate: Fix amd_pstate mode switch on shared memory
+ systems
+
+From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
+
+[ Upstream commit c10e50a469b5ec91eabf653526a22bdce03a9bca ]
+
+While switching the driver mode between active and passive, Collaborative
+Processor Performance Control (CPPC) is disabled in
+amd_pstate_unregister_driver(). But, it is not enabled back while registering
+the new driver (passive or active). This leads to the new driver mode not
+working correctly, so enable it back in amd_pstate_register_driver().
+
+Fixes: 3ca7bc818d8c ("cpufreq: amd-pstate: Add guided mode control support via sysfs")
+Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Link: https://lore.kernel.org/r/20241004122303.94283-1-Dhananjay.Ugwekar@amd.com
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/amd-pstate.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 589fde37ccd7a..929b9097a6c17 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -1281,11 +1281,21 @@ static int amd_pstate_register_driver(int mode)
+ return -EINVAL;
+
+ cppc_state = mode;
++
++ ret = amd_pstate_enable(true);
++ if (ret) {
++ pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
++ ret);
++ amd_pstate_driver_cleanup();
++ return ret;
++ }
++
+ ret = cpufreq_register_driver(current_pstate_driver);
+ if (ret) {
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
++
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 39b235d7c28406c1c2aa83204e7ddfe11f286d6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 19:01:48 +0530
+Subject: drm/amd/amdgpu: Fix double unlock in amdgpu_mes_add_ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+[ Upstream commit e7457532cb7167516263150ceae86f36d6ef9683 ]
+
+This patch addresses a double unlock issue in the amdgpu_mes_add_ring
+function. The mutex was being unlocked twice under certain error
+conditions, which could lead to undefined behavior.
+
+The fix ensures that the mutex is unlocked only once before jumping to
+the clean_up_memory label. The unlock operation is moved to just before
+the goto statement within the conditional block that checks the return
+value of amdgpu_ring_init. This prevents the second unlock attempt after
+the clean_up_memory label, which is no longer necessary as the mutex is
+already unlocked by this point in the code flow.
+
+This change resolves the potential double unlock and maintains the
+correct mutex handling throughout the function.
+
+Fixes below:
+Commit d0c423b64765 ("drm/amdgpu/mes: use ring for kernel queue
+submission"), leads to the following Smatch static checker warning:
+
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1240 amdgpu_mes_add_ring()
+ warn: double unlock '&adev->mes.mutex_hidden' (orig line 1213)
+
+drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+ 1143 int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ 1144 int queue_type, int idx,
+ 1145 struct amdgpu_mes_ctx_data *ctx_data,
+ 1146 struct amdgpu_ring **out)
+ 1147 {
+ 1148 struct amdgpu_ring *ring;
+ 1149 struct amdgpu_mes_gang *gang;
+ 1150 struct amdgpu_mes_queue_properties qprops = {0};
+ 1151 int r, queue_id, pasid;
+ 1152
+ 1153 /*
+ 1154 * Avoid taking any other locks under MES lock to avoid circular
+ 1155 * lock dependencies.
+ 1156 */
+ 1157 amdgpu_mes_lock(&adev->mes);
+ 1158 gang = idr_find(&adev->mes.gang_id_idr, gang_id);
+ 1159 if (!gang) {
+ 1160 DRM_ERROR("gang id %d doesn't exist\n", gang_id);
+ 1161 amdgpu_mes_unlock(&adev->mes);
+ 1162 return -EINVAL;
+ 1163 }
+ 1164 pasid = gang->process->pasid;
+ 1165
+ 1166 ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
+ 1167 if (!ring) {
+ 1168 amdgpu_mes_unlock(&adev->mes);
+ 1169 return -ENOMEM;
+ 1170 }
+ 1171
+ 1172 ring->ring_obj = NULL;
+ 1173 ring->use_doorbell = true;
+ 1174 ring->is_mes_queue = true;
+ 1175 ring->mes_ctx = ctx_data;
+ 1176 ring->idx = idx;
+ 1177 ring->no_scheduler = true;
+ 1178
+ 1179 if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ 1180 int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+ 1181 compute[ring->idx].mec_hpd);
+ 1182 ring->eop_gpu_addr =
+ 1183 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+ 1184 }
+ 1185
+ 1186 switch (queue_type) {
+ 1187 case AMDGPU_RING_TYPE_GFX:
+ 1188 ring->funcs = adev->gfx.gfx_ring[0].funcs;
+ 1189 ring->me = adev->gfx.gfx_ring[0].me;
+ 1190 ring->pipe = adev->gfx.gfx_ring[0].pipe;
+ 1191 break;
+ 1192 case AMDGPU_RING_TYPE_COMPUTE:
+ 1193 ring->funcs = adev->gfx.compute_ring[0].funcs;
+ 1194 ring->me = adev->gfx.compute_ring[0].me;
+ 1195 ring->pipe = adev->gfx.compute_ring[0].pipe;
+ 1196 break;
+ 1197 case AMDGPU_RING_TYPE_SDMA:
+ 1198 ring->funcs = adev->sdma.instance[0].ring.funcs;
+ 1199 break;
+ 1200 default:
+ 1201 BUG();
+ 1202 }
+ 1203
+ 1204 r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+ 1205 AMDGPU_RING_PRIO_DEFAULT, NULL);
+ 1206 if (r)
+ 1207 goto clean_up_memory;
+ 1208
+ 1209 amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
+ 1210
+ 1211 dma_fence_wait(gang->process->vm->last_update, false);
+ 1212 dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
+ 1213 amdgpu_mes_unlock(&adev->mes);
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ 1214
+ 1215 r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
+ 1216 if (r)
+ 1217 goto clean_up_ring;
+ ^^^^^^^^^^^^^^^^^^
+
+ 1218
+ 1219 ring->hw_queue_id = queue_id;
+ 1220 ring->doorbell_index = qprops.doorbell_off;
+ 1221
+ 1222 if (queue_type == AMDGPU_RING_TYPE_GFX)
+ 1223 sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
+ 1224 else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
+ 1225 sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
+ 1226 queue_id);
+ 1227 else if (queue_type == AMDGPU_RING_TYPE_SDMA)
+ 1228 sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
+ 1229 queue_id);
+ 1230 else
+ 1231 BUG();
+ 1232
+ 1233 *out = ring;
+ 1234 return 0;
+ 1235
+ 1236 clean_up_ring:
+ 1237 amdgpu_ring_fini(ring);
+ 1238 clean_up_memory:
+ 1239 kfree(ring);
+--> 1240 amdgpu_mes_unlock(&adev->mes);
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ 1241 return r;
+ 1242 }
+
+Fixes: d0c423b64765 ("drm/amdgpu/mes: use ring for kernel queue submission")
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Hawking Zhang <Hawking.Zhang@amd.com>
+Suggested-by: Jack Xiao <Jack.Xiao@amd.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Jack Xiao <Jack.Xiao@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit bfaf1883605fd0c0dbabacd67ed49708470d5ea4)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 1cb1ec7beefed..2304a13fcb048 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1124,8 +1124,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+
+ r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+- if (r)
++ if (r) {
++ amdgpu_mes_unlock(&adev->mes);
+ goto clean_up_memory;
++ }
+
+ amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
+
+@@ -1158,7 +1160,6 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ amdgpu_ring_fini(ring);
+ clean_up_memory:
+ kfree(ring);
+- amdgpu_mes_unlock(&adev->mes);
+ return r;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 97e3f39b2b99f73b7f98654e2334cb02f33064d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 15:13:34 -0700
+Subject: drm/msm/a6xx+: Insert a fence wait before SMMU table update
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 77ad507dbb7ec1ecd60fc081d03616960ef596fd ]
+
+The CP_SMMU_TABLE_UPDATE _should_ be waiting for idle, but on some
+devices (x1-85, possibly others), it seems to pass that barrier while
+there are still things in the event completion FIFO waiting to be
+written back to memory.
+
+Work around that by adding a fence wait before context switch. The
+CP_EVENT_WRITE that writes the fence is the last write from a submit,
+so seeing this value hit memory is a reliable indication that it is
+safe to proceed with the context switch.
+
+v2: Only emit CP_WAIT_TIMESTAMP on a7xx, as it is not supported on a6xx.
+ Conversely, I've not been able to reproduce this issue on a6xx, so
+ hopefully it is limited to a7xx, or perhaps just certain a7xx
+ devices.
+
+Fixes: af66706accdf ("drm/msm/a6xx: Add skeleton A7xx support")
+Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/63
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index bcaec86ac67a5..89b379060596d 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -101,9 +101,10 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ }
+
+ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+- struct msm_ringbuffer *ring, struct msm_file_private *ctx)
++ struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
+ {
+ bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
++ struct msm_file_private *ctx = submit->queue->ctx;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ phys_addr_t ttbr;
+ u32 asid;
+@@ -115,6 +116,15 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+ if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ return;
+
++ if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
++ /* Wait for previous submit to complete before continuing: */
++ OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
++ OUT_RING(ring, 0);
++ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
++ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
++ OUT_RING(ring, submit->seqno - 1);
++ }
++
+ if (!sysprof) {
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Turn off protected mode to write to special registers */
+@@ -193,7 +203,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
++ a6xx_set_pagetable(a6xx_gpu, ring, submit);
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+@@ -283,7 +293,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+
+- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
++ a6xx_set_pagetable(a6xx_gpu, ring, submit);
+
+ get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+--
+2.43.0
+
--- /dev/null
+From ac6ccb79459563a6da183062fafed2a0123dbd83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 09:36:09 -0700
+Subject: drm/msm: Allocate memory for disp snapshot with kvzalloc()
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit e4a45582db1b792c57bdb52c45958264f7fcfbdc ]
+
+With the "drm/msm: add a display mmu fault handler" series [1] we saw
+issues in the field where memory allocation was failing when
+allocating space for registers in msm_disp_state_dump_regs().
+Specifically we were seeing an order 5 allocation fail. It's not
+surprising that order 5 allocations will sometimes fail after the
+system has been up and running for a while.
+
+There's no need here for contiguous memory. Change the allocation to
+kvzalloc() which should make it much less likely to fail.
+
+[1] https://lore.kernel.org/r/20240628214848.4075651-1-quic_abhinavk@quicinc.com/
+
+Fixes: 98659487b845 ("drm/msm: add support to take dpu snapshot")
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/619658/
+Link: https://lore.kernel.org/r/20241014093605.2.I72441365ffe91f3dceb17db0a8ec976af8139590@changeid
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index bb149281d31fa..4d55e3cf570f0 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
+ end_addr = base_addr + aligned_len;
+
+ if (!(*reg))
+- *reg = kzalloc(len_padded, GFP_KERNEL);
++ *reg = kvzalloc(len_padded, GFP_KERNEL);
+
+ if (*reg)
+ dump_addr = *reg;
+@@ -162,7 +162,7 @@ void msm_disp_state_free(void *data)
+
+ list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
+ list_del(&block->node);
+- kfree(block->state);
++ kvfree(block->state);
+ kfree(block);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 535fb980916e918aacf5ea6def065ed77d6adc11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 09:36:08 -0700
+Subject: drm/msm: Avoid NULL dereference in msm_disp_state_print_regs()
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit 293f53263266bc4340d777268ab4328a97f041fa ]
+
+If the allocation in msm_disp_state_dump_regs() failed then
+`block->state` can be NULL. The msm_disp_state_print_regs() function
+_does_ have code to try to handle it with:
+
+ if (*reg)
+ dump_addr = *reg;
+
+...but since "dump_addr" is initialized to NULL the above is actually
+a noop. The code then goes on to dereference `dump_addr`.
+
+Make the function print "Registers not stored" when it sees a NULL to
+solve this. Since we're touching the code, fix
+msm_disp_state_print_regs() not to pointlessly take a double-pointer
+and properly mark the pointer as `const`.
+
+Fixes: 98659487b845 ("drm/msm: add support to take dpu snapshot")
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/619657/
+Link: https://lore.kernel.org/r/20241014093605.1.Ia1217cecec9ef09eb3c6d125360cc6c8574b0e73@changeid
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index add72bbc28b17..bb149281d31fa 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
+ }
+ }
+
+-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
+- struct drm_printer *p)
++static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
++ void __iomem *base_addr, struct drm_printer *p)
+ {
+ int i;
+- u32 *dump_addr = NULL;
+ void __iomem *addr;
+ u32 num_rows;
+
++ if (!dump_addr) {
++ drm_printf(p, "Registers not stored\n");
++ return;
++ }
++
+ addr = base_addr;
+ num_rows = len / REG_DUMP_ALIGN;
+
+- if (*reg)
+- dump_addr = *reg;
+-
+ for (i = 0; i < num_rows; i++) {
+ drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
+ (unsigned long)(addr - base_addr),
+@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
+
+ list_for_each_entry_safe(block, tmp, &state->blocks, node) {
+ drm_printf(p, "====================%s================\n", block->name);
+- msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
++ msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
+ }
+
+ drm_printf(p, "===================dpu drm state================\n");
+--
+2.43.0
+
--- /dev/null
+From 04e3c659c022b5504d6a46791bdc6306ee73dc7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 06:22:46 +0300
+Subject: drm/msm/dpu: check for overflow in _dpu_crtc_setup_lm_bounds()
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 3a0851b442d1f63ba42ecfa2506d3176cfabf9d4 ]
+
+Make _dpu_crtc_setup_lm_bounds() check that CRTC width is not
+overflowing LM requirements. Rename the function accordingly.
+
+Fixes: 25fdd5933e4c ("drm/msm: Add SDM845 DPU support")
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Tested-by: Abhinav Kumar <quic_abhinavk@quicinc.com> # sc7280
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/612237/
+Link: https://lore.kernel.org/r/20240903-dpu-mode-config-width-v6-3-617e1ecc4b7a@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index e81feb0d67f3e..db6c57900781d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -711,12 +711,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
+ _dpu_crtc_complete_flip(crtc);
+ }
+
+-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
++static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+ {
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
++ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+@@ -727,7 +728,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ r->y2 = adj_mode->vdisplay;
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
++
++ if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
++ return -E2BIG;
+ }
++
++ return 0;
+ }
+
+ static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+@@ -803,7 +809,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
++ _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
+
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+@@ -1189,8 +1195,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ if (crtc_state->active_changed)
+ crtc_state->mode_changed = true;
+
+- if (cstate->num_mixers)
+- _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
++ if (cstate->num_mixers) {
++ rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
++ if (rc)
++ return rc;
++ }
+
+ /* FIXME: move this to dpu_plane_atomic_check? */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+--
+2.43.0
+
--- /dev/null
+From 1b20078bfca6a4cfbf1e10f0cc7baf64b72fd0f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 20:46:19 -0700
+Subject: drm/msm/dpu: don't always program merge_3d block
+
+From: Jessica Zhang <quic_jesszhan@quicinc.com>
+
+[ Upstream commit f87f3b80abaf7949e638dd17dfdc267066eb52d5 ]
+
+Only program the merge_3d block for the video phys encoder when the 3d
+blend mode is not NONE
+
+Fixes: 3e79527a33a8 ("drm/msm/dpu: enable merge_3d support on sm8150/sm8250")
+Suggested-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/619095/
+Link: https://lore.kernel.org/r/20241009-merge3d-fix-v1-1-0d0b6f5c244e@quicinc.com
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index 8864ace938e03..d8a2edebfe8c3 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+- if (phys_enc->hw_pp->merge_3d)
++ if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+--
+2.43.0
+
--- /dev/null
+From 75c1e2c7f5ac095af5517d27abbc25991d05891c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 20:41:13 -0700
+Subject: drm/msm/dpu: Don't always set merge_3d pending flush
+
+From: Jessica Zhang <quic_jesszhan@quicinc.com>
+
+[ Upstream commit 40dad89cb86ce824f2080441b2a6b7aedf695329 ]
+
+Don't set the merge_3d pending flush bits if the mode_3d is
+BLEND_3D_NONE.
+
+Always flushing merge_3d can cause timeout issues when there are
+multiple commits with concurrent writeback enabled.
+
+This is because the video phys enc waits for the hw_ctl flush register
+to be completely cleared [1] in its wait_for_commit_done(), but the WB
+encoder always sets the merge_3d pending flush during each commit
+regardless of if the merge_3d is actually active.
+
+This means that the hw_ctl flush register will never be 0 when there are
+multiple CWB commits and the video phys enc will hit vblank timeout
+errors after the first CWB commit.
+
+[1] commit fe9df3f50c39 ("drm/msm/dpu: add real wait_for_commit_done()")
+
+Fixes: 3e79527a33a8 ("drm/msm/dpu: enable merge_3d support on sm8150/sm8250")
+Fixes: d7d0e73f7de3 ("drm/msm/dpu: introduce the dpu_encoder_phys_* for writeback")
+Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/619092/
+Link: https://lore.kernel.org/r/20241009-mode3d-fix-v1-1-c0258354fadc@quicinc.com
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 5 ++++-
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 5 ++++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index ba8878d21cf0e..8864ace938e03 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+ struct dpu_hw_ctl *ctl;
+ const struct msm_format *fmt;
+ u32 fmt_fourcc;
++ u32 mode_3d;
+
+ ctl = phys_enc->hw_ctl;
+ fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
+ fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
++ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+
+@@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+ goto skip_flush;
+
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+- if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
++ if (mode_3d && ctl->ops.update_pending_flush_merge_3d &&
++ phys_enc->hw_pp->merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
+
+ if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 882c717859cec..07035ab77b792 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_cdm *hw_cdm;
+ u32 pending_flush = 0;
++ u32 mode_3d;
+
+ if (!phys_enc)
+ return;
+@@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+ hw_pp = phys_enc->hw_pp;
+ hw_ctl = phys_enc->hw_ctl;
+ hw_cdm = phys_enc->hw_cdm;
++ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+@@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+ if (hw_ctl->ops.update_pending_flush_wb)
+ hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
+
+- if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
++ if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d &&
++ hw_pp && hw_pp->merge_3d)
+ hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
+ hw_pp->merge_3d->idx);
+
+--
+2.43.0
+
--- /dev/null
+From 061459540c63520e16367198d43513c71c1e6f68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 06:22:44 +0300
+Subject: drm/msm/dpu: make sure phys resources are properly initialized
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit bfecbc2cfba9b06d67d9d249c33d92e570e2fa70 ]
+
+The commit b954fa6baaca ("drm/msm/dpu: Refactor rm iterator") removed
+zero-init of the hw_ctl array, but didn't change the error condition,
+that checked for hw_ctl[i] being NULL. At the same time because of the
+early returns in case of an error dpu_encoder_phys might be left with
+the resources assigned in the previous state. Rework assigning of hw_pp
+/ hw_ctl to the dpu_encoder_phys in order to make sure they are always
+set correctly.
+
+Fixes: b954fa6baaca ("drm/msm/dpu: Refactor rm iterator")
+Suggested-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/612233/
+Link: https://lore.kernel.org/r/20240903-dpu-mode-config-width-v6-1-617e1ecc4b7a@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 3b171bf227d16..949ebda2fa829 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1174,21 +1174,20 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+- if (!dpu_enc->hw_pp[i]) {
++ phys->hw_pp = dpu_enc->hw_pp[i];
++ if (!phys->hw_pp) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ return;
+ }
+
+- if (!hw_ctl[i]) {
++ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
++ if (!phys->hw_ctl) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ return;
+ }
+
+- phys->hw_pp = dpu_enc->hw_pp[i];
+- phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
+-
+ phys->cached_mode = crtc_state->adjusted_mode;
+ if (phys->ops.atomic_mode_set)
+ phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
+--
+2.43.0
+
--- /dev/null
+From cb9d909e0b6d8362866dd6229c47a3c6758c77fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Sep 2024 06:22:45 +0300
+Subject: drm/msm/dpu: move CRTC resource assignment to
+ dpu_encoder_virt_atomic_check
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 3ae133b0192b9b0c9f560bbc096887053150195f ]
+
+Historically CRTC resources (LMs and CTLs) were assigned in
+dpu_crtc_atomic_begin(). The commit 9222cdd27e82 ("drm/msm/dpu: move hw
+resource tracking to crtc state") simply moved resources to
+struct dpu_crtc_state, without changing the code sequence. Later on the
+commit b107603b4ad0 ("drm/msm/dpu: map mixer/ctl hw blocks in encoder
+modeset") rearranged the code, but still kept the cstate->num_mixers
+assignment to happen during commit phase. This makes dpu_crtc_state
+inconsistent between consequent atomic_check() calls.
+
+Move CRTC resource assignment to happen at the end of
+dpu_encoder_virt_atomic_check().
+
+Fixes: b107603b4ad0 ("drm/msm/dpu: map mixer/ctl hw blocks in encoder modeset")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/612235/
+Link: https://lore.kernel.org/r/20240903-dpu-mode-config-width-v6-2-617e1ecc4b7a@linaro.org
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 3 --
+ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 59 +++++++++++++--------
+ 2 files changed, 38 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 4c1be2f0555f7..e81feb0d67f3e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1091,9 +1091,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
+
+ dpu_core_perf_crtc_update(crtc, 0);
+
+- memset(cstate->mixers, 0, sizeof(cstate->mixers));
+- cstate->num_mixers = 0;
+-
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 949ebda2fa829..bd3698bf0cf74 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -624,6 +624,40 @@ static struct msm_display_topology dpu_encoder_get_topology(
+ return topology;
+ }
+
++static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
++ struct drm_encoder *drm_enc,
++ struct dpu_global_state *global_state,
++ struct drm_crtc_state *crtc_state)
++{
++ struct dpu_crtc_state *cstate;
++ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
++ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
++ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
++ int num_lm, num_ctl, num_dspp, i;
++
++ cstate = to_dpu_crtc_state(crtc_state);
++
++ memset(cstate->mixers, 0, sizeof(cstate->mixers));
++
++ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
++ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
++ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
++ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
++ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
++ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
++ ARRAY_SIZE(hw_dspp));
++
++ for (i = 0; i < num_lm; i++) {
++ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
++
++ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
++ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
++ cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
++ }
++
++ cstate->num_mixers = num_lm;
++}
++
+ static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+@@ -692,6 +726,9 @@ static int dpu_encoder_virt_atomic_check(
+ if (!crtc_state->active_changed || crtc_state->enable)
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
++ if (!ret)
++ dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
++ global_state, crtc_state);
+ }
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+@@ -1093,14 +1130,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+- struct dpu_crtc_state *cstate;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+- int num_lm, num_ctl, num_pp, num_dsc;
++ int num_ctl, num_pp, num_dsc;
+ unsigned int dsc_mask = 0;
+ int i;
+
+@@ -1129,11 +1163,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+- dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+- ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+@@ -1159,18 +1188,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
+ }
+
+- cstate = to_dpu_crtc_state(crtc_state);
+-
+- for (i = 0; i < num_lm; i++) {
+- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+-
+- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+- cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+- }
+-
+- cstate->num_mixers = num_lm;
+-
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+--
+2.43.0
+
--- /dev/null
+From bf4eeba0b25a727292cf4f669e0d13fc0bc2a334 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 01:01:49 -0400
+Subject: drm/msm/dsi: fix 32-bit signed integer extension in pclk_rate
+ calculation
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+[ Upstream commit 358b762400bd94db2a14a72dfcef74c7da6bd845 ]
+
+When (mode->clock * 1000) is larger than (1<<31), int to unsigned long
+conversion will sign extend the int to 64 bits and the pclk_rate value
+will be incorrect.
+
+Fix this by making the result of the multiplication unsigned.
+
+Note that above (1<<32) would still be broken and require more changes, but
+its unlikely anyone will need that anytime soon.
+
+Fixes: c4d8cfe516dc ("drm/msm/dsi: add implementation for helper functions")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/618434/
+Link: https://lore.kernel.org/r/20241007050157.26855-2-jonathan@marek.ca
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dsi/dsi_host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 1205aa398e445..a98d24b7cb00b 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
+ {
+ unsigned long pclk_rate;
+
+- pclk_rate = mode->clock * 1000;
++ pclk_rate = mode->clock * 1000u;
+
+ if (dsc)
+ pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
+--
+2.43.0
+
--- /dev/null
+From f1d650cd58e04e1695d94fe2848127ed9b6f3164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 01:01:48 -0400
+Subject: drm/msm/dsi: improve/fix dsc pclk calculation
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+[ Upstream commit 24436a540d16ca6a523b8e5441180001c31b6b35 ]
+
+drm_mode_vrefresh() can introduce a large rounding error, avoid it.
+
+Fixes: 7c9e4a554d4a ("drm/msm/dsi: Reduce pclk rate for compression")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Patchwork: https://patchwork.freedesktop.org/patch/618432/
+Link: https://lore.kernel.org/r/20241007050157.26855-1-jonathan@marek.ca
+Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dsi/dsi_host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 185d7de0bf376..1205aa398e445 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
+
+ int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
+
+- return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
++ return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
+ }
+
+ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
+--
+2.43.0
+
--- /dev/null
+From 971616729a3c92fa3a0b3d67cb07da4b59cf8c7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 10:08:19 +0800
+Subject: drm/panel: himax-hx83102: Adjust power and gamma to optimize
+ brightness
+
+From: Cong Yang <yangcong5@huaqin.corp-partner.google.com>
+
+[ Upstream commit fcf38bc321fbc87dfcd829f42e64e541f17599f7 ]
+
+The current panel brightness is only 360 nit. Adjust the power and gamma to
+optimize the panel brightness. The brightness after adjustment is 390 nit.
+
+Fixes: 3179338750d8 ("drm/panel: himax-hx83102: Support for IVO t109nw41 MIPI-DSI panel")
+Signed-off-by: Cong Yang <yangcong5@huaqin.corp-partner.google.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241011020819.1254157-1-yangcong5@huaqin.corp-partner.google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-himax-hx83102.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
+index 6e4b7e4644ce0..8b48bba181316 100644
+--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
++++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
+@@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
+ msleep(60);
+
+ hx83102_enable_extended_cmds(&dsi_ctx, true);
+- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42,
++ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52,
+ 0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88,
+ 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12,
+@@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05,
+- 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78,
+- 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12,
+- 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a,
+- 0x41, 0x50, 0x68, 0x73);
++ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33,
++ 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b,
++ 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48,
++ 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c,
++ 0x4d, 0x56, 0x5d, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e,
+ 0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02,
+ 0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01);
+--
+2.43.0
+
--- /dev/null
+From d55888977c8e8e50ad4eafc486167556b92e0227 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Aug 2024 13:37:56 -0500
+Subject: drm/vmwgfx: Handle possible ENOMEM in vmw_stdu_connector_atomic_check
+
+From: Ian Forbes <ian.forbes@broadcom.com>
+
+[ Upstream commit 4809a017a2bc42ff239d53ade4b2e70f2fe81348 ]
+
+Handle unlikely ENOMEN condition and other errors in
+vmw_stdu_connector_atomic_check.
+
+Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Fixes: 75c3e8a26a35 ("drm/vmwgfx: Trigger a modeset when the screen moves")
+Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240809183756.27283-1-ian.forbes@broadcom.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index fab155a68054a..82d18b88f4a7e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
+ struct drm_crtc_state *new_crtc_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
++
++ if (IS_ERR(conn_state))
++ return PTR_ERR(conn_state);
++
+ du = vmw_connector_to_stdu(conn);
+
+ if (!conn_state->crtc)
+--
+2.43.0
+
--- /dev/null
+From 3a2670cb08c092296eb7900e1c3f5c9aeafd268e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Oct 2024 17:16:57 -0700
+Subject: drm/xe: Don't free job in TDR
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit 82926f52d7a09c65d916c0ef8d4305fc95d68c0c ]
+
+Freeing job in TDR is not safe as TDR can pass the run_job thread
+resulting in UAF. It is only safe for free job to naturally be called by
+the scheduler. Rather free job in TDR, add to pending list.
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2811
+Cc: Matthew Auld <matthew.auld@intel.com>
+Fixes: e275d61c5f3f ("drm/xe/guc: Handle timing out of signaled jobs gracefully")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241003001657.3517883-3-matthew.brost@intel.com
+(cherry picked from commit ea2f6a77d0c40d97f4a4dc93fee4afe15d94926d)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_guc_submit.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index 690f821f8bf5a..dfd809e7bbd25 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1101,10 +1101,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
+
+ /*
+ * TDR has fired before free job worker. Common if exec queue
+- * immediately closed after last fence signaled.
++ * immediately closed after last fence signaled. Add back to pending
++ * list so job can be freed and kick scheduler ensuring free job is not
++ * lost.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
+- guc_exec_queue_free_job(drm_job);
++ xe_sched_add_pending_job(sched, job);
++ xe_sched_submission_start(sched);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5de0435885f689183952ace355ddc109d26ce951 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 09:48:10 +0100
+Subject: drm/xe: fix unbalanced rpm put() with declare_wedged()
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit 761f916af44279a99db4e78c5f5ee839b31107ea ]
+
+Technically the or_reset() means we call the action on failure, however
+that would lead to unbalanced rpm put(). Move the get() earlier to fix
+this. It should be extremely unlikely to ever trigger this in practice.
+
+Fixes: 90936a0a4c54 ("drm/xe: Don't suspend device upon wedge")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241009084808.204432-4-matthew.auld@intel.com
+(cherry picked from commit a187c1b0a800565a4db6372268692aff99df7f53)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 8a44a2b6dcbb6..fb394189d9e23 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -960,13 +960,13 @@ void xe_device_declare_wedged(struct xe_device *xe)
+ return;
+ }
+
++ xe_pm_runtime_get_noresume(xe);
++
+ if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
+ drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
+ return;
+ }
+
+- xe_pm_runtime_get_noresume(xe);
+-
+ if (!atomic_xchg(&xe->wedged.flag, 1)) {
+ xe->needs_flr_on_fini = true;
+ drm_err(&xe->drm,
+--
+2.43.0
+
--- /dev/null
+From d7cff62963ac41ee65ad9872ce06fd24ffad92ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 09:48:09 +0100
+Subject: drm/xe: fix unbalanced rpm put() with fence_fini()
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit 03a86c24aea0920a1ca20a0d7771d5e176db538d ]
+
+Currently we can call fence_fini() twice if something goes wrong when
+sending the GuC CT for the tlb request, since we signal the fence and
+return an error, leading to the caller also calling fini() on the error
+path in the case of stack version of the flow, which leads to an extra
+rpm put() which might later cause device to enter suspend when it
+shouldn't. It looks like we can just drop the fini() call since the
+fence signaller side will already call this for us.
+
+There are known mysterious splats with device going to sleep even with
+an rpm ref, and this could be one candidate.
+
+v2 (Matt B):
+ - Prefer warning if we detect double fini()
+
+Fixes: f002702290fc ("drm/xe: Hold a PM ref when GT TLB invalidations are inflight")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Nirmoy Das <nirmoy.das@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241009084808.204432-3-matthew.auld@intel.com
+(cherry picked from commit cfcbc0520d5055825f0647ab922b655688605183)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 29 +++++++++------------
+ drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 1 -
+ drivers/gpu/drm/xe/xe_vm.c | 8 ++----
+ 3 files changed, 15 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+index 87cb76a8718c9..82795133e129e 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+@@ -36,6 +36,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
+ return hw_tlb_timeout + 2 * delay;
+ }
+
++static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
++{
++ if (WARN_ON_ONCE(!fence->gt))
++ return;
++
++ xe_pm_runtime_put(gt_to_xe(fence->gt));
++ fence->gt = NULL; /* fini() should be called once */
++}
++
+ static void
+ __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+ {
+@@ -203,7 +212,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
+ tlb_timeout_jiffies(gt));
+ }
+ spin_unlock_irq(>->tlb_invalidation.pending_lock);
+- } else if (ret < 0) {
++ } else {
+ __invalidation_fence_signal(xe, fence);
+ }
+ if (!ret) {
+@@ -265,10 +274,8 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+ ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+- if (ret < 0) {
+- xe_gt_tlb_invalidation_fence_fini(&fence);
++ if (ret)
+ return ret;
+- }
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+ } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+@@ -494,7 +501,8 @@ static const struct dma_fence_ops invalidation_fence_ops = {
+ * @stack: fence is stack variable
+ *
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+- * must be called if fence is not signaled.
++ * will be automatically called when fence is signalled (all fences must signal),
++ * even on error.
+ */
+ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+@@ -514,14 +522,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ dma_fence_get(&fence->base);
+ fence->gt = gt;
+ }
+-
+-/**
+- * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+- * @fence: TLB invalidation fence to finalize
+- *
+- * Drop PM ref which fence took durinig init.
+- */
+-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+-{
+- xe_pm_runtime_put(gt_to_xe(fence->gt));
+-}
+diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+index a84065fa324c7..f430d5797af70 100644
+--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
++++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+@@ -28,7 +28,6 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack);
+-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+
+ static inline void
+ xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 49ba9a1e375f4..3ac41f70ea6b1 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -3377,10 +3377,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
+
+ ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+ &fence[fence_id], vma);
+- if (ret < 0) {
+- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
++ if (ret)
+ goto wait;
+- }
+ ++fence_id;
+
+ if (!tile->media_gt)
+@@ -3392,10 +3390,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
+
+ ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
+ &fence[fence_id], vma);
+- if (ret < 0) {
+- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
++ if (ret)
+ goto wait;
+- }
+ ++fence_id;
+ }
+ }
+--
+2.43.0
+
--- /dev/null
+From 8d30d74cd5e49d7ea556f9ea96ed56ea2356097c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Oct 2024 17:16:56 -0700
+Subject: drm/xe: Take job list lock in xe_sched_add_pending_job
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit ed931fb40e353586f26c3327813d142f782f5f78 ]
+
+A fragile micro optimization in xe_sched_add_pending_job relied on both
+the GPU scheduler being stopped and fence signaling stopped to safely
+add a job to the pending list without the job list lock in
+xe_sched_add_pending_job. Remove this optimization and just take the job
+list lock.
+
+Fixes: 7ddb9403dd74 ("drm/xe: Sample ctx timestamp to determine if jobs have timed out")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241003001657.3517883-2-matthew.brost@intel.com
+(cherry picked from commit 90521df5fc43980e4575bd8c5b1cb62afe1a9f5f)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gpu_scheduler.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+index 6aac7fe686735..6bdd0a5b36122 100644
+--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
++++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+@@ -51,7 +51,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
+ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
+ struct xe_sched_job *job)
+ {
++ spin_lock(&sched->base.job_list_lock);
+ list_add(&job->drm.list, &sched->base.pending_list);
++ spin_unlock(&sched->base.job_list_lock);
+ }
+
+ static inline
+--
+2.43.0
+
--- /dev/null
+From dd381de616b766734959487cd2bcef69663e6e78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Sep 2024 08:26:22 -0700
+Subject: drm/xe: Use bookkeep slots for external BO's in exec IOCTL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit e7518276e9388d36f103e8c1c7e99898a30d11f5 ]
+
+Fix external BO's dma-resv usage in exec IOCTL using bookkeep slots
+rather than write slots. This leaves syncing to user space rather than
+the KMD blindly enforcing write semantics on every external BO.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: Kenneth Graunke <kenneth.w.graunke@intel.com>
+Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Reported-by: Simona Vetter <simona.vetter@ffwll.ch>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/2673
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240911152622.903058-1-matthew.brost@intel.com
+(cherry picked from commit b8b1163248759ba18509f7443a2d19b15b4c1df8)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
+index f36980aa26e69..6e5ba381eaded 100644
+--- a/drivers/gpu/drm/xe/xe_exec.c
++++ b/drivers/gpu/drm/xe/xe_exec.c
+@@ -40,11 +40,6 @@
+ * user knows an exec writes to a BO and reads from the BO in the next exec, it
+ * is the user's responsibility to pass in / out fence between the two execs).
+ *
+- * Implicit dependencies for external BOs are handled by using the dma-buf
+- * implicit dependency uAPI (TODO: add link). To make this works each exec must
+- * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
+- * BO mapped in the VM.
+- *
+ * We do not allow a user to trigger a bind at exec time rather we have a VM
+ * bind IOCTL which uses the same in / out fence interface as exec. In that
+ * sense, a VM bind is basically the same operation as an exec from the user
+@@ -58,8 +53,8 @@
+ * behind any pending kernel operations on any external BOs in VM or any BOs
+ * private to the VM. This is accomplished by the rebinds waiting on BOs
+ * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs
+- * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and
+- * in DMA_RESV_USAGE_WRITE for external BOs).
++ * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and
++ * for external BOs).
+ *
+ * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute
+ * mode VMs we use preempt fences and a rebind worker (TODO: add link).
+@@ -292,7 +287,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ xe_sched_job_arm(job);
+ if (!xe_vm_in_lr_mode(vm))
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
+- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
++ DMA_RESV_USAGE_BOOKKEEP,
++ DMA_RESV_USAGE_BOOKKEEP);
+
+ for (i = 0; i < num_syncs; i++) {
+ xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
+--
+2.43.0
+
--- /dev/null
+From adc2ce0604e5a56670f1fa24f21c44379dc728c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 10:01:21 -0700
+Subject: elevator: do not request_module if elevator exists
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit b4ff6e93bfd0093ce3ffc7322e89fbaa8300488f ]
+
+Whenever an I/O elevator is changed, the system attempts to load a
+module for the new elevator. This occurs regardless of whether the
+elevator is already loaded or built directly into the kernel. This
+behavior introduces unnecessary overhead and potential issues.
+
+This makes the operation slower, and more error-prone. For instance,
+making the problem fixed by [1] visible for users that doesn't even rely
+on modules being available through modules.
+
+Do not try to load the ioscheduler if it is already visible.
+
+This change brings two main benefits: it improves the performance of
+elevator changes, and it reduces the likelihood of errors occurring
+during this process.
+
+[1] Commit e3accac1a976 ("block: Fix elv_iosched_local_module handling of "none" scheduler")
+
+Fixes: 734e1a860312 ("block: Prevent deadlocks when switching elevators")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Link: https://lore.kernel.org/r/20241011170122.3880087-1-leitao@debian.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/elevator.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/block/elevator.c b/block/elevator.c
+index 4122026b11f1a..8f155512bcd84 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -709,13 +709,21 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
+ size_t count)
+ {
+ char elevator_name[ELV_NAME_MAX];
++ struct elevator_type *found;
++ const char *name;
+
+ if (!elv_support_iosched(disk->queue))
+ return -EOPNOTSUPP;
+
+ strscpy(elevator_name, buf, sizeof(elevator_name));
++ name = strstrip(elevator_name);
+
+- request_module("%s-iosched", strstrip(elevator_name));
++ spin_lock(&elv_list_lock);
++ found = __elevator_find(name);
++ spin_unlock(&elv_list_lock);
++
++ if (!found)
++ request_module("%s-iosched", name);
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 974401ab765527f411bc760c87877134381614e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 08:56:15 -0700
+Subject: elevator: Remove argument from elevator_find_get
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit ee7ff15bf507d4cf9a2b11b00690dfe6046ad325 ]
+
+Commit e4eb37cc0f3ed ("block: Remove elevator required features")
+removed the usage of `struct request_queue` from elevator_find_get(),
+but didn't removed the argument.
+
+Remove the "struct request_queue *q" argument from elevator_find_get()
+given it is useless.
+
+Fixes: e4eb37cc0f3e ("block: Remove elevator required features")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Link: https://lore.kernel.org/r/20241011155615.3361143-1-leitao@debian.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/elevator.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/block/elevator.c b/block/elevator.c
+index 8f155512bcd84..640fcc891b0d2 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -106,8 +106,7 @@ static struct elevator_type *__elevator_find(const char *name)
+ return NULL;
+ }
+
+-static struct elevator_type *elevator_find_get(struct request_queue *q,
+- const char *name)
++static struct elevator_type *elevator_find_get(const char *name)
+ {
+ struct elevator_type *e;
+
+@@ -569,7 +568,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
+ !blk_mq_is_shared_tags(q->tag_set->flags))
+ return NULL;
+
+- return elevator_find_get(q, "mq-deadline");
++ return elevator_find_get("mq-deadline");
+ }
+
+ /*
+@@ -697,7 +696,7 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
+ if (q->elevator && elevator_match(q->elevator->type, elevator_name))
+ return 0;
+
+- e = elevator_find_get(q, elevator_name);
++ e = elevator_find_get(elevator_name);
+ if (!e)
+ return -EINVAL;
+ ret = elevator_switch(q, e);
+--
+2.43.0
+
--- /dev/null
+From 16e9fc4fe3e673aaf95a9d0fb5832c0306c4a4dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Oct 2024 21:52:12 -0400
+Subject: fgraph: Allocate ret_stack_list with proper size
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit fae4078c289a2f24229c0de652249948b1cd6bdb ]
+
+The ret_stack_list is an array of ret_stack shadow stacks for the function
+graph usage. When the first function graph is enabled, all tasks in the
+system get a shadow stack. The ret_stack_list is a 32 element array of
+pointers to these shadow stacks. It allocates the shadow stack in batches
+(32 stacks at a time), assigns them to running tasks, and continues until
+all tasks are covered.
+
+When the function graph shadow stack changed from an array of
+ftrace_ret_stack structures to an array of longs, the allocation of
+ret_stack_list went from allocating an array of 32 elements to just a
+block defined by SHADOW_STACK_SIZE. Luckily, that's defined as PAGE_SIZE
+and is much more than enough to hold 32 pointers. But it is way overkill
+for the amount needed to allocate.
+
+Change the allocation of ret_stack_list back to a kcalloc() of
+FTRACE_RETSTACK_ALLOC_SIZE pointers.
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/20241018215212.23f13f40@rorschach
+Fixes: 42675b723b484 ("function_graph: Convert ret_stack to a series of longs")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/fgraph.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index 43f4e3f57438b..41e7a15dcb50c 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -1162,7 +1162,8 @@ static int start_graph_tracing(void)
+ unsigned long **ret_stack_list;
+ int ret;
+
+- ret_stack_list = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
++ ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
++ sizeof(*ret_stack_list), GFP_KERNEL);
+
+ if (!ret_stack_list)
+ return -ENOMEM;
+--
+2.43.0
+
--- /dev/null
+From 4f16c699aeececeb9389a2631f4ce56ede9e17c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 18:40:02 +0800
+Subject: firmware: arm_scmi: Fix the double free in
+ scmi_debugfs_common_setup()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Su Hui <suhui@nfschina.com>
+
+[ Upstream commit 39b13dce1a91cdfc3bec9238f9e89094551bd428 ]
+
+Clang static checker(scan-build) throws below warningïŒ
+ | drivers/firmware/arm_scmi/driver.c:line 2915, column 2
+ | Attempt to free released memory.
+
+When devm_add_action_or_reset() fails, scmi_debugfs_common_cleanup()
+will run twice which causes double free of 'dbg->name'.
+
+Remove the redundant scmi_debugfs_common_cleanup() to fix this problem.
+
+Fixes: c3d4aed763ce ("firmware: arm_scmi: Populate a common SCMI debugfs root")
+Signed-off-by: Su Hui <suhui@nfschina.com>
+Reviewed-by: Cristian Marussi <cristian.marussi@arm.com>
+Message-Id: <20241011104001.1546476-1-suhui@nfschina.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/driver.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 6b6957f4743fe..dc09f2d755f41 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -2902,10 +2902,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+- scmi_debugfs_common_cleanup, dbg)) {
+- scmi_debugfs_common_cleanup(dbg);
++ scmi_debugfs_common_cleanup, dbg))
+ return NULL;
+- }
+
+ return dbg;
+ }
+--
+2.43.0
+
--- /dev/null
+From 0d4a58dd2551feb487d3329340266ac32ee81fb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 09:07:17 -0700
+Subject: firmware: arm_scmi: Queue in scmi layer for mailbox implementation
+
+From: Justin Chen <justin.chen@broadcom.com>
+
+[ Upstream commit da1642bc97c4ef67f347edcd493bd0a52f88777b ]
+
+send_message() does not block in the MBOX implementation. This is
+because the mailbox layer has its own queue. However, this confuses
+the per xfer timeouts as they all start their timeout ticks in
+parallel.
+
+Consider a case where the xfer timeout is 30ms and a SCMI transaction
+takes 25ms:
+
+ | 0ms: Message #0 is queued in mailbox layer and sent out, then sits
+ | at scmi_wait_for_message_response() with a timeout of 30ms
+ | 1ms: Message #1 is queued in mailbox layer but not sent out yet.
+ | Since send_message() doesn't block, it also sits at
+ | scmi_wait_for_message_response() with a timeout of 30ms
+ | ...
+ | 25ms: Message #0 is completed, txdone is called and message #1 is sent
+ | 31ms: Message #1 times out since the count started at 1ms. Even though
+ | it has only been inflight for 6ms.
+
+Fixes: 5c8a47a5a91d ("firmware: arm_scmi: Make scmi core independent of the transport type")
+Signed-off-by: Justin Chen <justin.chen@broadcom.com>
+Message-Id: <20241014160717.1678953-1-justin.chen@broadcom.com>
+Reviewed-by: Cristian Marussi <cristian.marussi@arm.com>
+Tested-by: Cristian Marussi <cristian.marussi@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_scmi/mailbox.c | 32 +++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 0219a12e3209a..06087cb785f36 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -24,6 +24,7 @@
+ * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
++ * @chan_lock: Lock that prevents multiple xfers from being queued
+ */
+ struct scmi_mailbox {
+ struct mbox_client cl;
+@@ -32,6 +33,7 @@ struct scmi_mailbox {
+ struct mbox_chan *chan_platform_receiver;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
++ struct mutex chan_lock;
+ };
+
+ #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+@@ -255,6 +257,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
++ mutex_init(&smbox->chan_lock);
+
+ return 0;
+ }
+@@ -284,13 +287,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+- ret = mbox_send_message(smbox->chan, xfer);
++ /*
++ * The mailbox layer has its own queue. However the mailbox queue
++ * confuses the per message SCMI timeouts since the clock starts when
++ * the message is submitted into the mailbox queue. So when multiple
++ * messages are queued up the clock starts on all messages instead of
++ * only the one inflight.
++ */
++ mutex_lock(&smbox->chan_lock);
+
+- /* mbox_send_message returns non-negative value on success, so reset */
+- if (ret > 0)
+- ret = 0;
++ ret = mbox_send_message(smbox->chan, xfer);
++ /* mbox_send_message returns non-negative value on success */
++ if (ret < 0) {
++ mutex_unlock(&smbox->chan_lock);
++ return ret;
++ }
+
+- return ret;
++ return 0;
+ }
+
+ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+@@ -298,13 +311,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ {
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+- /*
+- * NOTE: we might prefer not to need the mailbox ticker to manage the
+- * transfer queueing since the protocol layer queues things by itself.
+- * Unfortunately, we have to kick the mailbox framework after we have
+- * received our message.
+- */
+ mbox_client_txdone(smbox->chan, ret);
++
++ /* Release channel */
++ mutex_unlock(&smbox->chan_lock);
+ }
+
+ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+--
+2.43.0
+
--- /dev/null
+From adeaf59afa73a051251235aa3787c9e84d740e87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 17:12:17 +0000
+Subject: genetlink: hold RCU in genlmsg_mcast()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 56440d7ec28d60f8da3bfa09062b3368ff9b16db ]
+
+While running net selftests with CONFIG_PROVE_RCU_LIST=y I saw
+one lockdep splat [1].
+
+genlmsg_mcast() uses for_each_net_rcu(), and must therefore hold RCU.
+
+Instead of letting all callers guard genlmsg_multicast_allns()
+with a rcu_read_lock()/rcu_read_unlock() pair, do it in genlmsg_mcast().
+
+This also means the @flags parameter is useless, we need to always use
+GFP_ATOMIC.
+
+[1]
+[10882.424136] =============================
+[10882.424166] WARNING: suspicious RCU usage
+[10882.424309] 6.12.0-rc2-virtme #1156 Not tainted
+[10882.424400] -----------------------------
+[10882.424423] net/netlink/genetlink.c:1940 RCU-list traversed in non-reader section!!
+[10882.424469]
+other info that might help us debug this:
+
+[10882.424500]
+rcu_scheduler_active = 2, debug_locks = 1
+[10882.424744] 2 locks held by ip/15677:
+[10882.424791] #0: ffffffffb6b491b0 (cb_lock){++++}-{3:3}, at: genl_rcv (net/netlink/genetlink.c:1219)
+[10882.426334] #1: ffffffffb6b49248 (genl_mutex){+.+.}-{3:3}, at: genl_rcv_msg (net/netlink/genetlink.c:61 net/netlink/genetlink.c:57 net/netlink/genetlink.c:1209)
+[10882.426465]
+stack backtrace:
+[10882.426805] CPU: 14 UID: 0 PID: 15677 Comm: ip Not tainted 6.12.0-rc2-virtme #1156
+[10882.426919] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+[10882.427046] Call Trace:
+[10882.427131] <TASK>
+[10882.427244] dump_stack_lvl (lib/dump_stack.c:123)
+[10882.427335] lockdep_rcu_suspicious (kernel/locking/lockdep.c:6822)
+[10882.427387] genlmsg_multicast_allns (net/netlink/genetlink.c:1940 (discriminator 7) net/netlink/genetlink.c:1977 (discriminator 7))
+[10882.427436] l2tp_tunnel_notify.constprop.0 (net/l2tp/l2tp_netlink.c:119) l2tp_netlink
+[10882.427683] l2tp_nl_cmd_tunnel_create (net/l2tp/l2tp_netlink.c:253) l2tp_netlink
+[10882.427748] genl_family_rcv_msg_doit (net/netlink/genetlink.c:1115)
+[10882.427834] genl_rcv_msg (net/netlink/genetlink.c:1195 net/netlink/genetlink.c:1210)
+[10882.427877] ? __pfx_l2tp_nl_cmd_tunnel_create (net/l2tp/l2tp_netlink.c:186) l2tp_netlink
+[10882.427927] ? __pfx_genl_rcv_msg (net/netlink/genetlink.c:1201)
+[10882.427959] netlink_rcv_skb (net/netlink/af_netlink.c:2551)
+[10882.428069] genl_rcv (net/netlink/genetlink.c:1220)
+[10882.428095] netlink_unicast (net/netlink/af_netlink.c:1332 net/netlink/af_netlink.c:1357)
+[10882.428140] netlink_sendmsg (net/netlink/af_netlink.c:1901)
+[10882.428210] ____sys_sendmsg (net/socket.c:729 (discriminator 1) net/socket.c:744 (discriminator 1) net/socket.c:2607 (discriminator 1))
+
+Fixes: 33f72e6f0c67 ("l2tp : multicast notification to the registered listeners")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: James Chapman <jchapman@katalix.com>
+Cc: Tom Parkin <tparkin@katalix.com>
+Cc: Johannes Berg <johannes.berg@intel.com>
+Link: https://patch.msgid.link/20241011171217.3166614-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/target/target_core_user.c | 2 +-
+ include/net/genetlink.h | 3 +--
+ net/l2tp/l2tp_netlink.c | 4 ++--
+ net/netlink/genetlink.c | 28 ++++++++++++++--------------
+ net/wireless/nl80211.c | 8 ++------
+ 5 files changed, 20 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 7eb94894bd68f..717931267bda0 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -2130,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+ }
+
+ ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
+- TCMU_MCGRP_CONFIG, GFP_KERNEL);
++ TCMU_MCGRP_CONFIG);
+
+ /* Wait during an add as the listener may not be up yet */
+ if (ret == 0 ||
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 9ab49bfeae789..c1d91f1d20f6c 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -531,13 +531,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+- * @flags: allocation flags
+ *
+ * This function must hold the RTNL or rcu_read_lock().
+ */
+ int genlmsg_multicast_allns(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+- unsigned int group, gfp_t flags);
++ unsigned int group);
+
+ /**
+ * genlmsg_unicast - unicast a netlink message
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index fc43ecbd128cc..28e77a222a39b 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -116,7 +116,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
+ NLM_F_ACK, tunnel, cmd);
+
+ if (ret >= 0) {
+- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ ret = genlmsg_multicast_allns(family, msg, 0, 0);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+@@ -144,7 +144,7 @@ static int l2tp_session_notify(struct genl_family *family,
+ NLM_F_ACK, session, cmd);
+
+ if (ret >= 0) {
+- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ ret = genlmsg_multicast_allns(family, msg, 0, 0);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index feb54c63a1165..07ad65774fe29 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1501,15 +1501,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+- if (!family->netnsok) {
++ if (!family->netnsok)
+ genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
+ 0, GFP_KERNEL);
+- } else {
+- rcu_read_lock();
+- genlmsg_multicast_allns(&genl_ctrl, msg, 0,
+- 0, GFP_ATOMIC);
+- rcu_read_unlock();
+- }
++ else
++ genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
+
+ return 0;
+ }
+@@ -1929,23 +1925,23 @@ static int __init genl_init(void)
+
+ core_initcall(genl_init);
+
+-static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+- gfp_t flags)
++static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
+ {
+ struct sk_buff *tmp;
+ struct net *net, *prev = NULL;
+ bool delivered = false;
+ int err;
+
++ rcu_read_lock();
+ for_each_net_rcu(net) {
+ if (prev) {
+- tmp = skb_clone(skb, flags);
++ tmp = skb_clone(skb, GFP_ATOMIC);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+ err = nlmsg_multicast(prev->genl_sock, tmp,
+- portid, group, flags);
++ portid, group, GFP_ATOMIC);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+@@ -1954,27 +1950,31 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+
+ prev = net;
+ }
++ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
++
++ rcu_read_unlock();
+
+- err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ return err;
+ return delivered ? 0 : -ESRCH;
+ error:
++ rcu_read_unlock();
++
+ kfree_skb(skb);
+ return err;
+ }
+
+ int genlmsg_multicast_allns(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+- unsigned int group, gfp_t flags)
++ unsigned int group)
+ {
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+
+ group = family->mcgrp_offset + group;
+- return genlmsg_mcast(skb, portid, group, flags);
++ return genlmsg_mcast(skb, portid, group);
+ }
+ EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index f18e1716339e0..3766efacfd64f 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -17967,10 +17967,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
+
+ genlmsg_end(msg, hdr);
+
+- rcu_read_lock();
+ genlmsg_multicast_allns(&nl80211_fam, msg, 0,
+- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
+- rcu_read_unlock();
++ NL80211_MCGRP_REGULATORY);
+
+ return;
+
+@@ -18703,10 +18701,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
+
+ genlmsg_end(msg, hdr);
+
+- rcu_read_lock();
+ genlmsg_multicast_allns(&nl80211_fam, msg, 0,
+- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
+- rcu_read_unlock();
++ NL80211_MCGRP_REGULATORY);
+
+ return;
+
+--
+2.43.0
+
--- /dev/null
+From 5abd35b058a4a1a9b96b9b53e45194b68d555926 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Sep 2024 04:36:20 -0400
+Subject: iio: accel: bma400: Fix uninitialized variable field_value in tap
+ event handling.
+
+From: Mikhail Lobanov <m.lobanov@rosalinux.ru>
+
+[ Upstream commit db9795a43dc944f048a37b65e06707f60f713e34 ]
+
+In the current implementation, the local variable field_value is used
+without prior initialization, which may lead to reading uninitialized
+memory. Specifically, in the macro set_mask_bits, the initial
+(potentially uninitialized) value of the buffer is copied into old__,
+and a mask is applied to calculate new__. A similar issue was resolved in
+commit 6ee2a7058fea ("iio: accel: bma400: Fix smatch warning based on use
+of unintialized value.").
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 961db2da159d ("iio: accel: bma400: Add support for single and double tap events")
+Signed-off-by: Mikhail Lobanov <m.lobanov@rosalinux.ru>
+Link: https://patch.msgid.link/20240910083624.27224-1-m.lobanov@rosalinux.ru
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/accel/bma400_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
+index e90e2f01550ad..04083b7395ab8 100644
+--- a/drivers/iio/accel/bma400_core.c
++++ b/drivers/iio/accel/bma400_core.c
+@@ -1219,7 +1219,8 @@ static int bma400_activity_event_en(struct bma400_data *data,
+ static int bma400_tap_event_en(struct bma400_data *data,
+ enum iio_event_direction dir, int state)
+ {
+- unsigned int mask, field_value;
++ unsigned int mask;
++ unsigned int field_value = 0;
+ int ret;
+
+ /*
+--
+2.43.0
+
--- /dev/null
+From e07c9d12a5a40f4e29c64b0697663a795ca69396 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Oct 2024 23:04:52 +0200
+Subject: iio: adc: ti-lmp92064: add missing select IIO_(TRIGGERED_)BUFFER in
+ Kconfig
+
+From: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+[ Upstream commit a985576af824426e33100554a5958a6beda60a13 ]
+
+This driver makes use of triggered buffers, but does not select the
+required modules.
+
+Add the missing 'select IIO_BUFFER' and 'select IIO_TRIGGERED_BUFFER'.
+
+Fixes: 6c7bc1d27bb2 ("iio: adc: ti-lmp92064: add buffering support")
+Signed-off-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Link: https://patch.msgid.link/20241003-iio-select-v1-6-67c0385197cd@gmail.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index cceac30e2bb9f..c16316664db38 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -1486,6 +1486,8 @@ config TI_LMP92064
+ tristate "Texas Instruments LMP92064 ADC driver"
+ depends on SPI
+ select REGMAP_SPI
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the LMP92064 Precision Current and Voltage
+ sensor.
+--
+2.43.0
+
--- /dev/null
+From 74ce524a374aa3b686de8633d82e048cccb7e78d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 22:06:38 +0200
+Subject: iio: frequency: {admv4420,adrf6780}: format Kconfig entries
+
+From: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+[ Upstream commit 5c9644a683e1690387a476a4f5f6bd5cf9a1d695 ]
+
+Format the entries of these drivers in the Kconfig, where spaces
+instead of tabs were used.
+
+Signed-off-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Link: https://patch.msgid.link/20241007-ad2s1210-select-v2-1-7345d228040f@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 6b8e9dbfaed4 ("iio: frequency: admv4420: fix missing select REMAP_SPI in Kconfig")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/frequency/Kconfig | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
+index 89ae09db5ca5f..7b1a7ed163ced 100644
+--- a/drivers/iio/frequency/Kconfig
++++ b/drivers/iio/frequency/Kconfig
+@@ -92,25 +92,25 @@ config ADMV1014
+ module will be called admv1014.
+
+ config ADMV4420
+- tristate "Analog Devices ADMV4420 K Band Downconverter"
+- depends on SPI
+- help
+- Say yes here to build support for Analog Devices K Band
+- Downconverter with integrated Fractional-N PLL and VCO.
++ tristate "Analog Devices ADMV4420 K Band Downconverter"
++ depends on SPI
++ help
++ Say yes here to build support for Analog Devices K Band
++ Downconverter with integrated Fractional-N PLL and VCO.
+
+- To compile this driver as a module, choose M here: the
+- module will be called admv4420.
++ To compile this driver as a module, choose M here: the
++ module will be called admv4420.
+
+ config ADRF6780
+- tristate "Analog Devices ADRF6780 Microwave Upconverter"
+- depends on SPI
+- depends on COMMON_CLK
+- help
+- Say yes here to build support for Analog Devices ADRF6780
+- 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
+-
+- To compile this driver as a module, choose M here: the
+- module will be called adrf6780.
++ tristate "Analog Devices ADRF6780 Microwave Upconverter"
++ depends on SPI
++ depends on COMMON_CLK
++ help
++ Say yes here to build support for Analog Devices ADRF6780
++ 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
++
++ To compile this driver as a module, choose M here: the
++ module will be called adrf6780.
+
+ endmenu
+ endmenu
+--
+2.43.0
+
--- /dev/null
+From a8eeeb7f9be011a1b75d8f561179cf1b779e5b9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 22:06:39 +0200
+Subject: iio: frequency: admv4420: fix missing select REMAP_SPI in Kconfig
+
+From: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+[ Upstream commit 6b8e9dbfaed471627f7b863633b9937717df1d4d ]
+
+This driver makes use of regmap_spi, but does not select the required
+module.
+Add the missing 'select REGMAP_SPI'.
+
+Fixes: b59c04155901 ("iio: frequency: admv4420.c: Add support for ADMV4420")
+Signed-off-by: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Link: https://patch.msgid.link/20241007-ad2s1210-select-v2-2-7345d228040f@gmail.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/frequency/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
+index 7b1a7ed163ced..583cbdf4e8cda 100644
+--- a/drivers/iio/frequency/Kconfig
++++ b/drivers/iio/frequency/Kconfig
+@@ -94,6 +94,7 @@ config ADMV1014
+ config ADMV4420
+ tristate "Analog Devices ADMV4420 K Band Downconverter"
+ depends on SPI
++ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices K Band
+ Downconverter with integrated Fractional-N PLL and VCO.
+--
+2.43.0
+
--- /dev/null
+From ba80dfabf802f05b2e967732cf38508f09626fa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 14:47:13 -0400
+Subject: ipv4: give an IPv4 dev to blackhole_netdev
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 22600596b6756b166fd052d5facb66287e6f0bad ]
+
+After commit 8d7017fd621d ("blackhole_netdev: use blackhole_netdev to
+invalidate dst entries"), blackhole_netdev was introduced to invalidate
+dst cache entries on the TX path whenever the cache times out or is
+flushed.
+
+When two UDP sockets (sk1 and sk2) send messages to the same destination
+simultaneously, they are using the same dst cache. If the dst cache is
+invalidated on one path (sk2) while the other (sk1) is still transmitting,
+sk1 may try to use the invalid dst entry.
+
+ CPU1 CPU2
+
+ udp_sendmsg(sk1) udp_sendmsg(sk2)
+ udp_send_skb()
+ ip_output()
+ <--- dst timeout or flushed
+ dst_dev_put()
+ ip_finish_output2()
+ ip_neigh_for_gw()
+
+This results in a scenario where ip_neigh_for_gw() returns -EINVAL because
+blackhole_dev lacks an in_dev, which is needed to initialize the neigh in
+arp_constructor(). This error is then propagated back to userspace,
+breaking the UDP application.
+
+The patch fixes this issue by assigning an in_dev to blackhole_dev for
+IPv4, similar to what was done for IPv6 in commit e5f80fcf869a ("ipv6:
+give an IPv6 dev to blackhole_netdev"). This ensures that even when the
+dst entry is invalidated with blackhole_dev, it will not fail to create
+the neigh entry.
+
+As devinet_init() is called earlier than blackhole_netdev_init() in system
+booting, it can not assign the in_dev to blackhole_dev in devinet_init().
+As Paolo suggested, add a separate late_initcall() in devinet.c to ensure
+inet_blackhole_dev_init() is called after blackhole_netdev_init().
+
+Fixes: 8d7017fd621d ("blackhole_netdev: use blackhole_netdev to invalidate dst entries")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/3000792d45ca44e16c785ebe2b092e610e5b3df1.1728499633.git.lucien.xin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/devinet.c | 35 +++++++++++++++++++++++++----------
+ 1 file changed, 25 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index ddab151164542..4d0c8d501ab7c 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -288,17 +288,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
+ /* Account for reference dev->ip_ptr (below) */
+ refcount_set(&in_dev->refcnt, 1);
+
+- err = devinet_sysctl_register(in_dev);
+- if (err) {
+- in_dev->dead = 1;
+- neigh_parms_release(&arp_tbl, in_dev->arp_parms);
+- in_dev_put(in_dev);
+- in_dev = NULL;
+- goto out;
++ if (dev != blackhole_netdev) {
++ err = devinet_sysctl_register(in_dev);
++ if (err) {
++ in_dev->dead = 1;
++ neigh_parms_release(&arp_tbl, in_dev->arp_parms);
++ in_dev_put(in_dev);
++ in_dev = NULL;
++ goto out;
++ }
++ ip_mc_init_dev(in_dev);
++ if (dev->flags & IFF_UP)
++ ip_mc_up(in_dev);
+ }
+- ip_mc_init_dev(in_dev);
+- if (dev->flags & IFF_UP)
+- ip_mc_up(in_dev);
+
+ /* we can receive as soon as ip_ptr is set -- do this last */
+ rcu_assign_pointer(dev->ip_ptr, in_dev);
+@@ -337,6 +339,19 @@ static void inetdev_destroy(struct in_device *in_dev)
+ in_dev_put(in_dev);
+ }
+
++static int __init inet_blackhole_dev_init(void)
++{
++ int err = 0;
++
++ rtnl_lock();
++ if (!inetdev_init(blackhole_netdev))
++ err = -ENOMEM;
++ rtnl_unlock();
++
++ return err;
++}
++late_initcall(inet_blackhole_dev_init);
++
+ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
+ {
+ const struct in_ifaddr *ifa;
+--
+2.43.0
+
--- /dev/null
+From ec183dcaf7490b282d5e62ef6faef4713f6048de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 18:20:03 +0100
+Subject: irqchip/renesas-rzg2l: Fix missing put_device
+
+From: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+
+[ Upstream commit d038109ac1c6bf619473dda03a16a6de58170f7f ]
+
+rzg2l_irqc_common_init() calls of_find_device_by_node(), but the
+corresponding put_device() call is missing. This also gets reported by
+make coccicheck.
+
+Make use of the cleanup interfaces from cleanup.h to call into
+__free_put_device(), which in turn calls into put_device when leaving
+function rzg2l_irqc_common_init() and variable "dev" goes out of scope.
+
+To prevent that the device is put on successful completion, assign NULL to
+"dev" to prevent __free_put_device() from calling into put_device() within
+the successful path.
+
+"make coccicheck" will still complain about missing put_device() calls,
+but those are false positives now.
+
+Fixes: 3fed09559cd8 ("irqchip: Add RZ/G2L IA55 Interrupt Controller driver")
+Signed-off-by: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20241011172003.1242841-1-fabrizio.castro.jz@renesas.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-renesas-rzg2l.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 693ff285ca2c6..99e27e01b0b19 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+@@ -530,12 +531,12 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
+ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
+ const struct irq_chip *irq_chip)
+ {
++ struct platform_device *pdev = of_find_device_by_node(node);
++ struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
+ struct irq_domain *irq_domain, *parent_domain;
+- struct platform_device *pdev;
+ struct reset_control *resetn;
+ int ret;
+
+- pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+@@ -591,6 +592,17 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
+
+ register_syscore_ops(&rzg2l_irqc_syscore_ops);
+
++ /*
++ * Prevent the cleanup function from invoking put_device by assigning
++ * NULL to dev.
++ *
++ * make coccicheck will complain about missing put_device calls, but
++ * those are false positives, as dev will be automatically "put" via
++ * __free_put_device on the failing path.
++ * On the successful path we don't actually want to "put" dev.
++ */
++ dev = NULL;
++
+ return 0;
+
+ pm_put:
+--
+2.43.0
+
--- /dev/null
+From 50a9b21fada0407168e7eb3696b25bf439458987 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Sep 2024 10:56:11 +0200
+Subject: irqchip/riscv-imsic: Fix output text of base address
+
+From: Andrew Jones <ajones@ventanamicro.com>
+
+[ Upstream commit 4a1361e9a5c5dbb5c9f647762ae0cb1a605101fa ]
+
+The "per-CPU IDs ... at base ..." info log is outputting a physical
+address, not a PPN.
+
+Fixes: 027e125acdba ("irqchip/riscv-imsic: Add device MSI domain support for platform devices")
+Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Anup Patel <anup@brainfault.org>
+Link: https://lore.kernel.org/all/20240909085610.46625-2-ajones@ventanamicro.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-riscv-imsic-platform.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
+index 11723a763c102..c5ec66e0bfd33 100644
+--- a/drivers/irqchip/irq-riscv-imsic-platform.c
++++ b/drivers/irqchip/irq-riscv-imsic-platform.c
+@@ -340,7 +340,7 @@ int imsic_irqdomain_init(void)
+ imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
+ pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
+ imsic->fwnode, global->group_index_bits, global->group_index_shift);
+- pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n",
++ pr_info("%pfwP: per-CPU IDs %d at base address %pa\n",
+ imsic->fwnode, global->nr_ids, &global->base_addr);
+ pr_info("%pfwP: total %d interrupts available\n",
+ imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
+--
+2.43.0
+
--- /dev/null
+From d46f9bffebdb44c35af7e539f4e160ae788e7a01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 6 Oct 2024 16:02:44 +0200
+Subject: lib/Kconfig.debug: fix grammar in RUST_BUILD_ASSERT_ALLOW
+
+From: Timo Grautstueck <timo.grautstueck@web.de>
+
+[ Upstream commit ab8851431bef5cc44f0f3f0da112e883fd4d0df5 ]
+
+Just a grammar fix in lib/Kconfig.debug, under the config option
+RUST_BUILD_ASSERT_ALLOW.
+
+Reported-by: Miguel Ojeda <ojeda@kernel.org>
+Closes: https://github.com/Rust-for-Linux/linux/issues/1006
+Fixes: ecaa6ddff2fd ("rust: add `build_error` crate")
+Signed-off-by: Timo Grautstueck <timo.grautstueck@web.de>
+Link: https://lore.kernel.org/r/20241006140244.5509-1-timo.grautstueck@web.de
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/Kconfig.debug | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index a30c03a661726..8079f5c2dfe4f 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -3023,7 +3023,7 @@ config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+- Controls how are `build_error!` and `build_assert!` handled during build.
++ Controls how `build_error!` and `build_assert!` are handled during the build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+--
+2.43.0
+
--- /dev/null
+From 57b9af0be7ba8283a895703006acf900e93bdab6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 17:16:37 +0200
+Subject: macsec: don't increment counters for an unrelated SA
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit cf58aefb1332db322060cad4a330d5f9292b0f41 ]
+
+On RX, we shouldn't be incrementing the stats for an arbitrary SA in
+case the actual SA hasn't been set up. Those counters are intended to
+track packets for their respective AN when the SA isn't currently
+configured. Due to the way MACsec is implemented, we don't keep
+counters unless the SA is configured, so we can't track those packets,
+and those counters will remain at 0.
+
+The RXSC's stats keeps track of those packets without telling us which
+AN they belonged to. We could add counters for non-existent SAs, and
+then find a way to integrate them in the dump to userspace, but I
+don't think it's worth the effort.
+
+Fixes: 91ec9bd57f35 ("macsec: Fix traffic counters/statistics")
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Link: https://patch.msgid.link/f5ac92aaa5b89343232615f4c03f9f95042c6aa0.1728657709.git.sd@queasysnail.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/macsec.c | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2da70bc3dd869..2a31d09d43ed4 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -154,19 +154,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
+ return sa;
+ }
+
+-static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+-{
+- struct macsec_rx_sa *sa = NULL;
+- int an;
+-
+- for (an = 0; an < MACSEC_NUM_AN; an++) {
+- sa = macsec_rxsa_get(rx_sc->sa[an]);
+- if (sa)
+- break;
+- }
+- return sa;
+-}
+-
+ static void free_rx_sc_rcu(struct rcu_head *head)
+ {
+ struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
+@@ -1208,15 +1195,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ /* If validateFrames is Strict or the C bit in the
+ * SecTAG is set, discard
+ */
+- struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotUsingSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ DEV_STATS_INC(secy->netdev, rx_errors);
+- if (active_rx_sa)
+- this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
+ goto drop_nosa;
+ }
+
+@@ -1226,8 +1210,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsUnusedSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+- if (active_rx_sa)
+- this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
+ goto deliver;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From b13b8a326a0c338769aa6ea6633e039a9632f95b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 12:24:45 +0200
+Subject: mm: don't install PMD mappings when THPs are disabled by the
+ hw/process/vma
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit 2b0f922323ccfa76219bcaacd35cd50aeaa13592 ]
+
+We (or rather, readahead logic :) ) might be allocating a THP in the
+pagecache and then try mapping it into a process that explicitly disabled
+THP: we might end up installing PMD mappings.
+
+This is a problem for s390x KVM, which explicitly remaps all PMD-mapped
+THPs to be PTE-mapped in s390_enable_sie()->thp_split_mm(), before
+starting the VM.
+
+For example, starting a VM backed on a file system with large folios
+supported makes the VM crash when the VM tries accessing such a mapping
+using KVM.
+
+Is it also a problem when the HW disabled THP using
+TRANSPARENT_HUGEPAGE_UNSUPPORTED? At least on x86 this would be the case
+without X86_FEATURE_PSE.
+
+In the future, we might be able to do better on s390x and only disallow
+PMD mappings -- what s390x and likely TRANSPARENT_HUGEPAGE_UNSUPPORTED
+really wants. For now, fix it by essentially performing the same check as
+would be done in __thp_vma_allowable_orders() or in shmem code, where this
+works as expected, and disallow PMD mappings, making us fallback to PTE
+mappings.
+
+Link: https://lkml.kernel.org/r/20241011102445.934409-3-david@redhat.com
+Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: Leo Fu <bfu@redhat.com>
+Tested-by: Thomas Huth <thuth@redhat.com>
+Cc: Thomas Huth <thuth@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Janosch Frank <frankja@linux.ibm.com>
+Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index cda2c12c500b8..cb66345e398d2 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4719,6 +4719,15 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
+ pmd_t entry;
+ vm_fault_t ret = VM_FAULT_FALLBACK;
+
++ /*
++ * It is too late to allocate a small folio, we already have a large
++ * folio in the pagecache: especially s390 KVM cannot tolerate any
++ * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any
++ * PMD mappings if THPs are disabled.
++ */
++ if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags))
++ return ret;
++
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
+ return ret;
+
+--
+2.43.0
+
--- /dev/null
+From dbbe0e282428ba978e9aa1f22932a381b4216f8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 12:24:44 +0200
+Subject: mm: huge_memory: add vma_thp_disabled() and thp_disabled_by_hw()
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+[ Upstream commit 963756aac1f011d904ddd9548ae82286d3a91f96 ]
+
+Patch series "mm: don't install PMD mappings when THPs are disabled by the
+hw/process/vma".
+
+During testing, it was found that we can get PMD mappings in processes
+where THP (and more precisely, PMD mappings) are supposed to be disabled.
+While it works as expected for anon+shmem, the pagecache is the
+problematic bit.
+
+For s390 KVM this currently means that a VM backed by a file located on
+filesystem with large folio support can crash when KVM tries accessing the
+problematic page, because the readahead logic might decide to use a
+PMD-sized THP and faulting it into the page tables will install a PMD
+mapping, something that s390 KVM cannot tolerate.
+
+This might also be a problem with HW that does not support PMD mappings,
+but I did not try reproducing it.
+
+Fix it by respecting the ways to disable THPs when deciding whether we can
+install a PMD mapping. khugepaged should already be taking care of not
+collapsing if THPs are effectively disabled for the hw/process/vma.
+
+This patch (of 2):
+
+Add vma_thp_disabled() and thp_disabled_by_hw() helpers to be shared by
+shmem_allowable_huge_orders() and __thp_vma_allowable_orders().
+
+[david@redhat.com: rename to vma_thp_disabled(), split out thp_disabled_by_hw() ]
+Link: https://lkml.kernel.org/r/20241011102445.934409-2-david@redhat.com
+Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: Leo Fu <bfu@redhat.com>
+Tested-by: Thomas Huth <thuth@redhat.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Boqiao Fu <bfu@redhat.com>
+Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
+Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Janosch Frank <frankja@linux.ibm.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 2b0f922323cc ("mm: don't install PMD mappings when THPs are disabled by the hw/process/vma")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/huge_mm.h | 18 ++++++++++++++++++
+ mm/huge_memory.c | 13 +------------
+ mm/shmem.c | 7 +------
+ 3 files changed, 20 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index e25d9ebfdf89a..6d334c211176c 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -308,6 +308,24 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+
++static inline bool vma_thp_disabled(struct vm_area_struct *vma,
++ unsigned long vm_flags)
++{
++ /*
++ * Explicitly disabled through madvise or prctl, or some
++ * architectures may disable THP for some mappings, for
++ * example, s390 kvm.
++ */
++ return (vm_flags & VM_NOHUGEPAGE) ||
++ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
++}
++
++static inline bool thp_disabled_by_hw(void)
++{
++ /* If the hardware/firmware marked hugepage support disabled. */
++ return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
++}
++
+ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
+ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index d12ec1c8c7f07..e44508e46e897 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -106,18 +106,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ if (!vma->vm_mm) /* vdso */
+ return 0;
+
+- /*
+- * Explicitly disabled through madvise or prctl, or some
+- * architectures may disable THP for some mappings, for
+- * example, s390 kvm.
+- * */
+- if ((vm_flags & VM_NOHUGEPAGE) ||
+- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+- return 0;
+- /*
+- * If the hardware/firmware marked hugepage support disabled.
+- */
+- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
++ if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags))
+ return 0;
+
+ /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index a332323eea0b9..27f496d6e43eb 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1642,12 +1642,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ loff_t i_size;
+ int order;
+
+- if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
+- return 0;
+-
+- /* If the hardware/firmware marked hugepage support disabled. */
+- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
++ if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
+ return 0;
+
+ global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
+--
+2.43.0
+
--- /dev/null
+From 572920c37b50519aaefb9d2e8d07051eeb99d5c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jul 2024 13:43:19 +0800
+Subject: mm: shmem: move shmem_huge_global_enabled() into
+ shmem_allowable_huge_orders()
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit 6beeab870e70b2d4f49baf6c6be9da1b61c169f8 ]
+
+Move shmem_huge_global_enabled() into shmem_allowable_huge_orders(), so
+that shmem_allowable_huge_orders() can also help to find the allowable
+huge orders for tmpfs. Moreover the shmem_huge_global_enabled() can
+become static. While we are at it, passing the vma instead of mm for
+shmem_huge_global_enabled() makes code cleaner.
+
+No functional changes.
+
+Link: https://lkml.kernel.org/r/8e825146bb29ee1a1c7bd64d2968ff3e19be7815.1721626645.git.baolin.wang@linux.alibaba.com
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Barry Song <21cnbao@gmail.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Lance Yang <ioworker0@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 2b0f922323cc ("mm: don't install PMD mappings when THPs are disabled by the hw/process/vma")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/shmem_fs.h | 12 ++--------
+ mm/huge_memory.c | 12 +++-------
+ mm/shmem.c | 47 +++++++++++++++++++++++++---------------
+ 3 files changed, 35 insertions(+), 36 deletions(-)
+
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 405ee8d3589a5..1564d7d3ca615 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -111,21 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+ int shmem_unuse(unsigned int type);
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags);
+ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+- bool global_huge);
++ bool shmem_huge_force);
+ #else
+-static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+- bool shmem_huge_force, struct mm_struct *mm,
+- unsigned long vm_flags)
+-{
+- return false;
+-}
+ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+- bool global_huge)
++ bool shmem_huge_force)
+ {
+ return 0;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 26843caa46962..d12ec1c8c7f07 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -159,16 +159,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ * Must be done before hugepage flags check since shmem has its
+ * own flags.
+ */
+- if (!in_pf && shmem_file(vma->vm_file)) {
+- bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
+- vma->vm_pgoff, !enforce_sysfs,
+- vma->vm_mm, vm_flags);
+-
+- if (!vma_is_anon_shmem(vma))
+- return global_huge ? orders : 0;
++ if (!in_pf && shmem_file(vma->vm_file))
+ return shmem_allowable_huge_orders(file_inode(vma->vm_file),
+- vma, vma->vm_pgoff, global_huge);
+- }
++ vma, vma->vm_pgoff,
++ !enforce_sysfs);
+
+ if (!vma_is_anonymous(vma)) {
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d2ca6d4300bb8..a332323eea0b9 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -549,9 +549,10 @@ static bool shmem_confirm_swap(struct address_space *mapping,
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+- bool shmem_huge_force, struct mm_struct *mm,
++ bool shmem_huge_force, struct vm_area_struct *vma,
+ unsigned long vm_flags)
+ {
++ struct mm_struct *mm = vma ? vma->vm_mm : NULL;
+ loff_t i_size;
+
+ if (!S_ISREG(inode->i_mode))
+@@ -581,15 +582,15 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ }
+ }
+
+-bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+- bool shmem_huge_force, struct mm_struct *mm,
++static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct vm_area_struct *vma,
+ unsigned long vm_flags)
+ {
+ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
+ return false;
+
+ return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
+- mm, vm_flags);
++ vma, vm_flags);
+ }
+
+ #if defined(CONFIG_SYSFS)
+@@ -772,6 +773,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
+ {
+ return 0;
+ }
++
++static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct vm_area_struct *vma,
++ unsigned long vm_flags)
++{
++ return false;
++}
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ /*
+@@ -1625,22 +1633,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+- bool global_huge)
++ bool shmem_huge_force)
+ {
+ unsigned long mask = READ_ONCE(huge_shmem_orders_always);
+ unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
+- unsigned long vm_flags = vma->vm_flags;
++ unsigned long vm_flags = vma ? vma->vm_flags : 0;
++ bool global_huge;
+ loff_t i_size;
+ int order;
+
+- if ((vm_flags & VM_NOHUGEPAGE) ||
+- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
++ if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
++ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
+ return 0;
+
+ /* If the hardware/firmware marked hugepage support disabled. */
+ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
+ return 0;
+
++ global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
++ vma, vm_flags);
++ if (!vma || !vma_is_anon_shmem(vma)) {
++ /*
++ * For tmpfs, we now only support PMD sized THP if huge page
++ * is enabled, otherwise fallback to order 0.
++ */
++ return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
++ }
++
+ /*
+ * Following the 'deny' semantics of the top level, force the huge
+ * option off from all mounts.
+@@ -2086,7 +2105,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
+ struct mm_struct *fault_mm;
+ struct folio *folio;
+ int error;
+- bool alloced, huge;
++ bool alloced;
+ unsigned long orders = 0;
+
+ if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
+@@ -2159,14 +2178,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
+ return 0;
+ }
+
+- huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
+- vma ? vma->vm_flags : 0);
+- /* Find hugepage orders that are allowed for anonymous shmem. */
+- if (vma && vma_is_anon_shmem(vma))
+- orders = shmem_allowable_huge_orders(inode, vma, index, huge);
+- else if (huge)
+- orders = BIT(HPAGE_PMD_ORDER);
+-
++ /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
++ orders = shmem_allowable_huge_orders(inode, vma, index, false);
+ if (orders > 0) {
+ gfp_t huge_gfp;
+
+--
+2.43.0
+
--- /dev/null
+From f889441f621418288eba885fba5240bcfa554f74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jul 2024 13:43:18 +0800
+Subject: mm: shmem: rename shmem_is_huge() to shmem_huge_global_enabled()
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+[ Upstream commit d58a2a581f132529eefac5377676011562b631b8 ]
+
+shmem_is_huge() is now used to check if the top-level huge page is
+enabled, thus rename it to reflect its usage.
+
+Link: https://lkml.kernel.org/r/da53296e0ab6359aa083561d9dc01e4223d60fbe.1721626645.git.baolin.wang@linux.alibaba.com
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Barry Song <21cnbao@gmail.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Lance Yang <ioworker0@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 2b0f922323cc ("mm: don't install PMD mappings when THPs are disabled by the hw/process/vma")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/shmem_fs.h | 9 +++++----
+ mm/huge_memory.c | 5 +++--
+ mm/shmem.c | 15 ++++++++-------
+ 3 files changed, 16 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 1d06b1e5408a5..405ee8d3589a5 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -111,14 +111,15 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+ int shmem_unuse(unsigned int type);
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags);
++extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
++ struct mm_struct *mm, unsigned long vm_flags);
+ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ bool global_huge);
+ #else
+-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags)
++static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
+ {
+ return false;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 99b146d16a185..26843caa46962 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -160,8 +160,9 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ * own flags.
+ */
+ if (!in_pf && shmem_file(vma->vm_file)) {
+- bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
+- !enforce_sysfs, vma->vm_mm, vm_flags);
++ bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
++ vma->vm_pgoff, !enforce_sysfs,
++ vma->vm_mm, vm_flags);
+
+ if (!vma_is_anon_shmem(vma))
+ return global_huge ? orders : 0;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 5a77acf6ac6a6..d2ca6d4300bb8 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -548,9 +548,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
+
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+-static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
+- bool shmem_huge_force, struct mm_struct *mm,
+- unsigned long vm_flags)
++static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
+ {
+ loff_t i_size;
+
+@@ -581,14 +581,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
+ }
+ }
+
+-bool shmem_is_huge(struct inode *inode, pgoff_t index,
++bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ bool shmem_huge_force, struct mm_struct *mm,
+ unsigned long vm_flags)
+ {
+ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
+ return false;
+
+- return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
++ return __shmem_huge_global_enabled(inode, index, shmem_huge_force,
++ mm, vm_flags);
+ }
+
+ #if defined(CONFIG_SYSFS)
+@@ -1156,7 +1157,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
+ STATX_ATTR_NODUMP);
+ generic_fillattr(idmap, request_mask, inode, stat);
+
+- if (shmem_is_huge(inode, 0, false, NULL, 0))
++ if (shmem_huge_global_enabled(inode, 0, false, NULL, 0))
+ stat->blksize = HPAGE_PMD_SIZE;
+
+ if (request_mask & STATX_BTIME) {
+@@ -2158,7 +2159,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
+ return 0;
+ }
+
+- huge = shmem_is_huge(inode, index, false, fault_mm,
++ huge = shmem_huge_global_enabled(inode, index, false, fault_mm,
+ vma ? vma->vm_flags : 0);
+ /* Find hugepage orders that are allowed for anonymous shmem. */
+ if (vma && vma_is_anon_shmem(vma))
+--
+2.43.0
+
--- /dev/null
+From dba616e89270d0d3e24fb2ddef903e44524d0a58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 22:59:01 +0800
+Subject: net: bcmasp: fix potential memory leak in bcmasp_xmit()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit fed07d3eb8a8d9fcc0e455175a89bc6445d6faed ]
+
+The bcmasp_xmit() returns NETDEV_TX_OK without freeing skb
+in case of mapping fails, add dev_kfree_skb() to fix it.
+
+Fixes: 490cb412007d ("net: bcmasp: Add support for ASP2.0 Ethernet controller")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Acked-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20241014145901.48940-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+index 82768b0e90262..9ea16ef4139d3 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ /* Rewind so we do not have a hole */
+ spb_index = intf->tx_spb_index;
++ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From ff4f18cc35a59f66b27b4505fe7dcccc8ce49bdb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 13:43:42 -0700
+Subject: net: dsa: mv88e6xxx: Fix the max_vid definition for the MV88E6361
+
+From: Peter Rashleigh <peter@rashleigh.ca>
+
+[ Upstream commit 1833d8a26f057128fd63e126b4428203ece84684 ]
+
+According to the Marvell datasheet the 88E6361 has two VTU pages
+(4k VIDs per page) so the max_vid should be 8191, not 4095.
+
+In the current implementation mv88e6xxx_vtu_walk() gives unexpected
+results because of this error. I verified that mv88e6xxx_vtu_walk()
+works correctly on the MV88E6361 with this patch in place.
+
+Fixes: 12899f299803 ("net: dsa: mv88e6xxx: enable support for 88E6361 switch")
+Signed-off-by: Peter Rashleigh <peter@rashleigh.ca>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20241014204342.5852-1-peter@rashleigh.ca
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5b4e2ce5470d9..284270a4ade1c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -6347,7 +6347,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ .invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
+ .num_internal_phys = 5,
+ .internal_phys_offset = 3,
+- .max_vid = 4095,
++ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+--
+2.43.0
+
--- /dev/null
+From 409aef464f99805ca4b126b909dd7b3d4cef009f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 18:30:41 +0300
+Subject: net: dsa: vsc73xx: fix reception from VLAN-unaware bridges
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 11d06f0aaef89f4cad68b92510bd9decff2d7b87 ]
+
+Similar to the situation described for sja1105 in commit 1f9fc48fd302
+("net: dsa: sja1105: fix reception from VLAN-unaware bridges"), the
+vsc73xx driver uses tag_8021q and doesn't need the ds->untag_bridge_pvid
+request. In fact, this option breaks packet reception.
+
+The ds->untag_bridge_pvid option strips VLANs from packets received on
+VLAN-unaware bridge ports. But those VLANs should already be stripped
+by tag_vsc73xx_8021q.c as part of vsc73xx_rcv() - they are not VLANs in
+VLAN-unaware mode, but DSA tags. Thus, dsa_software_vlan_untag() tries
+to untag a VLAN that doesn't exist, corrupting the packet.
+
+Fixes: 93e4649efa96 ("net: dsa: provide a software untagging function on RX for VLAN-aware bridges")
+Tested-by: Pawel Dembicki <paweldembicki@gmail.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://patch.msgid.link/20241014153041.1110364-1-vladimir.oltean@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/vitesse-vsc73xx-core.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 212421e9d42e4..f5a1fefb76509 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -721,7 +721,6 @@ static int vsc73xx_setup(struct dsa_switch *ds)
+
+ dev_info(vsc->dev, "set up the switch\n");
+
+- ds->untag_bridge_pvid = true;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+
+ /* Issue RESET */
+--
+2.43.0
+
--- /dev/null
+From b3a6d66cdd77096ac18867a4839e7c6a5cb74f60 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2024 19:04:34 +0800
+Subject: net: ethernet: aeroflex: fix potential memory leak in
+ greth_start_xmit_gbit()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit cf57b5d7a2aad456719152ecd12007fe031628a3 ]
+
+The greth_start_xmit_gbit() returns NETDEV_TX_OK without freeing skb
+in case of skb->len being too long, add dev_kfree_skb() to fix it.
+
+Fixes: d4c41139df6e ("net: Add Aeroflex Gaisler 10/100/1G Ethernet MAC driver")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>
+Link: https://patch.msgid.link/20241012110434.49265-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/aeroflex/greth.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
+index 27af7746d645b..adf6f67c5fcba 100644
+--- a/drivers/net/ethernet/aeroflex/greth.c
++++ b/drivers/net/ethernet/aeroflex/greth.c
+@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+- goto out;
++ goto len_error;
+ }
+
+ /* Save skb pointer. */
+@@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+ map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
++len_error:
+ dev_kfree_skb(skb);
+ out:
+ return err;
+--
+2.43.0
+
--- /dev/null
+From dd0230984cee23bf13bd08c6199d2c184f182431 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 10:17:55 +0200
+Subject: net: ethernet: mtk_eth_soc: fix memory corruption during fq dma init
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 88806efc034a9830f483963326b99930ad519af1 ]
+
+The loop responsible for allocating up to MTK_FQ_DMA_LENGTH buffers must
+only touch as many descriptors, otherwise it ends up corrupting unrelated
+memory. Fix the loop iteration count accordingly.
+
+Fixes: c57e55819443 ("net: ethernet: mtk_eth_soc: handle dma buffer size soc specific")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241015081755.31060-1-nbd@nbd.name
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 16ca427cf4c3f..ed7313c10a052 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1171,7 +1171,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+- for (i = 0; i < cnt; i++) {
++ for (i = 0; i < len; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+--
+2.43.0
+
--- /dev/null
+From 63d3e80a15c48eb4b1af3220393cb3ee227876a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 22:42:50 +0800
+Subject: net: ethernet: rtsn: fix potential memory leak in rtsn_start_xmit()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit c186b7a7f2387d9e09ad408420570be025b187c5 ]
+
+The rtsn_start_xmit() returns NETDEV_TX_OK without freeing skb
+in case of skb->len being too long, add dev_kfree_skb_any() to fix it.
+
+Fixes: b0d3969d2b4d ("net: ethernet: rtsn: Add support for Renesas Ethernet-TSN")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20241014144250.38802-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/rtsn.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
+index 0e6cea42f0077..da90adef6b2b7 100644
+--- a/drivers/net/ethernet/renesas/rtsn.c
++++ b/drivers/net/ethernet/renesas/rtsn.c
+@@ -1057,6 +1057,7 @@ static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
++ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 898ef1694d7a0e576cec40ae9b68376ac1d7a1e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 12:32:05 +0300
+Subject: net/mlx5: Check for invalid vector index on EQ creation
+
+From: Maher Sanalla <msanalla@nvidia.com>
+
+[ Upstream commit d4f25be27e3ef7e23998fbd3dd4bff0602de7ae5 ]
+
+Currently, mlx5 driver does not enforce vector index to be lower than
+the maximum number of supported completion vectors when requesting a
+new completion EQ. Thus, mlx5_comp_eqn_get() fails when trying to
+acquire an IRQ with an improper vector index.
+
+To prevent the case above, enforce that vector index value is
+valid and lower than maximum in mlx5_comp_eqn_get() before handling the
+request.
+
+Fixes: f14c1a14e632 ("net/mlx5: Allocate completion EQs dynamically")
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index cb7e7e4104aff..99d9e8863bfd6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -1080,6 +1080,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
+ struct mlx5_eq_comp *eq;
+ int ret = 0;
+
++ if (vecidx >= table->max_comp_eqs) {
++ mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
++ vecidx, table->max_comp_eqs);
++ return -EINVAL;
++ }
++
+ mutex_lock(&table->comp_lock);
+ eq = xa_load(&table->comp_eqs, vecidx);
+ if (eq) {
+--
+2.43.0
+
--- /dev/null
+From 194d16e086e9fbf5148ed128ae45518b818f3f0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 12:32:06 +0300
+Subject: net/mlx5: Fix command bitmask initialization
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit d62b14045c6511a7b2d4948d1a83a4e592deeb05 ]
+
+The command bitmask has a dedicated bit for the MANAGE_PAGES command; this
+bit isn't initialized during command bitmask initialization, only during
+MANAGE_PAGES.
+
+In addition, mlx5_cmd_trigger_completions() is trying to trigger
+completion for MANAGE_PAGES command as well.
+
+Hence, in case a health error occurred before any MANAGE_PAGES command
+has been invoked (for example, during mlx5_enable_hca()),
+mlx5_cmd_trigger_completions() will try to trigger completion for
+MANAGE_PAGES command, which will result in null-ptr-deref error.[1]
+
+Fix it by initializing the command bitmask correctly.
+
+While at it, re-write the code for better understanding.
+
+[1]
+BUG: KASAN: null-ptr-deref in mlx5_cmd_trigger_completions+0x1db/0x600 [mlx5_core]
+Write of size 4 at addr 0000000000000214 by task kworker/u96:2/12078
+CPU: 10 PID: 12078 Comm: kworker/u96:2 Not tainted 6.9.0-rc2_for_upstream_debug_2024_04_07_19_01 #1
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+Workqueue: mlx5_health0000:08:00.0 mlx5_fw_fatal_reporter_err_work [mlx5_core]
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x7e/0xc0
+ kasan_report+0xb9/0xf0
+ kasan_check_range+0xec/0x190
+ mlx5_cmd_trigger_completions+0x1db/0x600 [mlx5_core]
+ mlx5_cmd_flush+0x94/0x240 [mlx5_core]
+ enter_error_state+0x6c/0xd0 [mlx5_core]
+ mlx5_fw_fatal_reporter_err_work+0xf3/0x480 [mlx5_core]
+ process_one_work+0x787/0x1490
+ ? lockdep_hardirqs_on_prepare+0x400/0x400
+ ? pwq_dec_nr_in_flight+0xda0/0xda0
+ ? assign_work+0x168/0x240
+ worker_thread+0x586/0xd30
+ ? rescuer_thread+0xae0/0xae0
+ kthread+0x2df/0x3b0
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork+0x2d/0x70
+ ? kthread_complete_and_exit+0x20/0x20
+ ret_from_fork_asm+0x11/0x20
+ </TASK>
+
+Fixes: 9b98d395b85d ("net/mlx5: Start health poll at earlier stage of driver load")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 20768ef2e9d2b..86d63c5f27ce2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1760,6 +1760,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
+ }
+ }
+
++#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
++#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
++ MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
++
+ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_cmd *cmd = &dev->cmd;
+@@ -1771,7 +1775,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ /* wait for pending handlers to complete */
+ mlx5_eq_synchronize_cmd_irq(dev);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+- vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
++ vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
+ if (!vector)
+ goto no_trig;
+
+@@ -2345,7 +2349,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
+
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
+ cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
+- cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
++ cmd->vars.bitmask = MLX5_CMD_MASK;
+
+ sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
+ sema_init(&cmd->vars.pages_sem, 1);
+--
+2.43.0
+
--- /dev/null
+From 35bdc15d4d64d39895320ed6103f7de31aa97f39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 12:32:07 +0300
+Subject: net/mlx5: Unregister notifier on eswitch init failure
+
+From: Cosmin Ratiu <cratiu@nvidia.com>
+
+[ Upstream commit 1da9cfd6c41c2e6bbe624d0568644e1521c33e12 ]
+
+It otherwise remains registered and a subsequent attempt at eswitch
+enabling might trigger warnings of the sort:
+
+[ 682.589148] ------------[ cut here ]------------
+[ 682.590204] notifier callback eswitch_vport_event [mlx5_core] already registered
+[ 682.590256] WARNING: CPU: 13 PID: 2660 at kernel/notifier.c:31 notifier_chain_register+0x3e/0x90
+[...snipped]
+[ 682.610052] Call Trace:
+[ 682.610369] <TASK>
+[ 682.610663] ? __warn+0x7c/0x110
+[ 682.611050] ? notifier_chain_register+0x3e/0x90
+[ 682.611556] ? report_bug+0x148/0x170
+[ 682.611977] ? handle_bug+0x36/0x70
+[ 682.612384] ? exc_invalid_op+0x13/0x60
+[ 682.612817] ? asm_exc_invalid_op+0x16/0x20
+[ 682.613284] ? notifier_chain_register+0x3e/0x90
+[ 682.613789] atomic_notifier_chain_register+0x25/0x40
+[ 682.614322] mlx5_eswitch_enable_locked+0x1d4/0x3b0 [mlx5_core]
+[ 682.614965] mlx5_eswitch_enable+0xc9/0x100 [mlx5_core]
+[ 682.615551] mlx5_device_enable_sriov+0x25/0x340 [mlx5_core]
+[ 682.616170] mlx5_core_sriov_configure+0x50/0x170 [mlx5_core]
+[ 682.616789] sriov_numvfs_store+0xb0/0x1b0
+[ 682.617248] kernfs_fop_write_iter+0x117/0x1a0
+[ 682.617734] vfs_write+0x231/0x3f0
+[ 682.618138] ksys_write+0x63/0xe0
+[ 682.618536] do_syscall_64+0x4c/0x100
+[ 682.618958] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: 7624e58a8b3a ("net/mlx5: E-switch, register event handler before arming the event")
+Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 17f78091ad30e..7aef30dbd82d6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ }
+
+ if (err)
+- goto abort;
++ goto err_esw_enable;
+
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
+@@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+
+ return 0;
+
+-abort:
++err_esw_enable:
++ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+ mlx5_esw_acls_ns_cleanup(esw);
+ return err;
+ }
+--
+2.43.0
+
--- /dev/null
+From 1623c9bb472b89c62159f03dbcd623b37920c162 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 12:32:08 +0300
+Subject: net/mlx5e: Don't call cleanup on profile rollback failure
+
+From: Cosmin Ratiu <cratiu@nvidia.com>
+
+[ Upstream commit 4dbc1d1a9f39c3711ad2a40addca04d07d9ab5d0 ]
+
+When profile rollback fails in mlx5e_netdev_change_profile, the netdev
+profile var is left set to NULL. Avoid a crash when unloading the driver
+by not calling profile->cleanup in such a case.
+
+This was encountered while testing, with the original trigger that
+the wq rescuer thread creation got interrupted (presumably due to
+Ctrl+C-ing modprobe), which gets converted to ENOMEM (-12) by
+mlx5e_priv_init, the profile rollback also fails for the same reason
+(signal still active) so the profile is left as NULL, leading to a crash
+later in _mlx5e_remove.
+
+ [ 732.473932] mlx5_core 0000:08:00.1: E-Switch: Unload vfs: mode(OFFLOADS), nvfs(2), necvfs(0), active vports(2)
+ [ 734.525513] workqueue: Failed to create a rescuer kthread for wq "mlx5e": -EINTR
+ [ 734.557372] mlx5_core 0000:08:00.1: mlx5e_netdev_init_profile:6235:(pid 6086): mlx5e_priv_init failed, err=-12
+ [ 734.559187] mlx5_core 0000:08:00.1 eth3: mlx5e_netdev_change_profile: new profile init failed, -12
+ [ 734.560153] workqueue: Failed to create a rescuer kthread for wq "mlx5e": -EINTR
+ [ 734.589378] mlx5_core 0000:08:00.1: mlx5e_netdev_init_profile:6235:(pid 6086): mlx5e_priv_init failed, err=-12
+ [ 734.591136] mlx5_core 0000:08:00.1 eth3: mlx5e_netdev_change_profile: failed to rollback to orig profile, -12
+ [ 745.537492] BUG: kernel NULL pointer dereference, address: 0000000000000008
+ [ 745.538222] #PF: supervisor read access in kernel mode
+<snipped>
+ [ 745.551290] Call Trace:
+ [ 745.551590] <TASK>
+ [ 745.551866] ? __die+0x20/0x60
+ [ 745.552218] ? page_fault_oops+0x150/0x400
+ [ 745.555307] ? exc_page_fault+0x79/0x240
+ [ 745.555729] ? asm_exc_page_fault+0x22/0x30
+ [ 745.556166] ? mlx5e_remove+0x6b/0xb0 [mlx5_core]
+ [ 745.556698] auxiliary_bus_remove+0x18/0x30
+ [ 745.557134] device_release_driver_internal+0x1df/0x240
+ [ 745.557654] bus_remove_device+0xd7/0x140
+ [ 745.558075] device_del+0x15b/0x3c0
+ [ 745.558456] mlx5_rescan_drivers_locked.part.0+0xb1/0x2f0 [mlx5_core]
+ [ 745.559112] mlx5_unregister_device+0x34/0x50 [mlx5_core]
+ [ 745.559686] mlx5_uninit_one+0x46/0xf0 [mlx5_core]
+ [ 745.560203] remove_one+0x4e/0xd0 [mlx5_core]
+ [ 745.560694] pci_device_remove+0x39/0xa0
+ [ 745.561112] device_release_driver_internal+0x1df/0x240
+ [ 745.561631] driver_detach+0x47/0x90
+ [ 745.562022] bus_remove_driver+0x84/0x100
+ [ 745.562444] pci_unregister_driver+0x3b/0x90
+ [ 745.562890] mlx5_cleanup+0xc/0x1b [mlx5_core]
+ [ 745.563415] __x64_sys_delete_module+0x14d/0x2f0
+ [ 745.563886] ? kmem_cache_free+0x1b0/0x460
+ [ 745.564313] ? lockdep_hardirqs_on_prepare+0xe2/0x190
+ [ 745.564825] do_syscall_64+0x6d/0x140
+ [ 745.565223] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+ [ 745.565725] RIP: 0033:0x7f1579b1288b
+
+Fixes: 3ef14e463f6e ("net/mlx5e: Separate between netdev objects and mlx5e profiles initialization")
+Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 16b67c457b605..3e11c1c6d4f69 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -6508,7 +6508,9 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
+ mlx5e_dcbnl_delete_app(priv);
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+- priv->profile->cleanup(priv);
++ /* Avoid cleanup if profile rollback failed. */
++ if (priv->profile)
++ priv->profile->cleanup(priv);
+ mlx5e_destroy_netdev(priv);
+ mlx5e_devlink_port_unregister(mlx5e_dev);
+ mlx5e_destroy_devlink(mlx5e_dev);
+--
+2.43.0
+
--- /dev/null
+From 08454056a6574e60ff1d9f44bb1a8bc93eb208da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 14:43:43 +0200
+Subject: net: ravb: Only advertise Rx/Tx timestamps if hardware supports it
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+
+[ Upstream commit 126e799602f45e9ce1ded03ee9eadda68bf470e0 ]
+
+Recent work moving the reporting of Rx software timestamps to the core
+[1] highlighted an issue where hardware time stamping was advertised
+for the platforms where it is not supported.
+
+Fix this by covering advertising support for hardware timestamps only if
+the hardware supports it. Due to the Tx implementation in RAVB software
+Tx timestamping is also only considered if the hardware supports
+hardware timestamps. This should be addressed in future, but this fix
+only reflects what the driver currently implements.
+
+1. Commit 277901ee3a26 ("ravb: Remove setting of RX software timestamp")
+
+Fixes: 7e09a052dc4e ("ravb: Exclude gPTP feature support for RZ/G2L")
+Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Paul Barker <paul.barker.ct@bp.renesas.com>
+Tested-by: Paul Barker <paul.barker.ct@bp.renesas.com>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Link: https://patch.msgid.link/20241014124343.3875285-1-niklas.soderlund+renesas@ragnatech.se
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 25 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d2a6518532f37..907af4651c553 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1750,20 +1750,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
+
+- info->so_timestamping =
+- SOF_TIMESTAMPING_TX_SOFTWARE |
+- SOF_TIMESTAMPING_TX_HARDWARE |
+- SOF_TIMESTAMPING_RX_HARDWARE |
+- SOF_TIMESTAMPING_RAW_HARDWARE;
+- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+- info->rx_filters =
+- (1 << HWTSTAMP_FILTER_NONE) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+- (1 << HWTSTAMP_FILTER_ALL);
+- if (hw_info->gptp || hw_info->ccc_gac)
++ if (hw_info->gptp || hw_info->ccc_gac) {
++ info->so_timestamping =
++ SOF_TIMESTAMPING_TX_SOFTWARE |
++ SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
++ info->rx_filters =
++ (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
++ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
+- else
+- info->phc_index = 0;
++ }
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From bda33472c40ba65b76823e5d9d06be5dc45c99e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 11:56:24 +0000
+Subject: net/smc: Fix memory leak when using percpu refs
+
+From: Kai Shen <KaiShen@linux.alibaba.com>
+
+[ Upstream commit 25c12b459db8365fee84b63f3dd7910f70627f29 ]
+
+This patch adds missing percpu_ref_exit when releasing percpu refs.
+When releasing percpu refs, percpu_ref_exit should be called.
+Otherwise, memory leak happens.
+
+Fixes: 79a22238b4f2 ("net/smc: Use percpu ref for wr tx reference")
+Signed-off-by: Kai Shen <KaiShen@linux.alibaba.com>
+Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
+Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
+Link: https://patch.msgid.link/20241010115624.7769-1-KaiShen@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_wr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
+index 0021065a600a0..994c0cd4fddbf 100644
+--- a/net/smc/smc_wr.c
++++ b/net/smc/smc_wr.c
+@@ -648,8 +648,10 @@ void smc_wr_free_link(struct smc_link *lnk)
+ smc_wr_tx_wait_no_pending_sends(lnk);
+ percpu_ref_kill(&lnk->wr_reg_refs);
+ wait_for_completion(&lnk->reg_ref_comp);
++ percpu_ref_exit(&lnk->wr_reg_refs);
+ percpu_ref_kill(&lnk->wr_tx_refs);
+ wait_for_completion(&lnk->tx_ref_comp);
++ percpu_ref_exit(&lnk->wr_tx_refs);
+
+ if (lnk->wr_rx_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
+@@ -912,11 +914,13 @@ int smc_wr_create_link(struct smc_link *lnk)
+ init_waitqueue_head(&lnk->wr_reg_wait);
+ rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
+ if (rc)
+- goto dma_unmap;
++ goto cancel_ref;
+ init_completion(&lnk->reg_ref_comp);
+ init_waitqueue_head(&lnk->wr_rx_empty_wait);
+ return rc;
+
++cancel_ref:
++ percpu_ref_exit(&lnk->wr_tx_refs);
+ dma_unmap:
+ if (lnk->wr_rx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
+--
+2.43.0
+
--- /dev/null
+From 1fc4e4db464453c55809e4b9d0d828385732d2e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 19:53:21 +0800
+Subject: net/smc: Fix searching in list of known pnetids in
+ smc_pnet_add_pnetid
+
+From: Li RongQing <lirongqing@baidu.com>
+
+[ Upstream commit 82ac39ebd6db0c9f7a97a934bda1e3e101a9d201 ]
+
+pnetid of pi (not newly allocated pe) should be compared
+
+Fixes: e888a2e8337c ("net/smc: introduce list of pnetids for Ethernet devices")
+Reviewed-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>
+Link: https://patch.msgid.link/20241014115321.33234-1-lirongqing@baidu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/smc_pnet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 2adb92b8c4699..dbcc72b43d0c0 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -753,7 +753,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
+
+ write_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
+- if (smc_pnet_match(pnetid, pe->pnetid)) {
++ if (smc_pnet_match(pnetid, pi->pnetid)) {
+ refcount_inc(&pi->refcnt);
+ kfree(pe);
+ goto unlock;
+--
+2.43.0
+
--- /dev/null
+From ea52b876cd57f6f945e8fac34969b779f7df4aeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 14:49:56 +0200
+Subject: net: sparx5: fix source port register when mirroring
+
+From: Daniel Machon <daniel.machon@microchip.com>
+
+[ Upstream commit 8a6be4bd6fb319cee63d228e37c8dda5fd1eb74a ]
+
+When port mirroring is added to a port, the bit position of the source
+port, needs to be written to the register ANA_AC_PROBE_PORT_CFG. This
+register is replicated for n_ports > 32, and therefore we need to derive
+the correct register from the port number.
+
+Before this patch, we wrongly calculate the register from portno /
+BITS_PER_BYTE, where the divisor ought to be 32, causing any port >=8 to
+be written to the wrong register. We fix this, by using do_div(), where
+the dividend is the register, the remainder is the bit position and the
+divisor is now 32.
+
+Fixes: 4e50d72b3b95 ("net: sparx5: add port mirroring implementation")
+Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20241009-mirroring-fix-v1-1-9ec962301989@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/microchip/sparx5/sparx5_mirror.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+index 15db423be4aa6..459a53676ae96 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+@@ -31,10 +31,10 @@ static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
+ /* Add port to mirror (only front ports) */
+ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
+ {
+- u32 val, reg = portno;
++ u64 reg = portno;
++ u32 val;
+
+- reg = portno / BITS_PER_BYTE;
+- val = BIT(portno % BITS_PER_BYTE);
++ val = BIT(do_div(reg, 32));
+
+ if (reg == 0)
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+@@ -45,10 +45,10 @@ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
+ /* Delete port from mirror (only front ports) */
+ static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
+ {
+- u32 val, reg = portno;
++ u64 reg = portno;
++ u32 val;
+
+- reg = portno / BITS_PER_BYTE;
+- val = BIT(portno % BITS_PER_BYTE);
++ val = BIT(do_div(reg, 32));
+
+ if (reg == 0)
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+--
+2.43.0
+
--- /dev/null
+From 480861afeb32e47740d90e1956c9201520149e92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 10:29:08 -0400
+Subject: net: stmmac: dwmac-tegra: Fix link bring-up sequence
+
+From: Paritosh Dixit <paritoshd@nvidia.com>
+
+[ Upstream commit 1cff6ff302f5703a627f9ee1d99131161ea2683e ]
+
+The Tegra MGBE driver sometimes fails to initialize, reporting the
+following error, and as a result, it is unable to acquire an IP
+address with DHCP:
+
+ tegra-mgbe 6800000.ethernet: timeout waiting for link to become ready
+
+As per the recommendation from the Tegra hardware design team, fix this
+issue by:
+- clearing the PHY_RDY bit before setting the CDR_RESET bit and then
+setting PHY_RDY bit before clearing CDR_RESET bit. This ensures valid
+data is present at UPHY RX inputs before starting the CDR lock.
+- adding the required delays when bringing up the UPHY lane. Note we
+need to use delays here because there is no alternative, such as
+polling, for these cases. Using the usleep_range() instead of ndelay()
+as sleeping is preferred over busy wait loop.
+
+Without this change we would see link failures on boot sometimes as
+often as 1 in 5 boots. With this fix we have not observed any failures
+in over 1000 boots.
+
+Fixes: d8ca113724e7 ("net: stmmac: tegra: Add MGBE support")
+Signed-off-by: Paritosh Dixit <paritoshd@nvidia.com>
+Link: https://patch.msgid.link/20241010142908.602712-1-paritoshd@nvidia.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+index 362f85136c3ef..6fdd94c8919ec 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 500ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
+ return err;
+ }
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+- value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+- value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ msleep(30); /* 30ms delay needed as per HW design */
++ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
++ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
++
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
+ value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
+ 500, 500 * 2000);
+--
+2.43.0
+
--- /dev/null
+From e0b22bab7d5164d8081f1e07ad18e2858baccbab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 22:51:15 +0800
+Subject: net: systemport: fix potential memory leak in bcm_sysport_xmit()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit c401ed1c709948e57945485088413e1bb5e94bd1 ]
+
+The bcm_sysport_xmit() returns NETDEV_TX_OK without freeing skb
+in case of dma_map_single() fails, add dev_kfree_skb() to fix it.
+
+Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Link: https://patch.msgid.link/20241014145115.44977-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bcmsysport.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index c9faa85408593..0a68b526e4a82 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb_len);
+ ret = NETDEV_TX_OK;
++ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 85c9e5ca8baf7530854761ecb5a3d1a65a4de857 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 15:19:14 +0200
+Subject: net: usb: usbnet: fix race in probe failure
+
+From: Oliver Neukum <oneukum@suse.com>
+
+[ Upstream commit b62f4c186c70aa235fef2da68d07325d85ca3ade ]
+
+The same bug as in the disconnect code path also exists
+in the case of a failure late during the probe process.
+The flag must also be set.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Link: https://patch.msgid.link/20241010131934.1499695-1-oneukum@suse.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/usbnet.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 2506aa8c603ec..ee1b5fd7b4919 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1870,6 +1870,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
++ usbnet_mark_going_away(dev);
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
+ free_netdev(net);
+--
+2.43.0
+
--- /dev/null
+From 842e2fcc9eb88e36cf868b32738e95a76efe04b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 22:37:04 +0800
+Subject: net: xilinx: axienet: fix potential memory leak in
+ axienet_start_xmit()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit 99714e37e8333bbc22496fe80f241d5b35380e83 ]
+
+The axienet_start_xmit() returns NETDEV_TX_OK without freeing skb
+in case of dma_map_single() fails, add dev_kfree_skb_any() to fix it.
+
+Fixes: 71791dc8bdea ("net: axienet: Check for DMA mapping errors")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://patch.msgid.link/20241014143704.31938-1-wanghai38@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 5dbfee4aee43c..0c4c57e7fddc2 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -989,6 +989,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+@@ -1009,6 +1010,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+--
+2.43.0
+
--- /dev/null
+From c5f917f56e219230e9449bae4b397ba75bf018c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2024 09:42:30 +0000
+Subject: netdevsim: use cond_resched() in nsim_dev_trap_report_work()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a1494d532e28598bde7a5544892ef9c7dbfafa93 ]
+
+I am still seeing many syzbot reports hinting that syzbot
+might fool nsim_dev_trap_report_work() with hundreds of ports [1]
+
+Lets use cond_resched(), and system_unbound_wq
+instead of implicit system_wq.
+
+[1]
+INFO: task syz-executor:20633 blocked for more than 143 seconds.
+ Not tainted 6.12.0-rc2-syzkaller-00205-g1d227fcc7222 #0
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+task:syz-executor state:D stack:25856 pid:20633 tgid:20633 ppid:1 flags:0x00004006
+...
+NMI backtrace for cpu 1
+CPU: 1 UID: 0 PID: 16760 Comm: kworker/1:0 Not tainted 6.12.0-rc2-syzkaller-00205-g1d227fcc7222 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+Workqueue: events nsim_dev_trap_report_work
+ RIP: 0010:__sanitizer_cov_trace_pc+0x0/0x70 kernel/kcov.c:210
+Code: 89 fb e8 23 00 00 00 48 8b 3d 04 fb 9c 0c 48 89 de 5b e9 c3 c7 5d 00 0f 1f 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 <f3> 0f 1e fa 48 8b 04 24 65 48 8b 0c 25 c0 d7 03 00 65 8b 15 60 f0
+RSP: 0018:ffffc90000a187e8 EFLAGS: 00000246
+RAX: 0000000000000100 RBX: ffffc90000a188e0 RCX: ffff888027d3bc00
+RDX: ffff888027d3bc00 RSI: 0000000000000000 RDI: 0000000000000000
+RBP: ffff88804a2e6000 R08: ffffffff8a4bc495 R09: ffffffff89da3577
+R10: 0000000000000004 R11: ffffffff8a4bc2b0 R12: dffffc0000000000
+R13: ffff88806573b503 R14: dffffc0000000000 R15: ffff8880663cca00
+FS: 0000000000000000(0000) GS:ffff8880b8700000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fc90a747f98 CR3: 000000000e734000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 000000000000002b DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Call Trace:
+ <NMI>
+ </NMI>
+ <TASK>
+ __local_bh_enable_ip+0x1bb/0x200 kernel/softirq.c:382
+ spin_unlock_bh include/linux/spinlock.h:396 [inline]
+ nsim_dev_trap_report drivers/net/netdevsim/dev.c:820 [inline]
+ nsim_dev_trap_report_work+0x75d/0xaa0 drivers/net/netdevsim/dev.c:850
+ process_one_work kernel/workqueue.c:3229 [inline]
+ process_scheduled_works+0xa63/0x1850 kernel/workqueue.c:3310
+ worker_thread+0x870/0xd30 kernel/workqueue.c:3391
+ kthread+0x2f0/0x390 kernel/kthread.c:389
+ ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
+ </TASK>
+
+Fixes: ba5e1272142d ("netdevsim: avoid potential loop in nsim_dev_trap_report_work()")
+Reported-by: syzbot+d383dc9579a76f56c251@syzkaller.appspotmail.com
+Reported-by: syzbot+c596faae21a68bf7afd0@syzkaller.appspotmail.com
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20241012094230.3893510-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/netdevsim/dev.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 92a7a36b93ac0..3e0b61202f0c9 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw, 1);
+ return;
+ }
+
+@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ continue;
+
+ nsim_dev_trap_report(nsim_dev_port);
++ cond_resched();
+ }
+ devl_unlock(priv_to_devlink(nsim_dev));
+-
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw,
++ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+ }
+
+ static int nsim_dev_traps_init(struct devlink *devlink)
+@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
+
+ INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
+ nsim_dev_trap_report_work);
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw,
++ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+
+ return 0;
+
+--
+2.43.0
+
--- /dev/null
+From 3de09ba783479db4b3dc2622068e193262a1fd4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 13:21:00 +0200
+Subject: nvme-pci: fix race condition between reset and nvme_dev_disable()
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit 26bc0a81f64ce00fc4342c38eeb2eddaad084dd2 ]
+
+nvme_dev_disable() modifies the dev->online_queues field, therefore
+nvme_pci_update_nr_queues() should avoid racing against it, otherwise
+we could end up passing invalid values to blk_mq_update_nr_hw_queues().
+
+ WARNING: CPU: 39 PID: 61303 at drivers/pci/msi/api.c:347
+ pci_irq_get_affinity+0x187/0x210
+ Workqueue: nvme-reset-wq nvme_reset_work [nvme]
+ RIP: 0010:pci_irq_get_affinity+0x187/0x210
+ Call Trace:
+ <TASK>
+ ? blk_mq_pci_map_queues+0x87/0x3c0
+ ? pci_irq_get_affinity+0x187/0x210
+ blk_mq_pci_map_queues+0x87/0x3c0
+ nvme_pci_map_queues+0x189/0x460 [nvme]
+ blk_mq_update_nr_hw_queues+0x2a/0x40
+ nvme_reset_work+0x1be/0x2a0 [nvme]
+
+Fix the bug by locking the shutdown_lock mutex before using
+dev->online_queues. Give up if nvme_dev_disable() is running or if
+it has been executed already.
+
+Fixes: 949928c1c731 ("NVMe: Fix possible queue use after freed")
+Tested-by: Yi Zhang <yi.zhang@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pci.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7990c3f22ecf6..4b9fda0b1d9a3 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2506,17 +2506,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
+ return 1;
+ }
+
+-static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
++static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
+ {
+ if (!dev->ctrl.tagset) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+- return;
++ return true;
++ }
++
++ /* Give up if we are racing with nvme_dev_disable() */
++ if (!mutex_trylock(&dev->shutdown_lock))
++ return false;
++
++ /* Check if nvme_dev_disable() has been executed already */
++ if (!dev->online_queues) {
++ mutex_unlock(&dev->shutdown_lock);
++ return false;
+ }
+
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
++ mutex_unlock(&dev->shutdown_lock);
++ return true;
+ }
+
+ static int nvme_pci_enable(struct nvme_dev *dev)
+@@ -2797,7 +2809,8 @@ static void nvme_reset_work(struct work_struct *work)
+ nvme_dbbuf_set(dev);
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ nvme_wait_freeze(&dev->ctrl);
+- nvme_pci_update_nr_queues(dev);
++ if (!nvme_pci_update_nr_queues(dev))
++ goto out;
+ nvme_unfreeze(&dev->ctrl);
+ } else {
+ dev_warn(dev->ctrl.device, "IO queues lost\n");
+--
+2.43.0
+
--- /dev/null
+From 2b53f8f7c351a4380a316cf8b28a1029c8b14b4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 16:45:19 +0100
+Subject: octeontx2-af: Fix potential integer overflows on integer shifts
+
+From: Colin Ian King <colin.i.king@gmail.com>
+
+[ Upstream commit 637c4f6fe40befa04f19c38b5d15429cbb9191d9 ]
+
+The left shift of the 32 bit integer constant 1 is evaluated using 32 bit
+arithmetic and then assigned to a 64 bit unsigned integer. In the case
+where the shift is 32 or more this can lead to an overflow. Avoid this
+by shifting using the BIT_ULL macro instead.
+
+Fixes: 019aba04f08c ("octeontx2-af: Modify SMQ flush sequence to drop packets")
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/20241010154519.768785-1-colin.i.king@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 82832a24fbd86..da69350c6f765 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2411,7 +2411,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+- bmap |= (1 << i);
++ bmap |= BIT_ULL(i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+@@ -2432,7 +2432,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+- if (!(bmap & (1 << i)))
++ if (!(bmap & BIT_ULL(i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+--
+2.43.0
+
--- /dev/null
+From 272a071a6f4d3ad9c018aa6b14fef93188864eda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 22:04:26 +0200
+Subject: [PATCH] hwmon: (jc42) Properly detect TSE2004-compliant devices again
+
+From: Jean Delvare <jdelvare@suse.de>
+
+[ Upstream commit eabb03810194b75417b09cff8a526d26939736ac ]
+
+Commit b3e992f69c23 ("hwmon: (jc42) Strengthen detect function")
+attempted to make the detect function more robust for
+TSE2004-compliant devices by checking capability bits which, according
+to the JEDEC 21-C specification, should always be set. Unfortunately,
+not all real-world implementations fully adhere to this specification,
+so this change caused a regression.
+
+Stop testing bit 7 (EVSD) of the Capabilities register, as it was
+found to be 0 on one real-world device.
+
+Also stop testing bits 0 (EVENT) and 2 (RANGE) as vendor datasheets
+(Renesas TSE2004GB2B0, ST STTS2004) suggest that they may not always
+be set either.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Message-ID: <20241014141204.026f4641@endymion.delvare>
+Fixes: b3e992f69c23 ("hwmon: (jc42) Strengthen detect function")
+Message-ID: <20241014220426.0c8f4d9c@endymion.delvare>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/jc42.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
+index a260cff750a58..c459dce496a6e 100644
+--- a/drivers/hwmon/jc42.c
++++ b/drivers/hwmon/jc42.c
+@@ -417,7 +417,7 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
+ return -ENODEV;
+
+ if ((devid & TSE2004_DEVID_MASK) == TSE2004_DEVID &&
+- (cap & 0x00e7) != 0x00e7)
++ (cap & 0x0062) != 0x0062)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) {
+--
+2.43.0
+
--- /dev/null
+From fbd6be010b45c7e93b0f4f791a67665ffe4e33f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Sep 2024 14:27:55 +0300
+Subject: ravb: Remove setting of RX software timestamp
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit 277901ee3a2620679e2c8797377d2a72f4358068 ]
+
+The responsibility for reporting of RX software timestamp has moved to
+the core layer (see __ethtool_get_ts_info()), remove usage from the
+device drivers.
+
+Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
+Reviewed-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Link: https://patch.msgid.link/20240901112803.212753-8-gal@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 126e799602f4 ("net: ravb: Only advertise Rx/Tx timestamps if hardware supports it")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 6b82df11fe8d0..d2a6518532f37 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1752,8 +1752,6 @@ static int ravb_get_ts_info(struct net_device *ndev,
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+- SOF_TIMESTAMPING_RX_SOFTWARE |
+- SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+@@ -1764,6 +1762,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
+ (1 << HWTSTAMP_FILTER_ALL);
+ if (hw_info->gptp || hw_info->ccc_gac)
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
++ else
++ info->phc_index = 0;
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 7d8797c462d6a5390eacaab631496aff57491a95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Sep 2024 20:05:58 -0700
+Subject: RDMA/bnxt_re: Add a check for memory allocation
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit c5c1ae73b7741fa3b58e6e001b407825bb971225 ]
+
+__alloc_pbl() can return error when memory allocation fails.
+Driver is not checking the status on one of the instances.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Link: https://patch.msgid.link/r/1726715161-18941-4-git-send-email-selvin.xavier@broadcom.com
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index dfc943fab87b4..1fdffd6a0f480 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
++ if (rc)
++ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+--
+2.43.0
+
--- /dev/null
+From 43ffdd84a83dcbe8f037153a2156d4ee8001f8c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:38 -0700
+Subject: RDMA/bnxt_re: Avoid CPU lockups due fifo occupancy check loop
+
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+
+[ Upstream commit 8be3e5b0c96beeefe9d5486b96575d104d3e7d17 ]
+
+Driver waits indefinitely for the fifo occupancy to go below a threshold
+as soon as the pacing interrupt is received. This can cause soft lockup on
+one of the processors, if the rate of DB is very high.
+
+Add a loop count for FPGA and exit the __wait_for_fifo_occupancy_below_th
+if the loop is taking more time. Pacing will be continuing until the
+occupancy is below the threshold. This is ensured by the checks in
+bnxt_re_pacing_timer_exp and further scheduling the work for pacing based
+on the fifo occupancy.
+
+Fixes: 2ad4e6303a6d ("RDMA/bnxt_re: Implement doorbell pacing algorithm")
+Link: https://patch.msgid.link/r/1728373302-19530-7-git-send-email-selvin.xavier@broadcom.com
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index e06adb2dfe6f9..c905a51aabfba 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -518,6 +518,7 @@ static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev)
+ static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+ {
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
++ u32 retry_fifo_check = 1000;
+ u32 fifo_occup;
+
+ /* loop shouldn't run infintely as the occupancy usually goes
+@@ -531,6 +532,14 @@ static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+
+ if (fifo_occup < pacing_data->pacing_th)
+ break;
++ if (!retry_fifo_check--) {
++ dev_info_once(rdev_to_dev(rdev),
++ "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n",
++ __func__, fifo_occup, pacing_data->fifo_max_depth,
++ pacing_data->pacing_th);
++ break;
++ }
++
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From aeca5c620efe097c5a7345e5f5e638c14d527234 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:40 -0700
+Subject: RDMA/bnxt_re: Change the sequence of updating the CQ toggle value
+
+From: Chandramohan Akula <chandramohan.akula@broadcom.com>
+
+[ Upstream commit 2df411353dacc4b0c911f8c4944f8ffab955391c ]
+
+Currently the CQ toggle value in the shared page (read by the userlib) is
+updated as part of the cqn_handler. There is a potential race of
+application calling the CQ ARM doorbell immediately and using the old
+toggle value.
+
+Change the sequence of updating CQ toggle value to update in the
+bnxt_qplib_service_nq function immediately after reading the toggle value
+to be in sync with the HW updated value.
+
+Fixes: e275919d9669 ("RDMA/bnxt_re: Share a page to expose per CQ info with userspace")
+Link: https://patch.msgid.link/r/1728373302-19530-9-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 8 +-------
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 5 +++++
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index c905a51aabfba..9b7093eb439c6 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1257,15 +1257,9 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
+ {
+ struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
+ qplib_cq);
+- u32 *cq_ptr;
+
+- if (cq->ib_cq.comp_handler) {
+- if (cq->uctx_cq_page) {
+- cq_ptr = (u32 *)cq->uctx_cq_page;
+- *cq_ptr = cq->qplib_cq.toggle;
+- }
++ if (cq->ib_cq.comp_handler)
+ (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
+- }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 19bb45329a19b..03d517be9c52e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -327,6 +327,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
++ struct bnxt_re_cq *cq_p;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
+@@ -337,6 +338,10 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ cq->toggle = (le16_to_cpu(nqe->info10_type) &
+ NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
+ cq->dbinfo.toggle = cq->toggle;
++ cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
++ if (cq_p->uctx_cq_page)
++ *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
++
+ bnxt_qplib_armen_db(&cq->dbinfo,
+ DBC_DBC_TYPE_CQ_ARMENA);
+ spin_lock_bh(&cq->compl_lock);
+--
+2.43.0
+
--- /dev/null
+From a32ea29e4be9b604a2e2c8a1b7e3aebaca609eb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:41 -0700
+Subject: RDMA/bnxt_re: Fix a bug while setting up Level-2 PBL pages
+
+From: Bhargava Chenna Marreddy <bhargava.marreddy@broadcom.com>
+
+[ Upstream commit 7988bdbbb85ac85a847baf09879edcd0f70521dc ]
+
+Avoid memory corruption while setting up Level-2 PBL pages for the non MR
+resources when num_pages > 256K.
+
+There will be a single PDE page address (contiguous pages in the case of >
+PAGE_SIZE), but, current logic assumes multiple pages, leading to invalid
+memory access after 256K PBL entries in the PDE.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Link: https://patch.msgid.link/r/1728373302-19530-10-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Bhargava Chenna Marreddy <bhargava.marreddy@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 19 +++----------------
+ 1 file changed, 3 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 1fdffd6a0f480..96ceec1e8199a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -257,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+- if (hwq_attr->type == HWQ_TYPE_MR) {
+- /* For MR it is expected that we supply only 1 contigous
+- * page i.e only 1 entry in the PDL that will contain
+- * all the PBLs for the user supplied memory region
+- */
+- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+- i++)
+- dst_virt_ptr[0][i] = src_phys_ptr[i] |
+- flag;
+- } else {
+- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+- i++)
+- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+- src_phys_ptr[i] |
+- PTU_PDE_VALID;
+- }
++ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
++ dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
++
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
+--
+2.43.0
+
--- /dev/null
+From ee57260072df1c9e1f80ad414a544d877004cd7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Sep 2024 20:05:56 -0700
+Subject: RDMA/bnxt_re: Fix a possible memory leak
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit 3fc5410f225d1651580a4aeb7c72f55e28673b53 ]
+
+In bnxt_re_setup_chip_ctx() when bnxt_qplib_map_db_bar() fails
+driver is not freeing the memory allocated for "rdev->chip_ctx".
+
+Fixes: 0ac20faf5d83 ("RDMA/bnxt_re: Reorg the bar mapping")
+Link: https://patch.msgid.link/r/1726715161-18941-2-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 9714b9ab75240..2a450d7ad1990 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -184,8 +184,11 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+
+ bnxt_re_set_db_offset(rdev);
+ rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+- if (rc)
++ if (rc) {
++ kfree(rdev->chip_ctx);
++ rdev->chip_ctx = NULL;
+ return rc;
++ }
+
+ if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ ibdev_info(&rdev->ibdev,
+--
+2.43.0
+
--- /dev/null
+From 6256372c2d5c9492cb15ead658f0a2fc5988c58d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Sep 2024 20:05:57 -0700
+Subject: RDMA/bnxt_re: Fix incorrect AVID type in WQE structure
+
+From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+
+[ Upstream commit 9ab20f76ae9fad55ebaf36bdff04aea1c2552374 ]
+
+Driver uses internal data structure to construct WQE frame.
+It used avid type as u16 which can accommodate up to 64K AVs.
+When outstanding AVID crosses 64K, driver truncates AVID and
+hence it uses incorrect AVID to WR. This leads to WR failure
+due to invalid AV ID and QP is moved to error state with reason
+set to 19 (INVALID AVID). When RDMA CM path is used, this issue
+hits QP1 and it is moved to error state
+
+Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
+Link: https://patch.msgid.link/r/1726715161-18941-3-git-send-email-selvin.xavier@broadcom.com
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 56538b90d6c56..c7412e461436f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -169,7 +169,7 @@ struct bnxt_qplib_swqe {
+ };
+ u32 q_key;
+ u32 dst_qp;
+- u16 avid;
++ u32 avid;
+ } send;
+
+ /* Send Raw Ethernet and QP1 */
+--
+2.43.0
+
--- /dev/null
+From 5e352d56aea98bc7972238654b18a0ba81444a70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:35 -0700
+Subject: RDMA/bnxt_re: Fix incorrect dereference of srq in async event
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 87b4d8d28f6af8fc62766a8af7a5467b37053dfa ]
+
+Currently driver is not getting correct srq. Dereference only if qplib has
+a valid srq.
+
+Fixes: b02fd3f79ec3 ("RDMA/bnxt_re: Report async events and errors")
+Link: https://patch.msgid.link/r/1728373302-19530-4-git-send-email-selvin.xavier@broadcom.com
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 2a450d7ad1990..e06adb2dfe6f9 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1012,12 +1012,15 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+ {
+- struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
+- qplib_srq);
+ struct creq_qp_error_notification *err_event;
++ struct bnxt_re_srq *srq = NULL;
+ struct ib_event event = {};
+ unsigned int flags;
+
++ if (qp->qplib_qp.srq)
++ srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
++ qplib_srq);
++
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ rdma_is_kernel_res(&qp->ib_qp.res)) {
+ flags = bnxt_re_lock_cqs(qp);
+--
+2.43.0
+
--- /dev/null
+From b5bec913dbfa8c4824bab87fd83fdc3de8b21a97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:34 -0700
+Subject: RDMA/bnxt_re: Fix out of bound check
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit a9e6e7443922ac0a48243c35d03834c96926bff1 ]
+
+Driver exports pacing stats only on GenP5 and P7 adapters. But while
+parsing the pacing stats, driver has a check for "rdev->dbr_pacing". This
+caused a trace when KASAN is enabled.
+
+BUG: KASAN: slab-out-of-bounds in bnxt_re_get_hw_stats+0x2b6a/0x2e00 [bnxt_re]
+Write of size 8 at addr ffff8885942a6340 by task modprobe/4809
+
+Fixes: 8b6573ff3420 ("bnxt_re: Update the debug counters for doorbell pacing")
+Link: https://patch.msgid.link/r/1728373302-19530-3-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/hw_counters.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+index 128651c015956..1e63f80917483 100644
+--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
++++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+@@ -366,7 +366,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ goto done;
+ }
+ }
+- if (rdev->pacing.dbr_pacing)
++ if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ bnxt_re_copy_db_pacing_stats(rdev, stats);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 775370b47cc92cf6ce3af0e99069a307a394e74a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:42 -0700
+Subject: RDMA/bnxt_re: Fix the GID table length
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit dc5006cfcf62bea88076a587344ba5e00e66d1c6 ]
+
+GID table length is reported by FW. The gid index which is passed to the
+driver during modify_qp/create_ah is restricted by the sgid_index field of
+struct ib_global_route. sgid_index is u8 and the max sgid possible is
+256.
+
+Each GID entry in HW will have 2 GID entries in the kernel gid table. So
+we can support twice the gid table size reported by FW. Also, restrict the
+max GID to 256 also.
+
+Fixes: 847b97887ed4 ("RDMA/bnxt_re: Restrict the max_gids to 256")
+Link: https://patch.msgid.link/r/1728373302-19530-11-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_sp.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 8e59422dd137e..420f8613bcd51 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -156,7 +156,14 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+- attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
++ /*
++ * Read the max gid supported by HW.
++ * For each entry in HW GID in HW table, we consume 2
++ * GID entries in the kernel GID table. So max_gid reported
++ * to stack can be up to twice the value reported by the HW, up to 256 gids.
++ */
++ attr->max_sgid = le32_to_cpu(sb->max_gid);
++ attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+ attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+
+--
+2.43.0
+
--- /dev/null
+From 5cceec717ad715f6cd6c5b28c57adeb9a4523fb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:33 -0700
+Subject: RDMA/bnxt_re: Fix the max CQ WQEs for older adapters
+
+From: Abhishek Mohapatra <abhishek.mohapatra@broadcom.com>
+
+[ Upstream commit ac6df53738b465053d38d491fff87bd7d37fdc07 ]
+
+Older adapters doesn't support the MAX CQ WQEs reported by older FW. So
+restrict the value reported to 1M always for older adapters.
+
+Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
+Link: https://patch.msgid.link/r/1728373302-19530-2-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Abhishek Mohapatra <abhishek.mohapatra@broadcom.com>
+Reviewed-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_sp.c | 2 ++
+ drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 9328db92fa6db..8e59422dd137e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -137,6 +137,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ 6 : sb->max_sge;
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
++ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
++ attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
+ attr->max_cq_sges = attr->max_qp_sges;
+ attr->max_mr = le32_to_cpu(sb->max_mr);
+ attr->max_mw = le32_to_cpu(sb->max_mw);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 16a67d70a6fc4..2f16f3db093ea 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -55,6 +55,7 @@ struct bnxt_qplib_dev_attr {
+ u32 max_qp_wqes;
+ u32 max_qp_sges;
+ u32 max_cq;
++#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
+ u32 max_cq_wqes;
+ u32 max_cq_sges;
+ u32 max_mr;
+--
+2.43.0
+
--- /dev/null
+From 7d6c06b593a442c88f83119f297c07b2fbbbb02c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Aug 2024 08:34:03 -0700
+Subject: RDMA/bnxt_re: Get the toggle bits from SRQ events
+
+From: Hongguang Gao <hongguang.gao@broadcom.com>
+
+[ Upstream commit 640c2cf84e1de62e6bb0738dc2128d5506e7e5bc ]
+
+SRQ arming requires the toggle bits received from hardware.
+Get the toggle bits from SRQ notification for the
+gen p7 adapters. This value will be zero for the older adapters.
+
+Signed-off-by: Hongguang Gao <hongguang.gao@broadcom.com>
+Signed-off-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://patch.msgid.link/1724945645-14989-2-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Stable-dep-of: 2df411353dac ("RDMA/bnxt_re: Change the sequence of updating the CQ toggle value")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.h | 1 +
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 11 +++++++++++
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1 +
+ 3 files changed, 13 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+index e98cb17173385..b368916a5bcfc 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+@@ -77,6 +77,7 @@ struct bnxt_re_srq {
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
++ void *uctx_srq_page;
+ };
+
+ struct bnxt_re_qp {
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 49e4a4a50bfae..19bb45329a19b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -54,6 +54,10 @@
+ #include "qplib_rcfw.h"
+ #include "qplib_sp.h"
+ #include "qplib_fp.h"
++#include <rdma/ib_addr.h>
++#include "bnxt_ulp.h"
++#include "bnxt_re.h"
++#include "ib_verbs.h"
+
+ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+@@ -347,6 +351,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct bnxt_qplib_srq *srq;
++ struct bnxt_re_srq *srq_p;
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+@@ -354,6 +359,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ srq = (struct bnxt_qplib_srq *)q_handle;
++ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
++ >> NQ_CN_TOGGLE_SFT;
++ srq->dbinfo.toggle = srq->toggle;
++ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
++ if (srq_p->uctx_srq_page)
++ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
+ bnxt_qplib_armen_db(&srq->dbinfo,
+ DBC_DBC_TYPE_SRQ_ARMENA);
+ if (nq->srqn_handler(nq,
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index c7412e461436f..389862df818d9 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -105,6 +105,7 @@ struct bnxt_qplib_srq {
+ struct bnxt_qplib_sg_info sg_info;
+ u16 eventq_hw_ring_id;
+ spinlock_t lock; /* protect SRQE link list */
++ u8 toggle;
+ };
+
+ struct bnxt_qplib_sge {
+--
+2.43.0
+
--- /dev/null
+From 0b7334254206f25736de3dab7830615be340d6eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 00:41:36 -0700
+Subject: RDMA/bnxt_re: Return more meaningful error
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit 98647df0178df215b8239c5c365537283b2852a6 ]
+
+When the HWRM command fails, driver currently returns -EFAULT (Bad
+address). This does not look correct.
+
+Modified to return -EIO(I/O error).
+
+Fixes: cc1ec769b87c ("RDMA/bnxt_re: Fixing the Control path command and response handling")
+Fixes: 65288a22ddd8 ("RDMA/bnxt_re: use shadow qd while posting non blocking rcfw command")
+Link: https://patch.msgid.link/r/1728373302-19530-5-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 3ffaef0c26519..7294221b3316c 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -525,7 +525,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
+ cookie, opcode, evnt->status);
+- rc = -EFAULT;
++ rc = -EIO;
+ }
+
+ return rc;
+--
+2.43.0
+
--- /dev/null
+From acd3754bcca1ae970db4f1cbcba050951beaa041 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 17:13:34 +0530
+Subject: RDMA/core: Fix ENODEV error for iWARP test over vlan
+
+From: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+
+[ Upstream commit 5069d7e202f640a36cf213a432296c85113a52f7 ]
+
+If traffic is over vlan, cma_validate_port() fails to match vlan
+net_device ifindex with bound_if_index and results in ENODEV error.
+It is because rdma_copy_src_l2_addr() always assigns bound_if_index with
+real net_device ifindex.
+This patch fixes the issue by assigning bound_if_index with vlan
+net_device index if traffic is over vlan.
+
+Fixes: f8ef1be816bf ("RDMA/cma: Avoid GID lookups on iWARP devices")
+Signed-off-by: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Link: https://patch.msgid.link/20241008114334.146702-1-anumula@chelsio.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/addr.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index be0743dac3fff..c4cf26f1d1496 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -269,6 +269,8 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
+ break;
+ #endif
+ }
++ if (!ret && dev && is_vlan_dev(dev))
++ dev = vlan_dev_real_dev(dev);
+ return ret ? ERR_PTR(ret) : dev;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 958147d813cdfcd2c1ce2b85ad3ebe17c4306d0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 18:53:11 +0530
+Subject: RDMA/cxgb4: Fix RDMA_CM_EVENT_UNREACHABLE error for iWARP
+
+From: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+
+[ Upstream commit c659b405b82ead335bee6eb33f9691bf718e21e8 ]
+
+ip_dev_find() always returns real net_device address, whether traffic is
+running on a vlan or real device, if traffic is over vlan, filling
+endpoint struture with real ndev and an attempt to send a connect request
+will results in RDMA_CM_EVENT_UNREACHABLE error. This patch fixes the
+issue by using vlan_dev_real_dev().
+
+Fixes: 830662f6f032 ("RDMA/cxgb4: Add support for active and passive open connection with IPv6 address")
+Link: https://patch.msgid.link/r/20241007132311.70593-1-anumula@chelsio.com
+Signed-off-by: Anumula Murali Mohan Reddy <anumula@chelsio.com>
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index b3757c6a0457a..8d753e6e0c719 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+- pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
++ pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ for_each_netdev(&init_net, pdev) {
+ if (ipv6_chk_addr(&init_net,
+@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ err = -ENODEV;
+ goto out;
+ }
++ if (is_vlan_dev(pdev))
++ pdev = vlan_dev_real_dev(pdev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, rt_tos2priority(tos));
+- if (!ep->l2t) {
+- dev_put(pdev);
++ if (!ep->l2t)
+ goto out;
+- }
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
+@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
+- dev_put(pdev);
+ } else {
+ pdev = get_real_dev(n->dev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+--
+2.43.0
+
--- /dev/null
+From dbe7b7e5a8ce959e78f4f11d4e2b42e03b0f7853 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 18:19:13 +0200
+Subject: RDMA/irdma: Fix misspelling of "accept*"
+
+From: Alexander Zubkov <green@qrator.net>
+
+[ Upstream commit 8cddfa535c931b8d8110c73bfed7354a94cbf891 ]
+
+There is "accept*" misspelled as "accpet*" in the comments. Fix the
+spelling.
+
+Fixes: 146b9756f14c ("RDMA/irdma: Add connection manager")
+Link: https://patch.msgid.link/r/20241008161913.19965-1-green@qrator.net
+Signed-off-by: Alexander Zubkov <green@qrator.net>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/cm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 36bb7e5ce6382..ce8d821bdad84 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -3631,7 +3631,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+ /**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+- * @conn_param: accpet parameters
++ * @conn_param: accept parameters
+ */
+ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ {
+--
+2.43.0
+
--- /dev/null
+From ed078d0b87621ad6c05e1b5b145fc8ba41e4038f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 14:00:48 -0700
+Subject: RDMA/srpt: Make slab cache names unique
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 4d784c042d164f10fc809e2338457036cd7c653d ]
+
+Since commit 4c39529663b9 ("slab: Warn on duplicate cache names when
+DEBUG_VM=y"), slab complains about duplicate cache names. Hence this
+patch. The approach is as follows:
+- Maintain an xarray with the slab size as index and a reference count
+ and a kmem_cache pointer as contents. Use srpt-${slab_size} as kmem
+ cache name.
+- Use 512-byte alignment for all slabs instead of only for some of the
+ slabs.
+- Increment the reference count instead of calling kmem_cache_create().
+- Decrement the reference count instead of calling kmem_cache_destroy().
+
+Fixes: 5dabcd0456d7 ("RDMA/srpt: Add support for immediate data")
+Link: https://patch.msgid.link/r/20241009210048.4122518-1-bvanassche@acm.org
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Closes: https://lore.kernel.org/linux-block/xpe6bea7rakpyoyfvspvin2dsozjmjtjktpph7rep3h25tv7fb@ooz4cu5z6bq6/
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/srpt/ib_srpt.c | 80 +++++++++++++++++++++++----
+ 1 file changed, 68 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 9632afbd727b6..5dfb4644446ba 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ static u64 srpt_service_guid;
+ static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
+ static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
++static DEFINE_MUTEX(srpt_mc_mutex); /* Protects srpt_memory_caches. */
++static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
+
+ static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+ module_param(srp_max_req_size, int, 0444);
+@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
+
++/* Type of the entries in srpt_memory_caches. */
++struct srpt_memory_cache_entry {
++ refcount_t ref;
++ struct kmem_cache *c;
++};
++
++static struct kmem_cache *srpt_cache_get(unsigned int object_size)
++{
++ struct srpt_memory_cache_entry *e;
++ char name[32];
++ void *res;
++
++ guard(mutex)(&srpt_mc_mutex);
++ e = xa_load(&srpt_memory_caches, object_size);
++ if (e) {
++ refcount_inc(&e->ref);
++ return e->c;
++ }
++ snprintf(name, sizeof(name), "srpt-%u", object_size);
++ e = kmalloc(sizeof(*e), GFP_KERNEL);
++ if (!e)
++ return NULL;
++ refcount_set(&e->ref, 1);
++ e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
++ if (!e->c)
++ goto free_entry;
++ res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
++ if (xa_is_err(res))
++ goto destroy_cache;
++ return e->c;
++
++destroy_cache:
++ kmem_cache_destroy(e->c);
++
++free_entry:
++ kfree(e);
++ return NULL;
++}
++
++static void srpt_cache_put(struct kmem_cache *c)
++{
++ struct srpt_memory_cache_entry *e = NULL;
++ unsigned long object_size;
++
++ guard(mutex)(&srpt_mc_mutex);
++ xa_for_each(&srpt_memory_caches, object_size, e)
++ if (e->c == c)
++ break;
++ if (WARN_ON_ONCE(!e))
++ return;
++ if (!refcount_dec_and_test(&e->ref))
++ return;
++ WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
++ kmem_cache_destroy(e->c);
++ kfree(e);
++}
++
+ /*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
+@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+- kmem_cache_destroy(ch->rsp_buf_cache);
++ srpt_cache_put(ch->rsp_buf_cache);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+- kmem_cache_destroy(ch->req_buf_cache);
++ srpt_cache_put(ch->req_buf_cache);
+
+ kref_put(&ch->kref, srpt_free_ch);
+ }
+@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+- ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+- 512, 0, NULL);
++ ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
+ if (!ch->rsp_buf_cache)
+ goto free_ch;
+
+@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ alignment_offset = round_up(imm_data_offset, 512) -
+ imm_data_offset;
+ req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+- ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+- 512, 0, NULL);
++ ch->req_buf_cache = srpt_cache_get(req_sz);
+ if (!ch->req_buf_cache)
+ goto free_rsp_ring;
+
+@@ -2478,7 +2535,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+ free_recv_cache:
+- kmem_cache_destroy(ch->req_buf_cache);
++ srpt_cache_put(ch->req_buf_cache);
+
+ free_rsp_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+@@ -2486,7 +2543,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+ free_rsp_cache:
+- kmem_cache_destroy(ch->rsp_buf_cache);
++ srpt_cache_put(ch->rsp_buf_cache);
+
+ free_ch:
+ if (rdma_cm_id)
+@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, sdev->req_buf_cache,
+ DMA_FROM_DEVICE);
+- kmem_cache_destroy(sdev->req_buf_cache);
++ srpt_cache_put(sdev->req_buf_cache);
+ sdev->srq = NULL;
+ }
+
+@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
+
+- sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+- srp_max_req_size, 0, 0, NULL);
++ sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
+ if (!sdev->req_buf_cache)
+ goto free_srq;
+
+@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
+ return 0;
+
+ free_cache:
+- kmem_cache_destroy(sdev->req_buf_cache);
++ srpt_cache_put(sdev->req_buf_cache);
+
+ free_srq:
+ ib_destroy_srq(srq);
+--
+2.43.0
+
--- /dev/null
+From 78af91b275a214a9b1473074cb81a9b9456f3352 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 04:24:42 -0700
+Subject: reset: starfive: jh71x0: Fix accessing the empty member on JH7110 SoC
+
+From: Changhuang Liang <changhuang.liang@starfivetech.com>
+
+[ Upstream commit 2cf59663660799ce16f4dfbed97cdceac7a7fa11 ]
+
+data->asserted will be NULL on JH7110 SoC since commit 82327b127d41
+("reset: starfive: Add StarFive JH7110 reset driver") was added. Add
+the judgment condition to avoid errors when calling reset_control_status
+on JH7110 SoC.
+
+Fixes: 82327b127d41 ("reset: starfive: Add StarFive JH7110 reset driver")
+Signed-off-by: Changhuang Liang <changhuang.liang@starfivetech.com>
+Acked-by: Hal Feng <hal.feng@starfivetech.com>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Link: https://lore.kernel.org/r/20240925112442.1732416-1-changhuang.liang@starfivetech.com
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/reset/starfive/reset-starfive-jh71x0.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/reset/starfive/reset-starfive-jh71x0.c b/drivers/reset/starfive/reset-starfive-jh71x0.c
+index 55bbbd2de52cf..29ce3486752f3 100644
+--- a/drivers/reset/starfive/reset-starfive-jh71x0.c
++++ b/drivers/reset/starfive/reset-starfive-jh71x0.c
+@@ -94,6 +94,9 @@ static int jh71x0_reset_status(struct reset_controller_dev *rcdev,
+ void __iomem *reg_status = data->status + offset * sizeof(u32);
+ u32 value = readl(reg_status);
+
++ if (!data->asserted)
++ return !(value & mask);
++
+ return !((value ^ data->asserted[offset]) & mask);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 5124906a55e88bea64d0d818560c459dddec839c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 13:24:29 +0200
+Subject: ring-buffer: Fix reader locking when changing the sub buffer order
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+[ Upstream commit 09661f75e75cb6c1d2d8326a70c311d46729235f ]
+
+The function ring_buffer_subbuf_order_set() updates each
+ring_buffer_per_cpu and installs new sub buffers that match the requested
+page order. This operation may be invoked concurrently with readers that
+rely on some of the modified data, such as the head bit (RB_PAGE_HEAD), or
+the ring_buffer_per_cpu.pages and reader_page pointers. However, no
+exclusive access is acquired by ring_buffer_subbuf_order_set(). Modifying
+the mentioned data while a reader also operates on them can then result in
+incorrect memory access and various crashes.
+
+Fix the problem by taking the reader_lock when updating a specific
+ring_buffer_per_cpu in ring_buffer_subbuf_order_set().
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240715145141.5528-1-petr.pavlu@suse.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20241010195849.2f77cc3f@gandalf.local.home/
+Link: https://lore.kernel.org/linux-trace-kernel/20241011112850.17212b25@gandalf.local.home/
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/20241015112440.26987-1-petr.pavlu@suse.com
+Fixes: 8e7b58c27b3c ("ring-buffer: Just update the subbuffers when changing their allocation order")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 44 ++++++++++++++++++++++----------------
+ 1 file changed, 26 insertions(+), 18 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index cebd879a30cbd..fb7b092e79313 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -6008,39 +6008,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ }
+
+ for_each_buffer_cpu(buffer, cpu) {
++ struct buffer_data_page *old_free_data_page;
++ struct list_head old_pages;
++ unsigned long flags;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ continue;
+
+ cpu_buffer = buffer->buffers[cpu];
+
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++
+ /* Clear the head bit to make the link list normal to read */
+ rb_head_page_deactivate(cpu_buffer);
+
+- /* Now walk the list and free all the old sub buffers */
+- list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
+- list_del_init(&bpage->list);
+- free_buffer_page(bpage);
+- }
+- /* The above loop stopped an the last page needing to be freed */
+- bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
+- free_buffer_page(bpage);
+-
+- /* Free the current reader page */
+- free_buffer_page(cpu_buffer->reader_page);
++ /*
++ * Collect buffers from the cpu_buffer pages list and the
++ * reader_page on old_pages, so they can be freed later when not
++ * under a spinlock. The pages list is a linked list with no
++ * head, adding old_pages turns it into a regular list with
++ * old_pages being the head.
++ */
++ list_add(&old_pages, cpu_buffer->pages);
++ list_add(&cpu_buffer->reader_page->list, &old_pages);
+
+ /* One page was allocated for the reader page */
+ cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
+ struct buffer_page, list);
+ list_del_init(&cpu_buffer->reader_page->list);
+
+- /* The cpu_buffer pages are a link list with no head */
++ /* Install the new pages, remove the head from the list */
+ cpu_buffer->pages = cpu_buffer->new_pages.next;
+- cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
+- cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
+-
+- /* Clear the new_pages list */
+- INIT_LIST_HEAD(&cpu_buffer->new_pages);
++ list_del_init(&cpu_buffer->new_pages);
+
+ cpu_buffer->head_page
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
+@@ -6049,11 +6048,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+ cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
+ cpu_buffer->nr_pages_to_update = 0;
+
+- free_pages((unsigned long)cpu_buffer->free_page, old_order);
++ old_free_data_page = cpu_buffer->free_page;
+ cpu_buffer->free_page = NULL;
+
+ rb_head_page_activate(cpu_buffer);
+
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++
++ /* Free old sub buffers */
++ list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
++ list_del_init(&bpage->list);
++ free_buffer_page(bpage);
++ }
++ free_pages((unsigned long)old_free_data_page, old_order);
++
+ rb_check_pages(cpu_buffer);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 8905eb188c0993512791f29bf755c0193bb75f61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 12:45:44 +0000
+Subject: riscv, bpf: Fix possible infinite tailcall when CONFIG_CFI_CLANG is
+ enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pu Lehui <pulehui@huawei.com>
+
+[ Upstream commit 30a59cc79754fd9ff3f41b7ee2eb21da85988548 ]
+
+When CONFIG_CFI_CLANG is enabled, the number of prologue instructions
+skipped by tailcall needs to include the kcfi instruction, otherwise the
+TCC will be initialized every tailcall is called, which may result in
+infinite tailcalls.
+
+Fixes: e63985ecd226 ("bpf, riscv64/cfi: Support kCFI + BPF on riscv64")
+Signed-off-by: Pu Lehui <pulehui@huawei.com>
+Acked-by: Björn Töpel <bjorn@kernel.org>
+Link: https://lore.kernel.org/r/20241008124544.171161-1-pulehui@huaweicloud.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/net/bpf_jit_comp64.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 99f34409fb60f..91bd5082c4d8e 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -18,6 +18,7 @@
+ #define RV_MAX_REG_ARGS 8
+ #define RV_FENTRY_NINSNS 2
+ #define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)
++#define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI_CLANG) ? 1 : 0)
+ /* imm that allows emit_imm to emit max count insns */
+ #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF
+
+@@ -271,7 +272,8 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+ if (!is_tail_call)
+ emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
+ emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+- is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
++ /* kcfi, fentry and TCC init insns will be skipped on tailcall */
++ is_tail_call ? (RV_KCFI_NINSNS + RV_FENTRY_NINSNS + 1) * 4 : 0,
+ ctx);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 3cf438927247b0e3e60a45c17cccbe7c6696dbf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2024 17:36:28 +0300
+Subject: riscv, bpf: Make BPF_CMPXCHG fully ordered
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrea Parri <parri.andrea@gmail.com>
+
+[ Upstream commit e59db0623f6955986d1be0880b351a1f56e7fd6d ]
+
+According to the prototype formal BPF memory consistency model
+discussed e.g. in [1] and following the ordering properties of
+the C/in-kernel macro atomic_cmpxchg(), a BPF atomic operation
+with the BPF_CMPXCHG modifier is fully ordered. However, the
+current RISC-V JIT lowerings fail to meet such memory ordering
+property. This is illustrated by the following litmus test:
+
+BPF BPF__MP+success_cmpxchg+fence
+{
+ 0:r1=x; 0:r3=y; 0:r5=1;
+ 1:r2=y; 1:r4=f; 1:r7=x;
+}
+ P0 | P1 ;
+ *(u64 *)(r1 + 0) = 1 | r1 = *(u64 *)(r2 + 0) ;
+ r2 = cmpxchg_64 (r3 + 0, r4, r5) | r3 = atomic_fetch_add((u64 *)(r4 + 0), r5) ;
+ | r6 = *(u64 *)(r7 + 0) ;
+exists (1:r1=1 /\ 1:r6=0)
+
+whose "exists" clause is not satisfiable according to the BPF
+memory model. Using the current RISC-V JIT lowerings, the test
+can be mapped to the following RISC-V litmus test:
+
+RISCV RISCV__MP+success_cmpxchg+fence
+{
+ 0:x1=x; 0:x3=y; 0:x5=1;
+ 1:x2=y; 1:x4=f; 1:x7=x;
+}
+ P0 | P1 ;
+ sd x5, 0(x1) | ld x1, 0(x2) ;
+ L00: | amoadd.d.aqrl x3, x5, 0(x4) ;
+ lr.d x2, 0(x3) | ld x6, 0(x7) ;
+ bne x2, x4, L01 | ;
+ sc.d x6, x5, 0(x3) | ;
+ bne x6, x4, L00 | ;
+ fence rw, rw | ;
+ L01: | ;
+exists (1:x1=1 /\ 1:x6=0)
+
+where the two stores in P0 can be reordered. Update the RISC-V
+JIT lowerings/implementation of BPF_CMPXCHG to emit an SC with
+RELEASE ("rl") annotation in order to meet the expected memory
+ordering guarantees. The resulting RISC-V JIT lowerings of
+BPF_CMPXCHG match the RISC-V lowerings of the C atomic_cmpxchg().
+
+Other lowerings were fixed via 20a759df3bba ("riscv, bpf: make
+some atomic operations fully ordered").
+
+Fixes: dd642ccb45ec ("riscv, bpf: Implement more atomic operations for RV64")
+Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Puranjay Mohan <puranjay@kernel.org>
+Acked-by: Björn Töpel <bjorn@kernel.org>
+Link: https://lpc.events/event/18/contributions/1949/attachments/1665/3441/bpfmemmodel.2024.09.19p.pdf [1]
+Link: https://lore.kernel.org/bpf/20241017143628.2673894-1-parri.andrea@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/net/bpf_jit_comp64.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 91bd5082c4d8e..4cc631fa70391 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -550,8 +550,8 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ rv_lr_w(r0, 0, rd, 0, 0), ctx);
+ jmp_offset = ninsns_rvoff(8);
+ emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
+- emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
+- rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) :
++ rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx);
+ jmp_offset = ninsns_rvoff(-6);
+ emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
+ emit(rv_fence(0x3, 0x3), ctx);
+--
+2.43.0
+
--- /dev/null
+From e6bdc397a4b9c88f881c74ac698304709e1ee707 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Oct 2024 17:52:39 +0200
+Subject: s390: Initialize psw mask in perf_arch_fetch_caller_regs()
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 223e7fb979fa06934f1595b6ad0ae1d4ead1147f ]
+
+Also initialize regs->psw.mask in perf_arch_fetch_caller_regs().
+This way user_mode(regs) will return false, like it should.
+
+It looks like all current users initialize regs to zero, so that this
+doesn't fix a bug currently. However it is better to not rely on callers
+to do this.
+
+Fixes: 914d52e46490 ("s390: implement perf_arch_fetch_caller_regs")
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/perf_event.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
+index 9917e2717b2b4..66aff768f8151 100644
+--- a/arch/s390/include/asm/perf_event.h
++++ b/arch/s390/include/asm/perf_event.h
+@@ -73,6 +73,7 @@ struct perf_sf_sde_regs {
+ #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
+
+ #define perf_arch_fetch_caller_regs(regs, __ip) do { \
++ (regs)->psw.mask = 0; \
+ (regs)->psw.addr = (__ip); \
+ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
+ offsetof(struct stack_frame, back_chain); \
+--
+2.43.0
+
--- /dev/null
+From 58ef005fe16afd8927dd1376f3ae54c98cc59c7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Apr 2024 14:01:39 +0200
+Subject: s390/pci: Handle PCI error codes other than 0x3a
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+[ Upstream commit 3cd03ea57e8e16cc78cc357d5e9f26078426f236 ]
+
+The Linux implementation of PCI error recovery for s390 was based on the
+understanding that firmware error recovery is a two step process with an
+optional initial error event to indicate the cause of the error if known
+followed by either error event 0x3A (Success) or 0x3B (Failure) to
+indicate whether firmware was able to recover. While this has been the
+case in testing and the error cases seen in the wild it turns out this
+is not correct. Instead firmware only generates 0x3A for some error and
+service scenarios and expects the OS to perform recovery for all PCI
+events codes except for those indicating permanent error (0x3B, 0x40)
+and those indicating errors on the function measurement block (0x2A,
+0x2B, 0x2C). Align Linux behavior with these expectations.
+
+Fixes: 4cdf2f4e24ff ("s390/pci: implement minimal PCI error recovery")
+Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/pci/pci_event.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index dbe95ec5917e5..d4f19d33914cb 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -280,18 +280,19 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ goto no_pdev;
+
+ switch (ccdf->pec) {
+- case 0x003a: /* Service Action or Error Recovery Successful */
++ case 0x002a: /* Error event concerns FMB */
++ case 0x002b:
++ case 0x002c:
++ break;
++ case 0x0040: /* Service Action or Error Recovery Failed */
++ case 0x003b:
++ zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
++ break;
++ default: /* PCI function left in the error state attempt to recover */
+ ers_res = zpci_event_attempt_error_recovery(pdev);
+ if (ers_res != PCI_ERS_RESULT_RECOVERED)
+ zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
+ break;
+- default:
+- /*
+- * Mark as frozen not permanently failed because the device
+- * could be subsequently recovered by the platform.
+- */
+- zpci_event_io_failure(pdev, pci_channel_io_frozen);
+- break;
+ }
+ pci_dev_put(pdev);
+ no_pdev:
+--
+2.43.0
+
--- /dev/null
+From 9d2cf0dd25362f5c9d723d7b24e25e1ae1724d85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 21:44:32 -0400
+Subject: sched/core: Disable page allocation in task_tick_mm_cid()
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 73ab05aa46b02d96509cb029a8d04fca7bbde8c7 ]
+
+With KASAN and PREEMPT_RT enabled, calling task_work_add() in
+task_tick_mm_cid() may cause the following splat.
+
+[ 63.696416] BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
+[ 63.696416] in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 610, name: modprobe
+[ 63.696416] preempt_count: 10001, expected: 0
+[ 63.696416] RCU nest depth: 1, expected: 1
+
+This problem is caused by the following call trace.
+
+ sched_tick() [ acquire rq->__lock ]
+ -> task_tick_mm_cid()
+ -> task_work_add()
+ -> __kasan_record_aux_stack()
+ -> kasan_save_stack()
+ -> stack_depot_save_flags()
+ -> alloc_pages_mpol_noprof()
+ -> __alloc_pages_noprof()
+ -> get_page_from_freelist()
+ -> rmqueue()
+ -> rmqueue_pcplist()
+ -> __rmqueue_pcplist()
+ -> rmqueue_bulk()
+ -> rt_spin_lock()
+
+The rq lock is a raw_spinlock_t. We can't sleep while holding
+it. IOW, we can't call alloc_pages() in stack_depot_save_flags().
+
+The task_tick_mm_cid() function with its task_work_add() call was
+introduced by commit 223baf9d17f2 ("sched: Fix performance regression
+introduced by mm_cid") in v6.4 kernel.
+
+Fortunately, there is a kasan_record_aux_stack_noalloc() variant that
+calls stack_depot_save_flags() while not allowing it to allocate
+new pages. To allow task_tick_mm_cid() to use task_work without
+page allocation, a new TWAF_NO_ALLOC flag is added to enable calling
+kasan_record_aux_stack_noalloc() instead of kasan_record_aux_stack()
+if set. The task_tick_mm_cid() function is modified to add this new flag.
+
+The possible downside is the missing stack trace in a KASAN report due
+to new page allocation required when task_work_add_noalloc() is called
+which should be rare.
+
+Fixes: 223baf9d17f2 ("sched: Fix performance regression introduced by mm_cid")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20241010014432.194742-1-longman@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/task_work.h | 5 ++++-
+ kernel/sched/core.c | 4 +++-
+ kernel/task_work.c | 15 +++++++++++++--
+ 3 files changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/task_work.h b/include/linux/task_work.h
+index cf5e7e891a776..2964171856e00 100644
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -14,11 +14,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
+ }
+
+ enum task_work_notify_mode {
+- TWA_NONE,
++ TWA_NONE = 0,
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
+ TWA_NMI_CURRENT,
++
++ TWA_FLAGS = 0xff00,
++ TWAF_NO_ALLOC = 0x0100,
+ };
+
+ static inline bool task_work_pending(struct task_struct *task)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 1af59cf714cd3..3713341c2e720 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -10262,7 +10262,9 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
+ return;
+ if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
+ return;
+- task_work_add(curr, work, TWA_RESUME);
++
++ /* No page allocation under rq lock */
++ task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ }
+
+ void sched_mm_cid_exit_signals(struct task_struct *t)
+diff --git a/kernel/task_work.c b/kernel/task_work.c
+index 5d14d639ac71b..c969f1f26be58 100644
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -55,15 +55,26 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
+ enum task_work_notify_mode notify)
+ {
+ struct callback_head *head;
++ int flags = notify & TWA_FLAGS;
+
++ notify &= ~TWA_FLAGS;
+ if (notify == TWA_NMI_CURRENT) {
+ if (WARN_ON_ONCE(task != current))
+ return -EINVAL;
+ if (!IS_ENABLED(CONFIG_IRQ_WORK))
+ return -EINVAL;
+ } else {
+- /* record the work call stack in order to print it in KASAN reports */
+- kasan_record_aux_stack(work);
++ /*
++ * Record the work call stack in order to print it in KASAN
++ * reports.
++ *
++ * Note that stack allocation can fail if TWAF_NO_ALLOC flag
++ * is set and new page is needed to expand the stack buffer.
++ */
++ if (flags & TWAF_NO_ALLOC)
++ kasan_record_aux_stack_noalloc(work);
++ else
++ kasan_record_aux_stack(work);
+ }
+
+ head = READ_ONCE(task->task_works);
+--
+2.43.0
+
--- /dev/null
+From 6e1fb4484c048da4bb9f802d9f769c6251979423 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2024 19:34:44 +0800
+Subject: scsi: target: core: Fix null-ptr-deref in target_alloc_device()
+
+From: Wang Hai <wanghai38@huawei.com>
+
+[ Upstream commit fca6caeb4a61d240f031914413fcc69534f6dc03 ]
+
+There is a null-ptr-deref issue reported by KASAN:
+
+BUG: KASAN: null-ptr-deref in target_alloc_device+0xbc4/0xbe0 [target_core_mod]
+...
+ kasan_report+0xb9/0xf0
+ target_alloc_device+0xbc4/0xbe0 [target_core_mod]
+ core_dev_setup_virtual_lun0+0xef/0x1f0 [target_core_mod]
+ target_core_init_configfs+0x205/0x420 [target_core_mod]
+ do_one_initcall+0xdd/0x4e0
+...
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+In target_alloc_device(), if allocing memory for dev queues fails, then
+dev will be freed by dev->transport->free_device(), but dev->transport
+is not initialized at that time, which will lead to a null pointer
+reference problem.
+
+Fixing this bug by freeing dev with hba->backend->ops->free_device().
+
+Fixes: 1526d9f10c61 ("scsi: target: Make state_list per CPU")
+Signed-off-by: Wang Hai <wanghai38@huawei.com>
+Link: https://lore.kernel.org/r/20241011113444.40749-1-wanghai38@huawei.com
+Reviewed-by: Mike Christie <michael.christie@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/target/target_core_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index bf4892544cfdb..bb84d304b07e5 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -691,7 +691,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+
+ dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+ if (!dev->queues) {
+- dev->transport->free_device(dev);
++ hba->backend->ops->free_device(dev);
+ return NULL;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From fd70a11e308eaf2026baf9fbb2104f938698f852 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 21:07:20 -0700
+Subject: selftests/bpf: Fix cross-compiling urandom_read
+
+From: Tony Ambardar <tony.ambardar@gmail.com>
+
+[ Upstream commit fd526e121c4d6f71aed82d21a8b8277b03e60b43 ]
+
+Linking of urandom_read and liburandom_read.so prefers LLVM's 'ld.lld' but
+falls back to using 'ld' if unsupported. However, this fallback discards
+any existing makefile macro for LD and can break cross-compilation.
+
+Fix by changing the fallback to use the target linker $(LD), passed via
+'-fuse-ld=' using an absolute path rather than a linker "flavour".
+
+Fixes: 08c79c9cd67f ("selftests/bpf: Don't force lld on non-x86 architectures")
+Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20241009040720.635260-1-tony.ambardar@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 848fffa250227..555fd34c6e1fc 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -221,7 +221,7 @@ $(OUTPUT)/%:%.c
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv))
+ LLD := lld
+ else
+-LLD := ld
++LLD := $(shell command -v $(LD))
+ endif
+
+ # Filter out -static for liburandom_read.so and its dependent targets so that static builds
+--
+2.43.0
+
--- /dev/null
+From fd52ac09a15e1b2db0f412089382a1693a964ee8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2024 16:43:12 +0000
+Subject: selftests/bpf: fix perf_event link info name_len assertion
+
+From: Tyrone Wu <wudevelops@gmail.com>
+
+[ Upstream commit 4538a38f654a1c292fe489a9b66179262bfed088 ]
+
+Fix `name_len` field assertions in `bpf_link_info.perf_event` for
+kprobe/uprobe/tracepoint to validate correct name size instead of 0.
+
+Fixes: 23cf7aa539dc ("selftests/bpf: Add selftest for fill_link_info")
+Signed-off-by: Tyrone Wu <wudevelops@gmail.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Acked-by: Yafang Shao <laoar.shao@gmail.com>
+Link: https://lore.kernel.org/r/20241008164312.46269-2-wudevelops@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/prog_tests/fill_link_info.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+index f3932941bbaaf..745c5ada4c4bf 100644
+--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
++++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+@@ -67,8 +67,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+
+ ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
+
++ ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
++ "name_len");
+ if (!info.perf_event.kprobe.func_name) {
+- ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
+ info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
+ info.perf_event.kprobe.name_len = sizeof(buf);
+ goto again;
+@@ -79,8 +80,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+ ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
+ break;
+ case BPF_PERF_EVENT_TRACEPOINT:
++ ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
++ "name_len");
+ if (!info.perf_event.tracepoint.tp_name) {
+- ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
+ info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
+ info.perf_event.tracepoint.name_len = sizeof(buf);
+ goto again;
+@@ -96,8 +98,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+ case BPF_PERF_EVENT_URETPROBE:
+ ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
+
++ ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
++ "name_len");
+ if (!info.perf_event.uprobe.file_name) {
+- ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
+ info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
+ info.perf_event.uprobe.name_len = sizeof(buf);
+ goto again;
+--
+2.43.0
+
--- /dev/null
+From ad8b93ffe0a86e3b6be297826cd34b12080fc877 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 10:02:57 +0800
+Subject: selftests: mm: fix the incorrect usage() info of khugepaged
+
+From: Nanyong Sun <sunnanyong@huawei.com>
+
+[ Upstream commit 3e822bed2fbd1527d88f483342b1d2a468520a9a ]
+
+The mount option of tmpfs should be huge=advise, not madvise which is not
+supported and may mislead the users.
+
+Link: https://lkml.kernel.org/r/20241015020257.139235-1-sunnanyong@huawei.com
+Fixes: 1b03d0d558a2 ("selftests/vm: add thp collapse file and tmpfs testing")
+Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Zach O'Keefe <zokeefe@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/mm/khugepaged.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
+index 829320a519e72..89dec42986825 100644
+--- a/tools/testing/selftests/mm/khugepaged.c
++++ b/tools/testing/selftests/mm/khugepaged.c
+@@ -1091,7 +1091,7 @@ static void usage(void)
+ fprintf(stderr, "\n\t\"file,all\" mem_type requires kernel built with\n");
+ fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n");
+ fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n");
+- fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n");
++ fprintf(stderr, "\tmounted with huge=advise option for khugepaged tests to work\n");
+ fprintf(stderr, "\n\tSupported Options:\n");
+ fprintf(stderr, "\t\t-h: This help message.\n");
+ fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n");
+--
+2.43.0
+
+bpf-use-raw_spinlock_t-in-ringbuf.patch
+iio-accel-bma400-fix-uninitialized-variable-field_va.patch
+reset-starfive-jh71x0-fix-accessing-the-empty-member.patch
+bpf-sync_linked_regs-must-preserve-subreg_def.patch
+bpf-make-sure-internal-and-uapi-bpf_redirect-flags-d.patch
+irqchip-riscv-imsic-fix-output-text-of-base-address.patch
+bpf-devmap-provide-rxq-after-redirect.patch
+cpufreq-amd-pstate-fix-amd_pstate-mode-switch-on-sha.patch
+lib-kconfig.debug-fix-grammar-in-rust_build_assert_a.patch
+bpf-fix-memory-leak-in-bpf_core_apply.patch
+rdma-bnxt_re-fix-a-possible-memory-leak.patch
+rdma-bnxt_re-fix-incorrect-avid-type-in-wqe-structur.patch
+rdma-bnxt_re-add-a-check-for-memory-allocation.patch
+rdma-core-fix-enodev-error-for-iwarp-test-over-vlan.patch
+x86-resctrl-avoid-overflow-in-mb-settings-in-bw_vali.patch
+arm-dts-bcm2837-rpi-cm3-io3-fix-hdmi-hpd-gpio-pin.patch
+clk-rockchip-fix-finding-of-maximum-clock-id.patch
+bpf-check-the-remaining-info_cnt-before-repeating-bt.patch
+bpf-fix-unpopulated-name_len-field-in-perf_event-lin.patch
+selftests-bpf-fix-perf_event-link-info-name_len-asse.patch
+riscv-bpf-fix-possible-infinite-tailcall-when-config.patch
+s390-pci-handle-pci-error-codes-other-than-0x3a.patch
+bpf-fix-kfunc-btf-caching-for-modules.patch
+iio-frequency-admv4420-adrf6780-format-kconfig-entri.patch
+iio-frequency-admv4420-fix-missing-select-remap_spi-.patch
+drm-vmwgfx-handle-possible-enomem-in-vmw_stdu_connec.patch
+selftests-bpf-fix-cross-compiling-urandom_read.patch
+bpf-fix-unpopulated-path_size-when-uprobe_multi-fiel.patch
+sched-core-disable-page-allocation-in-task_tick_mm_c.patch
+alsa-hda-cs8409-fix-possible-null-dereference.patch
+firmware-arm_scmi-fix-the-double-free-in-scmi_debugf.patch
+rdma-cxgb4-fix-rdma_cm_event_unreachable-error-for-i.patch
+rdma-irdma-fix-misspelling-of-accept.patch
+rdma-srpt-make-slab-cache-names-unique.patch
+elevator-do-not-request_module-if-elevator-exists.patch
+elevator-remove-argument-from-elevator_find_get.patch
+ipv4-give-an-ipv4-dev-to-blackhole_netdev.patch
+net-sparx5-fix-source-port-register-when-mirroring.patch
+rdma-bnxt_re-fix-the-max-cq-wqes-for-older-adapters.patch
+rdma-bnxt_re-fix-out-of-bound-check.patch
+rdma-bnxt_re-fix-incorrect-dereference-of-srq-in-asy.patch
+rdma-bnxt_re-return-more-meaningful-error.patch
+rdma-bnxt_re-avoid-cpu-lockups-due-fifo-occupancy-ch.patch
+rdma-bnxt_re-get-the-toggle-bits-from-srq-events.patch
+rdma-bnxt_re-change-the-sequence-of-updating-the-cq-.patch
+rdma-bnxt_re-fix-a-bug-while-setting-up-level-2-pbl-.patch
+rdma-bnxt_re-fix-the-gid-table-length.patch
+accel-qaic-fix-the-for-loop-used-to-walk-sg-table.patch
+drm-panel-himax-hx83102-adjust-power-and-gamma-to-op.patch
+drm-msm-dpu-make-sure-phys-resources-are-properly-in.patch
+drm-msm-dpu-move-crtc-resource-assignment-to-dpu_enc.patch
+drm-msm-dpu-check-for-overflow-in-_dpu_crtc_setup_lm.patch
+drm-msm-dsi-improve-fix-dsc-pclk-calculation.patch
+drm-msm-dsi-fix-32-bit-signed-integer-extension-in-p.patch
+drm-msm-avoid-null-dereference-in-msm_disp_state_pri.patch
+drm-msm-allocate-memory-for-disp-snapshot-with-kvzal.patch
+firmware-arm_scmi-queue-in-scmi-layer-for-mailbox-im.patch
+net-smc-fix-memory-leak-when-using-percpu-refs.patch
+patch-hwmon-jc42-properly-detect-tse2004-compliant-d.patch
+net-usb-usbnet-fix-race-in-probe-failure.patch
+net-stmmac-dwmac-tegra-fix-link-bring-up-sequence.patch
+octeontx2-af-fix-potential-integer-overflows-on-inte.patch
+ring-buffer-fix-reader-locking-when-changing-the-sub.patch
+drm-amd-amdgpu-fix-double-unlock-in-amdgpu_mes_add_r.patch
+macsec-don-t-increment-counters-for-an-unrelated-sa.patch
+netdevsim-use-cond_resched-in-nsim_dev_trap_report_w.patch
+net-ethernet-aeroflex-fix-potential-memory-leak-in-g.patch
+net-smc-fix-searching-in-list-of-known-pnetids-in-sm.patch
+net-xilinx-axienet-fix-potential-memory-leak-in-axie.patch
+net-ethernet-rtsn-fix-potential-memory-leak-in-rtsn_.patch
+bpf-fix-truncation-bug-in-coerce_reg_to_size_sx.patch
+net-systemport-fix-potential-memory-leak-in-bcm_sysp.patch
+irqchip-renesas-rzg2l-fix-missing-put_device.patch
+drm-msm-dpu-don-t-always-set-merge_3d-pending-flush.patch
+drm-msm-dpu-don-t-always-program-merge_3d-block.patch
+net-bcmasp-fix-potential-memory-leak-in-bcmasp_xmit.patch
+drm-msm-a6xx-insert-a-fence-wait-before-smmu-table-u.patch
+tcp-dccp-don-t-use-timer_pending-in-reqsk_queue_unli.patch
+net-dsa-mv88e6xxx-fix-the-max_vid-definition-for-the.patch
+genetlink-hold-rcu-in-genlmsg_mcast.patch
+ravb-remove-setting-of-rx-software-timestamp.patch
+net-ravb-only-advertise-rx-tx-timestamps-if-hardware.patch
+net-dsa-vsc73xx-fix-reception-from-vlan-unaware-brid.patch
+scsi-target-core-fix-null-ptr-deref-in-target_alloc_.patch
+smb-client-fix-possible-double-free-in-smb2_set_ea.patch
+smb-client-fix-oobs-when-building-smb2_ioctl-request.patch
+usb-typec-altmode-should-keep-reference-to-parent.patch
+s390-initialize-psw-mask-in-perf_arch_fetch_caller_r.patch
+drm-xe-fix-unbalanced-rpm-put-with-fence_fini.patch
+drm-xe-fix-unbalanced-rpm-put-with-declare_wedged.patch
+drm-xe-take-job-list-lock-in-xe_sched_add_pending_jo.patch
+drm-xe-don-t-free-job-in-tdr.patch
+drm-xe-use-bookkeep-slots-for-external-bo-s-in-exec-.patch
+bpf-fix-link-info-netfilter-flags-to-populate-defrag.patch
+bluetooth-bnep-fix-wild-memory-access-in-proto_unreg.patch
+selftests-mm-fix-the-incorrect-usage-info-of-khugepa.patch
+vmxnet3-fix-packet-corruption-in-vmxnet3_xdp_xmit_fr.patch
+net-ethernet-mtk_eth_soc-fix-memory-corruption-durin.patch
+net-mlx5-check-for-invalid-vector-index-on-eq-creati.patch
+net-mlx5-fix-command-bitmask-initialization.patch
+net-mlx5-unregister-notifier-on-eswitch-init-failure.patch
+net-mlx5e-don-t-call-cleanup-on-profile-rollback-fai.patch
+bpf-sockmap-sk_drop-on-attempted-redirects-of-unsupp.patch
+vsock-update-rx_bytes-on-read_skb.patch
+vsock-update-msg_count-on-read_skb.patch
+bpf-vsock-drop-static-vsock_bpf_prot-initialization.patch
+riscv-bpf-make-bpf_cmpxchg-fully-ordered.patch
+nvme-pci-fix-race-condition-between-reset-and-nvme_d.patch
+bpf-fix-iter-task-tid-filtering.patch
+bpf-fix-incorrect-delta-propagation-between-linked-r.patch
+bpf-fix-print_reg_state-s-constant-scalar-dump.patch
+cdrom-avoid-barrier_nospec-in-cdrom_ioctl_media_chan.patch
+fgraph-allocate-ret_stack_list-with-proper-size.patch
+mm-shmem-rename-shmem_is_huge-to-shmem_huge_global_e.patch
+mm-shmem-move-shmem_huge_global_enabled-into-shmem_a.patch
+mm-huge_memory-add-vma_thp_disabled-and-thp_disabled.patch
+mm-don-t-install-pmd-mappings-when-thps-are-disabled.patch
+iio-adc-ti-lmp92064-add-missing-select-iio_-triggere.patch
+xhci-dbgtty-remove-kfifo_out-wrapper.patch
+xhci-dbgtty-use-kfifo-from-tty_port-struct.patch
+xhci-dbc-honor-usb-transfer-size-boundaries.patch
--- /dev/null
+From 550c6072a72588bca749d5e5acaa6d8adc8d67e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 19:04:04 -0300
+Subject: smb: client: fix OOBs when building SMB2_IOCTL request
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 1ab60323c5201bef25f2a3dc0ccc404d9aca77f1 ]
+
+When using encryption, either enforced by the server or when using
+'seal' mount option, the client will squash all compound request buffers
+down for encryption into a single iov in smb2_set_next_command().
+
+SMB2_ioctl_init() allocates a small buffer (448 bytes) to hold the
+SMB2_IOCTL request in the first iov, and if the user passes an input
+buffer that is greater than 328 bytes, smb2_set_next_command() will
+end up writing off the end of @rqst->iov[0].iov_base as shown below:
+
+ mount.cifs //srv/share /mnt -o ...,seal
+ ln -s $(perl -e "print('a')for 1..1024") /mnt/link
+
+ BUG: KASAN: slab-out-of-bounds in
+ smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ Write of size 4116 at addr ffff8881148fcab8 by task ln/859
+
+ CPU: 1 UID: 0 PID: 859 Comm: ln Not tainted 6.12.0-rc3 #1
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS
+ 1.16.3-2.fc40 04/01/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x5d/0x80
+ ? smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ print_report+0x156/0x4d9
+ ? smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ ? __virt_addr_valid+0x145/0x310
+ ? __phys_addr+0x46/0x90
+ ? smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ kasan_report+0xda/0x110
+ ? smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ kasan_check_range+0x10f/0x1f0
+ __asan_memcpy+0x3c/0x60
+ smb2_set_next_command.cold+0x1d6/0x24c [cifs]
+ smb2_compound_op+0x238c/0x3840 [cifs]
+ ? kasan_save_track+0x14/0x30
+ ? kasan_save_free_info+0x3b/0x70
+ ? vfs_symlink+0x1a1/0x2c0
+ ? do_symlinkat+0x108/0x1c0
+ ? __pfx_smb2_compound_op+0x10/0x10 [cifs]
+ ? kmem_cache_free+0x118/0x3e0
+ ? cifs_get_writable_path+0xeb/0x1a0 [cifs]
+ smb2_get_reparse_inode+0x423/0x540 [cifs]
+ ? __pfx_smb2_get_reparse_inode+0x10/0x10 [cifs]
+ ? rcu_is_watching+0x20/0x50
+ ? __kmalloc_noprof+0x37c/0x480
+ ? smb2_create_reparse_symlink+0x257/0x490 [cifs]
+ ? smb2_create_reparse_symlink+0x38f/0x490 [cifs]
+ smb2_create_reparse_symlink+0x38f/0x490 [cifs]
+ ? __pfx_smb2_create_reparse_symlink+0x10/0x10 [cifs]
+ ? find_held_lock+0x8a/0xa0
+ ? hlock_class+0x32/0xb0
+ ? __build_path_from_dentry_optional_prefix+0x19d/0x2e0 [cifs]
+ cifs_symlink+0x24f/0x960 [cifs]
+ ? __pfx_make_vfsuid+0x10/0x10
+ ? __pfx_cifs_symlink+0x10/0x10 [cifs]
+ ? make_vfsgid+0x6b/0xc0
+ ? generic_permission+0x96/0x2d0
+ vfs_symlink+0x1a1/0x2c0
+ do_symlinkat+0x108/0x1c0
+ ? __pfx_do_symlinkat+0x10/0x10
+ ? strncpy_from_user+0xaa/0x160
+ __x64_sys_symlinkat+0xb9/0xf0
+ do_syscall_64+0xbb/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ RIP: 0033:0x7f08d75c13bb
+
+Reported-by: David Howells <dhowells@redhat.com>
+Fixes: e77fe73c7e38 ("cifs: we can not use small padding iovs together with encryption")
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2pdu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 3d9e6e15dd900..194a4262d57a0 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3308,6 +3308,15 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ return rc;
+
+ if (indatalen) {
++ unsigned int len;
++
++ if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
++ (check_add_overflow(total_len - 1,
++ ALIGN(indatalen, 8), &len) ||
++ len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
++ cifs_small_buf_release(req);
++ return -EIO;
++ }
+ /*
+ * indatalen is usually small at a couple of bytes max, so
+ * just allocate through generic pool
+--
+2.43.0
+
--- /dev/null
+From c062cbd3bd65b432a4d5c844079121368634ff50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2024 18:20:37 +0800
+Subject: smb: client: fix possible double free in smb2_set_ea()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Su Hui <suhui@nfschina.com>
+
+[ Upstream commit 19ebc1e6cab334a8193398d4152deb76019b5d34 ]
+
+Clang static checker (scan-build) warning:
+fs/smb/client/smb2ops.c:1304:2: Attempt to free released memory.
+ 1304 | kfree(ea);
+ | ^~~~~~~~~
+
+There is a double free in such case:
+'ea is initialized to NULL' -> 'first successful memory allocation for
+ea' -> 'something failed, goto sea_exit' -> 'first memory release for ea'
+-> 'goto replay_again' -> 'second goto sea_exit before allocate memory
+for ea' -> 'second memory release for ea resulted in double free'.
+
+Re-initialize 'ea' to NULL near to the replay_again label, it can fix this
+double free problem.
+
+Fixes: 4f1fffa23769 ("cifs: commands that are retried should have replay flag set")
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Su Hui <suhui@nfschina.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2ops.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e9be7b43bb6b8..b9e332443b0d9 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -1156,7 +1156,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid fid;
+ unsigned int size[1];
+ void *data[1];
+- struct smb2_file_full_ea_info *ea = NULL;
++ struct smb2_file_full_ea_info *ea;
+ struct smb2_query_info_rsp *rsp;
+ int rc, used_len = 0;
+ int retries = 0, cur_sleep = 1;
+@@ -1177,6 +1177,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ if (!utf16_path)
+ return -ENOMEM;
+
++ ea = NULL;
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ vars = kzalloc(sizeof(*vars), GFP_KERNEL);
+ if (!vars) {
+--
+2.43.0
+
--- /dev/null
+From b82c586ce405ca09c2f3f2046a0ba4c22f004cca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 15:33:12 -0700
+Subject: tcp/dccp: Don't use timer_pending() in reqsk_queue_unlink().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit e8c526f2bdf1845bedaf6a478816a3d06fa78b8f ]
+
+Martin KaFai Lau reported use-after-free [0] in reqsk_timer_handler().
+
+ """
+ We are seeing a use-after-free from a bpf prog attached to
+ trace_tcp_retransmit_synack. The program passes the req->sk to the
+ bpf_sk_storage_get_tracing kernel helper which does check for null
+ before using it.
+ """
+
+The commit 83fccfc3940c ("inet: fix potential deadlock in
+reqsk_queue_unlink()") added timer_pending() in reqsk_queue_unlink() not
+to call del_timer_sync() from reqsk_timer_handler(), but it introduced a
+small race window.
+
+Before the timer is called, expire_timers() calls detach_timer(timer, true)
+to clear timer->entry.pprev and marks it as not pending.
+
+If reqsk_queue_unlink() checks timer_pending() just after expire_timers()
+calls detach_timer(), TCP will miss del_timer_sync(); the reqsk timer will
+continue running and send multiple SYN+ACKs until it expires.
+
+The reported UAF could happen if req->sk is close()d earlier than the timer
+expiration, which is 63s by default.
+
+The scenario would be
+
+ 1. inet_csk_complete_hashdance() calls inet_csk_reqsk_queue_drop(),
+ but del_timer_sync() is missed
+
+ 2. reqsk timer is executed and scheduled again
+
+ 3. req->sk is accept()ed and reqsk_put() decrements rsk_refcnt, but
+ reqsk timer still has another one, and inet_csk_accept() does not
+ clear req->sk for non-TFO sockets
+
+ 4. sk is close()d
+
+ 5. reqsk timer is executed again, and BPF touches req->sk
+
+Let's not use timer_pending() by passing the caller context to
+__inet_csk_reqsk_queue_drop().
+
+Note that reqsk timer is pinned, so the issue does not happen in most
+use cases. [1]
+
+[0]
+BUG: KFENCE: use-after-free read in bpf_sk_storage_get_tracing+0x2e/0x1b0
+
+Use-after-free read at 0x00000000a891fb3a (in kfence-#1):
+bpf_sk_storage_get_tracing+0x2e/0x1b0
+bpf_prog_5ea3e95db6da0438_tcp_retransmit_synack+0x1d20/0x1dda
+bpf_trace_run2+0x4c/0xc0
+tcp_rtx_synack+0xf9/0x100
+reqsk_timer_handler+0xda/0x3d0
+run_timer_softirq+0x292/0x8a0
+irq_exit_rcu+0xf5/0x320
+sysvec_apic_timer_interrupt+0x6d/0x80
+asm_sysvec_apic_timer_interrupt+0x16/0x20
+intel_idle_irq+0x5a/0xa0
+cpuidle_enter_state+0x94/0x273
+cpu_startup_entry+0x15e/0x260
+start_secondary+0x8a/0x90
+secondary_startup_64_no_verify+0xfa/0xfb
+
+kfence-#1: 0x00000000a72cc7b6-0x00000000d97616d9, size=2376, cache=TCPv6
+
+allocated by task 0 on cpu 9 at 260507.901592s:
+sk_prot_alloc+0x35/0x140
+sk_clone_lock+0x1f/0x3f0
+inet_csk_clone_lock+0x15/0x160
+tcp_create_openreq_child+0x1f/0x410
+tcp_v6_syn_recv_sock+0x1da/0x700
+tcp_check_req+0x1fb/0x510
+tcp_v6_rcv+0x98b/0x1420
+ipv6_list_rcv+0x2258/0x26e0
+napi_complete_done+0x5b1/0x2990
+mlx5e_napi_poll+0x2ae/0x8d0
+net_rx_action+0x13e/0x590
+irq_exit_rcu+0xf5/0x320
+common_interrupt+0x80/0x90
+asm_common_interrupt+0x22/0x40
+cpuidle_enter_state+0xfb/0x273
+cpu_startup_entry+0x15e/0x260
+start_secondary+0x8a/0x90
+secondary_startup_64_no_verify+0xfa/0xfb
+
+freed by task 0 on cpu 9 at 260507.927527s:
+rcu_core_si+0x4ff/0xf10
+irq_exit_rcu+0xf5/0x320
+sysvec_apic_timer_interrupt+0x6d/0x80
+asm_sysvec_apic_timer_interrupt+0x16/0x20
+cpuidle_enter_state+0xfb/0x273
+cpu_startup_entry+0x15e/0x260
+start_secondary+0x8a/0x90
+secondary_startup_64_no_verify+0xfa/0xfb
+
+Fixes: 83fccfc3940c ("inet: fix potential deadlock in reqsk_queue_unlink()")
+Reported-by: Martin KaFai Lau <martin.lau@kernel.org>
+Closes: https://lore.kernel.org/netdev/eb6684d0-ffd9-4bdc-9196-33f690c25824@linux.dev/
+Link: https://lore.kernel.org/netdev/b55e2ca0-42f2-4b7c-b445-6ffd87ca74a0@linux.dev/ [1]
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20241014223312.4254-1-kuniyu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/inet_connection_sock.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 64d07b842e736..cd7989b514eaa 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1044,21 +1044,31 @@ static bool reqsk_queue_unlink(struct request_sock *req)
+ found = __sk_nulls_del_node_init_rcu(sk);
+ spin_unlock(lock);
+ }
+- if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+- reqsk_put(req);
++
+ return found;
+ }
+
+-bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
++static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
++ struct request_sock *req,
++ bool from_timer)
+ {
+ bool unlinked = reqsk_queue_unlink(req);
+
++ if (!from_timer && timer_delete_sync(&req->rsk_timer))
++ reqsk_put(req);
++
+ if (unlinked) {
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ reqsk_put(req);
+ }
++
+ return unlinked;
+ }
++
++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
++{
++ return __inet_csk_reqsk_queue_drop(sk, req, false);
++}
+ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+
+ void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
+@@ -1151,7 +1161,7 @@ static void reqsk_timer_handler(struct timer_list *t)
+
+ if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
+ /* delete timer */
+- inet_csk_reqsk_queue_drop(sk_listener, nreq);
++ __inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
+ goto no_ownership;
+ }
+
+@@ -1177,7 +1187,8 @@ static void reqsk_timer_handler(struct timer_list *t)
+ }
+
+ drop:
+- inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
++ __inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
++ reqsk_put(req);
+ }
+
+ static bool reqsk_queue_hash_req(struct request_sock *req,
+--
+2.43.0
+
--- /dev/null
+From 360c73b57cc56bb11c47f09640ff3333a439a1c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2024 09:37:38 -0300
+Subject: usb: typec: altmode should keep reference to parent
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+[ Upstream commit befab3a278c59db0cc88c8799638064f6d3fd6f8 ]
+
+The altmode device release refers to its parent device, but without keeping
+a reference to it.
+
+When registering the altmode, get a reference to the parent and put it in
+the release function.
+
+Before this fix, when using CONFIG_DEBUG_KOBJECT_RELEASE, we see issues
+like this:
+
+[ 43.572860] kobject: 'port0.0' (ffff8880057ba008): kobject_release, parent 0000000000000000 (delayed 3000)
+[ 43.573532] kobject: 'port0.1' (ffff8880057bd008): kobject_release, parent 0000000000000000 (delayed 1000)
+[ 43.574407] kobject: 'port0' (ffff8880057b9008): kobject_release, parent 0000000000000000 (delayed 3000)
+[ 43.575059] kobject: 'port1.0' (ffff8880057ca008): kobject_release, parent 0000000000000000 (delayed 4000)
+[ 43.575908] kobject: 'port1.1' (ffff8880057c9008): kobject_release, parent 0000000000000000 (delayed 4000)
+[ 43.576908] kobject: 'typec' (ffff8880062dbc00): kobject_release, parent 0000000000000000 (delayed 4000)
+[ 43.577769] kobject: 'port1' (ffff8880057bf008): kobject_release, parent 0000000000000000 (delayed 3000)
+[ 46.612867] ==================================================================
+[ 46.613402] BUG: KASAN: slab-use-after-free in typec_altmode_release+0x38/0x129
+[ 46.614003] Read of size 8 at addr ffff8880057b9118 by task kworker/2:1/48
+[ 46.614538]
+[ 46.614668] CPU: 2 UID: 0 PID: 48 Comm: kworker/2:1 Not tainted 6.12.0-rc1-00138-gedbae730ad31 #535
+[ 46.615391] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+[ 46.616042] Workqueue: events kobject_delayed_cleanup
+[ 46.616446] Call Trace:
+[ 46.616648] <TASK>
+[ 46.616820] dump_stack_lvl+0x5b/0x7c
+[ 46.617112] ? typec_altmode_release+0x38/0x129
+[ 46.617470] print_report+0x14c/0x49e
+[ 46.617769] ? rcu_read_unlock_sched+0x56/0x69
+[ 46.618117] ? __virt_addr_valid+0x19a/0x1ab
+[ 46.618456] ? kmem_cache_debug_flags+0xc/0x1d
+[ 46.618807] ? typec_altmode_release+0x38/0x129
+[ 46.619161] kasan_report+0x8d/0xb4
+[ 46.619447] ? typec_altmode_release+0x38/0x129
+[ 46.619809] ? process_scheduled_works+0x3cb/0x85f
+[ 46.620185] typec_altmode_release+0x38/0x129
+[ 46.620537] ? process_scheduled_works+0x3cb/0x85f
+[ 46.620907] device_release+0xaf/0xf2
+[ 46.621206] kobject_delayed_cleanup+0x13b/0x17a
+[ 46.621584] process_scheduled_works+0x4f6/0x85f
+[ 46.621955] ? __pfx_process_scheduled_works+0x10/0x10
+[ 46.622353] ? hlock_class+0x31/0x9a
+[ 46.622647] ? lock_acquired+0x361/0x3c3
+[ 46.622956] ? move_linked_works+0x46/0x7d
+[ 46.623277] worker_thread+0x1ce/0x291
+[ 46.623582] ? __kthread_parkme+0xc8/0xdf
+[ 46.623900] ? __pfx_worker_thread+0x10/0x10
+[ 46.624236] kthread+0x17e/0x190
+[ 46.624501] ? kthread+0xfb/0x190
+[ 46.624756] ? __pfx_kthread+0x10/0x10
+[ 46.625015] ret_from_fork+0x20/0x40
+[ 46.625268] ? __pfx_kthread+0x10/0x10
+[ 46.625532] ret_from_fork_asm+0x1a/0x30
+[ 46.625805] </TASK>
+[ 46.625953]
+[ 46.626056] Allocated by task 678:
+[ 46.626287] kasan_save_stack+0x24/0x44
+[ 46.626555] kasan_save_track+0x14/0x2d
+[ 46.626811] __kasan_kmalloc+0x3f/0x4d
+[ 46.627049] __kmalloc_noprof+0x1bf/0x1f0
+[ 46.627362] typec_register_port+0x23/0x491
+[ 46.627698] cros_typec_probe+0x634/0xbb6
+[ 46.628026] platform_probe+0x47/0x8c
+[ 46.628311] really_probe+0x20a/0x47d
+[ 46.628605] device_driver_attach+0x39/0x72
+[ 46.628940] bind_store+0x87/0xd7
+[ 46.629213] kernfs_fop_write_iter+0x1aa/0x218
+[ 46.629574] vfs_write+0x1d6/0x29b
+[ 46.629856] ksys_write+0xcd/0x13b
+[ 46.630128] do_syscall_64+0xd4/0x139
+[ 46.630420] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 46.630820]
+[ 46.630946] Freed by task 48:
+[ 46.631182] kasan_save_stack+0x24/0x44
+[ 46.631493] kasan_save_track+0x14/0x2d
+[ 46.631799] kasan_save_free_info+0x3f/0x4d
+[ 46.632144] __kasan_slab_free+0x37/0x45
+[ 46.632474] kfree+0x1d4/0x252
+[ 46.632725] device_release+0xaf/0xf2
+[ 46.633017] kobject_delayed_cleanup+0x13b/0x17a
+[ 46.633388] process_scheduled_works+0x4f6/0x85f
+[ 46.633764] worker_thread+0x1ce/0x291
+[ 46.634065] kthread+0x17e/0x190
+[ 46.634324] ret_from_fork+0x20/0x40
+[ 46.634621] ret_from_fork_asm+0x1a/0x30
+
+Fixes: 8a37d87d72f0 ("usb: typec: Bus type for alternate modes")
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20241004123738.2964524-1-cascardo@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/typec/class.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 9262fcd4144f8..d61b4c74648df 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -519,6 +519,7 @@ static void typec_altmode_release(struct device *dev)
+ typec_altmode_put_partner(alt);
+
+ altmode_id_remove(alt->adev.dev.parent, alt->id);
++ put_device(alt->adev.dev.parent);
+ kfree(alt);
+ }
+
+@@ -568,6 +569,8 @@ typec_register_altmode(struct device *parent,
+ alt->adev.dev.type = &typec_altmode_dev_type;
+ dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
+
++ get_device(alt->adev.dev.parent);
++
+ /* Link partners and plugs with the ports */
+ if (!is_port)
+ typec_altmode_set_partner(alt);
+--
+2.43.0
+
--- /dev/null
+From 11ec9887a59d5ccf110b089be326a65edd598a67 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2024 21:03:11 +0200
+Subject: vmxnet3: Fix packet corruption in vmxnet3_xdp_xmit_frame
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 4678adf94da4a9e9683817b246b58ce15fb81782 ]
+
+Andrew and Nikolay reported connectivity issues with Cilium's service
+load-balancing in case of vmxnet3.
+
+If a BPF program for native XDP adds an encapsulation header such as
+IPIP and transmits the packet out the same interface, then in case
+of vmxnet3 a corrupted packet is being sent and subsequently dropped
+on the path.
+
+vmxnet3_xdp_xmit_frame() which is called e.g. via vmxnet3_run_xdp()
+through vmxnet3_xdp_xmit_back() calculates an incorrect DMA address:
+
+ page = virt_to_page(xdpf->data);
+ tbi->dma_addr = page_pool_get_dma_addr(page) +
+ VMXNET3_XDP_HEADROOM;
+ dma_sync_single_for_device(&adapter->pdev->dev,
+ tbi->dma_addr, buf_size,
+ DMA_TO_DEVICE);
+
+The above assumes a fixed offset (VMXNET3_XDP_HEADROOM), but the XDP
+BPF program could have moved xdp->data. While the passed buf_size is
+correct (xdpf->len), the dma_addr needs to have a dynamic offset which
+can be calculated as xdpf->data - (void *)xdpf, that is, xdp->data -
+xdp->data_hard_start.
+
+Fixes: 54f00cce1178 ("vmxnet3: Add XDP support.")
+Reported-by: Andrew Sauber <andrew.sauber@isovalent.com>
+Reported-by: Nikolay Nikolaev <nikolay.nikolaev@isovalent.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Nikolay Nikolaev <nikolay.nikolaev@isovalent.com>
+Acked-by: Anton Protopopov <aspsk@isovalent.com>
+Cc: William Tu <witu@nvidia.com>
+Cc: Ronak Doshi <ronak.doshi@broadcom.com>
+Link: https://patch.msgid.link/a0888656d7f09028f9984498cc698bb5364d89fc.1728931137.git.daniel@iogearbox.net
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vmxnet3/vmxnet3_xdp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index a6c787454a1ae..1341374a4588a 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ } else { /* XDP buffer from page pool */
+ page = virt_to_page(xdpf->data);
+ tbi->dma_addr = page_pool_get_dma_addr(page) +
+- VMXNET3_XDP_HEADROOM;
++ (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(&adapter->pdev->dev,
+ tbi->dma_addr, buf_size,
+ DMA_TO_DEVICE);
+--
+2.43.0
+
--- /dev/null
+From ced6f0685049372ad74be21fbf9483b4ef6d8bd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Oct 2024 18:26:41 +0200
+Subject: vsock: Update msg_count on read_skb()
+
+From: Michal Luczaj <mhal@rbox.co>
+
+[ Upstream commit 6dafde852df8de3617d4b9f835b629aaeaccd01d ]
+
+Dequeuing via vsock_transport::read_skb() left msg_count outdated, which
+then confused SOCK_SEQPACKET recv(). Decrease the counter.
+
+Fixes: 634f1a7110b4 ("vsock: support sockmap")
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20241013-vsock-fixes-for-redir-v2-3-d6577bbfe742@rbox.co
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/vmw_vsock/virtio_transport_common.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 0f67cb0c64702..645222ac84e3f 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1688,6 +1688,9 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ }
+
+ hdr = virtio_vsock_hdr(skb);
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
++ vvs->msg_count--;
++
+ virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
+ spin_unlock_bh(&vvs->rx_lock);
+
+--
+2.43.0
+
--- /dev/null
+From 48bde160caf2a1d38995095dc6cdfffa34aa3405 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 13 Oct 2024 18:26:40 +0200
+Subject: vsock: Update rx_bytes on read_skb()
+
+From: Michal Luczaj <mhal@rbox.co>
+
+[ Upstream commit 3543152f2d330141d9394d28855cb90b860091d2 ]
+
+Make sure virtio_transport_inc_rx_pkt() and virtio_transport_dec_rx_pkt()
+calls are balanced (i.e. virtio_vsock_sock::rx_bytes doesn't lie) after
+vsock_transport::read_skb().
+
+While here, also inform the peer that we've freed up space and it has more
+credit.
+
+Failing to update rx_bytes after packet is dequeued leads to a warning on
+SOCK_STREAM recv():
+
+[ 233.396654] rx_queue is empty, but rx_bytes is non-zero
+[ 233.396702] WARNING: CPU: 11 PID: 40601 at net/vmw_vsock/virtio_transport_common.c:589
+
+Fixes: 634f1a7110b4 ("vsock: support sockmap")
+Suggested-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20241013-vsock-fixes-for-redir-v2-2-d6577bbfe742@rbox.co
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/vmw_vsock/virtio_transport_common.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 16ff976a86e3e..0f67cb0c64702 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1672,6 +1672,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct sock *sk = sk_vsock(vsk);
++ struct virtio_vsock_hdr *hdr;
+ struct sk_buff *skb;
+ int off = 0;
+ int err;
+@@ -1681,10 +1682,16 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ * works for types other than dgrams.
+ */
+ skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
++ if (!skb) {
++ spin_unlock_bh(&vvs->rx_lock);
++ return err;
++ }
++
++ hdr = virtio_vsock_hdr(skb);
++ virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
+ spin_unlock_bh(&vvs->rx_lock);
+
+- if (!skb)
+- return err;
++ virtio_transport_send_credit_update(vsk);
+
+ return recv_actor(sk, skb);
+ }
+--
+2.43.0
+
--- /dev/null
+From 3c4cf255b46ce45cd701d432eadca58ecbdd1681 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2024 13:43:56 +0200
+Subject: x86/resctrl: Avoid overflow in MB settings in bw_validate()
+
+From: Martin Kletzander <nert.pinx@gmail.com>
+
+[ Upstream commit 2b5648416e47933939dc310c4ea1e29404f35630 ]
+
+The resctrl schemata file supports specifying memory bandwidth associated with
+the Memory Bandwidth Allocation (MBA) feature via a percentage (this is the
+default) or bandwidth in MiBps (when resctrl is mounted with the "mba_MBps"
+option).
+
+The allowed range for the bandwidth percentage is from
+/sys/fs/resctrl/info/MB/min_bandwidth to 100, using a granularity of
+/sys/fs/resctrl/info/MB/bandwidth_gran. The supported range for the MiBps
+bandwidth is 0 to U32_MAX.
+
+There are two issues with parsing of MiBps memory bandwidth:
+
+* The user provided MiBps is mistakenly rounded up to the granularity
+ that is unique to percentage input.
+
+* The user provided MiBps is parsed using unsigned long (thus accepting
+ values up to ULONG_MAX), and then assigned to u32 that could result in
+ overflow.
+
+Do not round up the MiBps value and parse user provided bandwidth as the u32
+it is intended to be. Use the appropriate kstrtou32() that can detect out of
+range values.
+
+Fixes: 8205a078ba78 ("x86/intel_rdt/mba_sc: Add schemata support")
+Fixes: 6ce1560d35f6 ("x86/resctrl: Switch over to the resctrl mbps_val list")
+Co-developed-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Martin Kletzander <nert.pinx@gmail.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+index 50fa1fe9a073f..200d89a640270 100644
+--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
++++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+@@ -29,10 +29,10 @@
+ * hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+-static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
++static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
+ {
+- unsigned long bw;
+ int ret;
++ u32 bw;
+
+ /*
+ * Only linear delay values is supported for current Intel SKUs.
+@@ -42,16 +42,21 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+ return false;
+ }
+
+- ret = kstrtoul(buf, 10, &bw);
++ ret = kstrtou32(buf, 10, &bw);
+ if (ret) {
+- rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
++ rdt_last_cmd_printf("Invalid MB value %s\n", buf);
+ return false;
+ }
+
+- if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
+- !is_mba_sc(r)) {
+- rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+- r->membw.min_bw, r->default_ctrl);
++ /* Nothing else to do if software controller is enabled. */
++ if (is_mba_sc(r)) {
++ *data = bw;
++ return true;
++ }
++
++ if (bw < r->membw.min_bw || bw > r->default_ctrl) {
++ rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
++ bw, r->membw.min_bw, r->default_ctrl);
+ return false;
+ }
+
+@@ -65,7 +70,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
+ struct resctrl_staged_config *cfg;
+ u32 closid = data->rdtgrp->closid;
+ struct rdt_resource *r = s->res;
+- unsigned long bw_val;
++ u32 bw_val;
+
+ cfg = &d->staged_config[s->conf_type];
+ if (cfg->have_new_ctrl) {
+--
+2.43.0
+
--- /dev/null
+From c8698f307278680d5e5a46bb222711651159150d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 17:00:00 +0300
+Subject: xhci: dbc: honor usb transfer size boundaries.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit 30c9ae5ece8ecd69d36e6912c2c0896418f2468c ]
+
+Treat each completed full size write to /dev/ttyDBC0 as a separate usb
+transfer. Make sure the size of the TRBs matches the size of the tty
+write by first queuing as many max packet size TRBs as possible up to
+the last TRB which will be cut short to match the size of the tty write.
+
+This solves an issue where userspace writes several transfers back to
+back via /dev/ttyDBC0 into a kfifo before dbgtty can find available
+request to turn that kfifo data into TRBs on the transfer ring.
+
+The boundary between transfer was lost as xhci-dbgtty then turned
+everything in the kfifo into as many 'max packet size' TRBs as possible.
+
+DbC would then send more data to the host than intended for that
+transfer, causing host to issue a babble error.
+
+Refuse to write more data to kfifo until previous tty write data is
+turned into properly sized TRBs with data size boundaries matching tty
+write size
+
+Tested-by: Uday M Bhat <uday.m.bhat@intel.com>
+Tested-by: Łukasz Bartosik <ukaszb@chromium.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20241016140000.783905-5-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/xhci-dbgcap.h | 1 +
+ drivers/usb/host/xhci-dbgtty.c | 55 ++++++++++++++++++++++++++++++----
+ 2 files changed, 51 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
+index 8ec813b6e9fda..9dc8f4d8077cc 100644
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -110,6 +110,7 @@ struct dbc_port {
+ struct tasklet_struct push;
+
+ struct list_head write_pool;
++ unsigned int tx_boundary;
+
+ bool registered;
+ };
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index 881f5a7e6e0e1..0266c2f5bc0d8 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -24,6 +24,29 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
+ return dbc->priv;
+ }
+
++static unsigned int
++dbc_kfifo_to_req(struct dbc_port *port, char *packet)
++{
++ unsigned int len;
++
++ len = kfifo_len(&port->port.xmit_fifo);
++
++ if (len == 0)
++ return 0;
++
++ len = min(len, DBC_MAX_PACKET);
++
++ if (port->tx_boundary)
++ len = min(port->tx_boundary, len);
++
++ len = kfifo_out(&port->port.xmit_fifo, packet, len);
++
++ if (port->tx_boundary)
++ port->tx_boundary -= len;
++
++ return len;
++}
++
+ static int dbc_start_tx(struct dbc_port *port)
+ __releases(&port->port_lock)
+ __acquires(&port->port_lock)
+@@ -36,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port)
+
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+- len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET);
++ len = dbc_kfifo_to_req(port, req->buf);
+ if (len == 0)
+ break;
+ do_tty_wake = true;
+@@ -200,14 +223,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
+ {
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
++ unsigned int written = 0;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- if (count)
+- count = kfifo_in(&port->port.xmit_fifo, buf, count);
+- dbc_start_tx(port);
++
++ /*
++ * Treat tty write as one usb transfer. Make sure the writes are turned
++ * into TRB request having the same size boundaries as the tty writes.
++ * Don't add data to kfifo before previous write is turned into TRBs
++ */
++ if (port->tx_boundary) {
++ spin_unlock_irqrestore(&port->port_lock, flags);
++ return 0;
++ }
++
++ if (count) {
++ written = kfifo_in(&port->port.xmit_fifo, buf, count);
++
++ if (written == count)
++ port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
++
++ dbc_start_tx(port);
++ }
++
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+- return count;
++ return written;
+ }
+
+ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
+@@ -241,6 +282,10 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ room = kfifo_avail(&port->port.xmit_fifo);
++
++ if (port->tx_boundary)
++ room = 0;
++
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return room;
+--
+2.43.0
+
--- /dev/null
+From 513f58d322267c6340ef9e67c646fd30416710b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 12:35:40 +0200
+Subject: xhci: dbgtty: remove kfifo_out() wrapper
+
+From: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+
+[ Upstream commit 2b217514436744dd98c4d9fa48d60610f9f67d61 ]
+
+There is no need to check against kfifo_len() before kfifo_out(). Just
+ask the latter for data and it tells how much it retrieved. Or returns 0
+in case there are no more.
+
+Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+Cc: Mathias Nyman <mathias.nyman@intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-usb@vger.kernel.org
+Link: https://lore.kernel.org/r/20240808103549.429349-5-jirislaby@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 30c9ae5ece8e ("xhci: dbc: honor usb transfer size boundaries.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/xhci-dbgtty.c | 15 +--------------
+ 1 file changed, 1 insertion(+), 14 deletions(-)
+
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index b74e98e943932..64ea964949975 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -24,19 +24,6 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
+ return dbc->priv;
+ }
+
+-static unsigned int
+-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
+-{
+- unsigned int len;
+-
+- len = kfifo_len(&port->write_fifo);
+- if (len < size)
+- size = len;
+- if (size != 0)
+- size = kfifo_out(&port->write_fifo, packet, size);
+- return size;
+-}
+-
+ static int dbc_start_tx(struct dbc_port *port)
+ __releases(&port->port_lock)
+ __acquires(&port->port_lock)
+@@ -49,7 +36,7 @@ static int dbc_start_tx(struct dbc_port *port)
+
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+- len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
++ len = kfifo_out(&port->write_fifo, req->buf, DBC_MAX_PACKET);
+ if (len == 0)
+ break;
+ do_tty_wake = true;
+--
+2.43.0
+
--- /dev/null
+From d9ec7cfc040423373aa75fee383d6b698e6d9f66 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Aug 2024 12:35:41 +0200
+Subject: xhci: dbgtty: use kfifo from tty_port struct
+
+From: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+
+[ Upstream commit 866025f0237609532bc8e4af5ef4d7252d3b55b6 ]
+
+There is no need to define one in a custom structure. The tty_port one
+is free to use.
+
+Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+Cc: Mathias Nyman <mathias.nyman@intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: linux-usb@vger.kernel.org
+Link: https://lore.kernel.org/r/20240808103549.429349-6-jirislaby@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 30c9ae5ece8e ("xhci: dbc: honor usb transfer size boundaries.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/xhci-dbgcap.h | 1 -
+ drivers/usb/host/xhci-dbgtty.c | 17 +++++++++--------
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
+index 97c5dc290138b..8ec813b6e9fda 100644
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -110,7 +110,6 @@ struct dbc_port {
+ struct tasklet_struct push;
+
+ struct list_head write_pool;
+- struct kfifo write_fifo;
+
+ bool registered;
+ };
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index 64ea964949975..881f5a7e6e0e1 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -36,7 +36,7 @@ static int dbc_start_tx(struct dbc_port *port)
+
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+- len = kfifo_out(&port->write_fifo, req->buf, DBC_MAX_PACKET);
++ len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET);
+ if (len == 0)
+ break;
+ do_tty_wake = true;
+@@ -203,7 +203,7 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (count)
+- count = kfifo_in(&port->write_fifo, buf, count);
++ count = kfifo_in(&port->port.xmit_fifo, buf, count);
+ dbc_start_tx(port);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+@@ -217,7 +217,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- status = kfifo_put(&port->write_fifo, ch);
++ status = kfifo_put(&port->port.xmit_fifo, ch);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return status;
+@@ -240,7 +240,7 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
+ unsigned int room;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- room = kfifo_avail(&port->write_fifo);
++ room = kfifo_avail(&port->port.xmit_fifo);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return room;
+@@ -253,7 +253,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
+ unsigned int chars;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- chars = kfifo_len(&port->write_fifo);
++ chars = kfifo_len(&port->port.xmit_fifo);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return chars;
+@@ -411,7 +411,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
+ goto err_idr;
+ }
+
+- ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
++ ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
++ GFP_KERNEL);
+ if (ret)
+ goto err_exit_port;
+
+@@ -440,7 +441,7 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
+ xhci_dbc_free_requests(&port->read_pool);
+ xhci_dbc_free_requests(&port->write_pool);
+ err_free_fifo:
+- kfifo_free(&port->write_fifo);
++ kfifo_free(&port->port.xmit_fifo);
+ err_exit_port:
+ idr_remove(&dbc_tty_minors, port->minor);
+ err_idr:
+@@ -465,7 +466,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
+ idr_remove(&dbc_tty_minors, port->minor);
+ mutex_unlock(&dbc_tty_minors_lock);
+
+- kfifo_free(&port->write_fifo);
++ kfifo_free(&port->port.xmit_fifo);
+ xhci_dbc_free_requests(&port->read_pool);
+ xhci_dbc_free_requests(&port->read_queue);
+ xhci_dbc_free_requests(&port->write_pool);
+--
+2.43.0
+