From: Sasha Levin Date: Sat, 24 May 2025 10:22:28 +0000 (-0400) Subject: Fixes for 6.12 X-Git-Tag: v6.12.31~76 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=90df688ef6613ffe5d58ca34b5d81432f09f7455;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.12 Signed-off-by: Sasha Levin --- diff --git a/queue-6.12/asoc-sof-intel-hda-fix-uaf-when-reloading-module.patch b/queue-6.12/asoc-sof-intel-hda-fix-uaf-when-reloading-module.patch new file mode 100644 index 0000000000..a146620627 --- /dev/null +++ b/queue-6.12/asoc-sof-intel-hda-fix-uaf-when-reloading-module.patch @@ -0,0 +1,101 @@ +From 4b5fbcc1b106a9b9b49138699a21e00189e5415c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 May 2025 09:37:49 -0400 +Subject: ASoC: SOF: Intel: hda: Fix UAF when reloading module + +From: Tavian Barnes + +[ Upstream commit 7dd7f39fce0022b386ef1ea5ffef92ecc7dfc6af ] + +hda_generic_machine_select() appends -idisp to the tplg filename by +allocating a new string with devm_kasprintf(), then stores the string +right back into the global variable snd_soc_acpi_intel_hda_machines. +When the module is unloaded, this memory is freed, resulting in a global +variable pointing to freed memory. Reloading the module then triggers +a use-after-free: + +BUG: KFENCE: use-after-free read in string+0x48/0xe0 + +Use-after-free read at 0x00000000967e0109 (in kfence-#99): + string+0x48/0xe0 + vsnprintf+0x329/0x6e0 + devm_kvasprintf+0x54/0xb0 + devm_kasprintf+0x58/0x80 + hda_machine_select.cold+0x198/0x17a2 [snd_sof_intel_hda_generic] + sof_probe_work+0x7f/0x600 [snd_sof] + process_one_work+0x17b/0x330 + worker_thread+0x2ce/0x3f0 + kthread+0xcf/0x100 + ret_from_fork+0x31/0x50 + ret_from_fork_asm+0x1a/0x30 + +kfence-#99: 0x00000000198a940f-0x00000000ace47d9d, size=64, cache=kmalloc-64 + +allocated by task 333 on cpu 8 at 17.798069s (130.453553s ago): + devm_kmalloc+0x52/0x120 + devm_kvasprintf+0x66/0xb0 + devm_kasprintf+0x58/0x80 + hda_machine_select.cold+0x198/0x17a2 [snd_sof_intel_hda_generic] + sof_probe_work+0x7f/0x600 [snd_sof] + process_one_work+0x17b/0x330 + worker_thread+0x2ce/0x3f0 + kthread+0xcf/0x100 + ret_from_fork+0x31/0x50 + ret_from_fork_asm+0x1a/0x30 + +freed by task 1543 on cpu 4 at 141.586686s (6.665010s ago): + release_nodes+0x43/0xb0 + devres_release_all+0x90/0xf0 + device_unbind_cleanup+0xe/0x70 + device_release_driver_internal+0x1c1/0x200 + driver_detach+0x48/0x90 + bus_remove_driver+0x6d/0xf0 + pci_unregister_driver+0x42/0xb0 + __do_sys_delete_module+0x1d1/0x310 + do_syscall_64+0x82/0x190 + entry_SYSCALL_64_after_hwframe+0x76/0x7e + +Fix it by copying the match array with devm_kmemdup_array() before we +modify it. 
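An illustrative aside on the lifetime bug (a minimal sketch of the pattern, not the driver's actual code; the structure and function names here are hypothetical): a devm_ allocation lives only as long as the device binding, so storing it in a table with static storage duration leaves a dangling pointer behind after module unload.

  #include <linux/device.h>
  #include <linux/errno.h>

  struct mach_entry { const char *tplg_filename; };

  static struct mach_entry mach_table[2];       /* static: outlives any one binding */

  static int example_probe(struct device *dev)
  {
          /* Freed automatically when 'dev' is unbound, e.g. on module unload... */
          mach_table[0].tplg_filename =
                  devm_kasprintf(dev, GFP_KERNEL, "%s-idisp", "sof-hda-generic");

          /* ...but mach_table still holds the pointer, so the next probe after
           * a reload reads freed memory.  Duplicating the table per device with
           * devm_kmemdup_array(), as the patch below does, keeps the modified
           * copy device-scoped instead.
           */
          return mach_table[0].tplg_filename ? 0 : -ENOMEM;
  }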
+ +Fixes: 5458411d7594 ("ASoC: SOF: Intel: hda: refactoring topology name fixup for HDA mach") +Suggested-by: Peter Ujfalusi +Acked-by: Peter Ujfalusi +Signed-off-by: Tavian Barnes +Link: https://patch.msgid.link/570b15570b274520a0d9052f4e0f064a29c950ef.1747229716.git.tavianator@tavianator.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/sof/intel/hda.c | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c +index c924a998d6f90..9c8f79e55ec5d 100644 +--- a/sound/soc/sof/intel/hda.c ++++ b/sound/soc/sof/intel/hda.c +@@ -1007,7 +1007,21 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev, + if (!*mach && codec_num <= 2) { + bool tplg_fixup = false; + +- hda_mach = snd_soc_acpi_intel_hda_machines; ++ /* ++ * make a local copy of the match array since we might ++ * be modifying it ++ */ ++ hda_mach = devm_kmemdup_array(sdev->dev, ++ snd_soc_acpi_intel_hda_machines, ++ 2, /* we have one entry + sentinel in the array */ ++ sizeof(snd_soc_acpi_intel_hda_machines[0]), ++ GFP_KERNEL); ++ if (!hda_mach) { ++ dev_err(bus->dev, ++ "%s: failed to duplicate the HDA match table\n", ++ __func__); ++ return; ++ } + + dev_info(bus->dev, "using HDA machine driver %s now\n", + hda_mach->drv_name); +-- +2.39.5 + diff --git a/queue-6.12/bluetooth-btusb-use-skb_pull-to-avoid-unsafe-access-.patch b/queue-6.12/bluetooth-btusb-use-skb_pull-to-avoid-unsafe-access-.patch new file mode 100644 index 0000000000..c62427604b --- /dev/null +++ b/queue-6.12/bluetooth-btusb-use-skb_pull-to-avoid-unsafe-access-.patch @@ -0,0 +1,193 @@ +From 4565d49a78dcce7f73b9adf4786c86df74a078b3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 May 2025 22:15:20 +0800 +Subject: Bluetooth: btusb: use skb_pull to avoid unsafe access in QCA dump + handling + +From: En-Wei Wu + +[ Upstream commit 4bcb0c7dc25446b99fc7a8fa2a143d69f3314162 ] + +Use skb_pull() and skb_pull_data() to safely parse QCA dump packets. + +This avoids direct pointer math on skb->data, which could lead to +invalid access if the packet is shorter than expected. 
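An illustrative aside on the safe-parse pattern (a minimal sketch, not the driver code; the header layout and values are hypothetical): skb_pull_data() only advances the packet when at least the requested number of bytes is present and returns NULL otherwise, so a truncated packet fails the check instead of being read past its end.

  #include <linux/skbuff.h>

  struct example_dump_hdr {             /* hypothetical header layout */
          __le16 seqno;
          u8 vse_class;
          u8 msg_type;
  } __packed;

  static bool example_pkt_is_dump(struct sk_buff *skb)
  {
          struct example_dump_hdr *hdr;

          hdr = skb_pull_data(skb, sizeof(*hdr));
          if (!hdr)                       /* shorter than the header: reject */
                  return false;

          return hdr->msg_type == 0x08;   /* arbitrary value for the sketch */
  }

In the patch itself the predicate helpers operate on a clone of the skb, so the original data offset stays intact for the later dump handling.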
+ +Fixes: 20981ce2d5a5 ("Bluetooth: btusb: Add WCN6855 devcoredump support") +Signed-off-by: En-Wei Wu +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + drivers/bluetooth/btusb.c | 98 ++++++++++++++++----------------------- + 1 file changed, 40 insertions(+), 58 deletions(-) + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 7e1f03231b4c9..af2be0271806f 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -2979,9 +2979,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev) + static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + { + int ret = 0; ++ unsigned int skip = 0; + u8 pkt_type; +- u8 *sk_ptr; +- unsigned int sk_len; + u16 seqno; + u32 dump_size; + +@@ -2990,18 +2989,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + struct usb_device *udev = btdata->udev; + + pkt_type = hci_skb_pkt_type(skb); +- sk_ptr = skb->data; +- sk_len = skb->len; ++ skip = sizeof(struct hci_event_hdr); ++ if (pkt_type == HCI_ACLDATA_PKT) ++ skip += sizeof(struct hci_acl_hdr); + +- if (pkt_type == HCI_ACLDATA_PKT) { +- sk_ptr += HCI_ACL_HDR_SIZE; +- sk_len -= HCI_ACL_HDR_SIZE; +- } +- +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ skb_pull(skb, skip); ++ dump_hdr = (struct qca_dump_hdr *)skb->data; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; + seqno = le16_to_cpu(dump_hdr->seqno); + if (seqno == 0) { + set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); +@@ -3021,16 +3015,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + + btdata->qca_dump.ram_dump_size = dump_size; + btdata->qca_dump.ram_dump_seqno = 0; +- sk_ptr += offsetof(struct qca_dump_hdr, data0); +- sk_len -= offsetof(struct qca_dump_hdr, data0); ++ ++ skb_pull(skb, offsetof(struct qca_dump_hdr, data0)); + + usb_disable_autosuspend(udev); + bt_dev_info(hdev, "%s memdump size(%u)\n", + (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event", + dump_size); + } else { +- sk_ptr += offsetof(struct qca_dump_hdr, data); +- sk_len -= offsetof(struct qca_dump_hdr, data); ++ skb_pull(skb, offsetof(struct qca_dump_hdr, data)); + } + + if (!btdata->qca_dump.ram_dump_size) { +@@ -3050,7 +3043,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + return ret; + } + +- skb_pull(skb, skb->len - sk_len); + hci_devcd_append(hdev, skb); + btdata->qca_dump.ram_dump_seqno++; + if (seqno == QCA_LAST_SEQUENCE_NUM) { +@@ -3078,68 +3070,58 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) + /* Return: true if the ACL packet is a dump packet, false otherwise. 
*/ + static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- u8 *sk_ptr; +- unsigned int sk_len; +- + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; ++ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); ++ bool is_dump = false; + +- sk_ptr = skb->data; +- sk_len = skb->len; +- +- acl_hdr = hci_acl_hdr(skb); +- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) ++ if (!clone) + return false; + +- sk_ptr += HCI_ACL_HDR_SIZE; +- sk_len -= HCI_ACL_HDR_SIZE; +- event_hdr = (struct hci_event_hdr *)sk_ptr; +- +- if ((event_hdr->evt != HCI_VENDOR_PKT) || +- (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) +- return false; ++ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); ++ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) ++ goto out; + +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); ++ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) ++ goto out; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; +- if ((sk_len < offsetof(struct qca_dump_hdr, data)) || +- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || +- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) +- return false; ++ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); ++ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ goto out; + +- return true; ++ is_dump = true; ++out: ++ consume_skb(clone); ++ return is_dump; + } + + /* Return: true if the event packet is a dump packet, false otherwise. */ + static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) + { +- u8 *sk_ptr; +- unsigned int sk_len; +- + struct hci_event_hdr *event_hdr; + struct qca_dump_hdr *dump_hdr; ++ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); ++ bool is_dump = false; + +- sk_ptr = skb->data; +- sk_len = skb->len; +- +- event_hdr = hci_event_hdr(skb); +- +- if ((event_hdr->evt != HCI_VENDOR_PKT) +- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) ++ if (!clone) + return false; + +- sk_ptr += HCI_EVENT_HDR_SIZE; +- sk_len -= HCI_EVENT_HDR_SIZE; ++ event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); ++ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) ++ goto out; + +- dump_hdr = (struct qca_dump_hdr *)sk_ptr; +- if ((sk_len < offsetof(struct qca_dump_hdr, data)) || +- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || +- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) +- return false; ++ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); ++ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || ++ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) ++ goto out; + +- return true; ++ is_dump = true; ++out: ++ consume_skb(clone); ++ return is_dump; + } + + static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) +-- +2.39.5 + diff --git a/queue-6.12/bluetooth-l2cap-fix-not-checking-l2cap_chan-security.patch b/queue-6.12/bluetooth-l2cap-fix-not-checking-l2cap_chan-security.patch new file mode 100644 index 0000000000..1cb3523363 --- /dev/null +++ b/queue-6.12/bluetooth-l2cap-fix-not-checking-l2cap_chan-security.patch @@ -0,0 +1,92 @@ +From 934c392fcca1f15f4a4415fce54df43fa64e5271 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 May 2025 15:00:30 -0400 +Subject: Bluetooth: L2CAP: Fix not checking l2cap_chan security level + +From: Luiz Augusto von Dentz + +[ Upstream commit 7af8479d9eb4319b4ba7b47a8c4d2c55af1c31e1 ] + +l2cap_check_enc_key_size shall check the 
security level of the +l2cap_chan rather than the hci_conn since for incoming connection +request that may be different as hci_conn may already been +encrypted using a different security level. + +Fixes: 522e9ed157e3 ("Bluetooth: l2cap: Check encryption key size on incoming connection") +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/l2cap_core.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index c219a8c596d3e..66fa5d6fea6ca 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) + sizeof(req), &req); + } + +-static bool l2cap_check_enc_key_size(struct hci_conn *hcon) ++static bool l2cap_check_enc_key_size(struct hci_conn *hcon, ++ struct l2cap_chan *chan) + { + /* The minimum encryption key size needs to be enforced by the + * host stack before establishing any L2CAP connections. The +@@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) + int min_key_size = hcon->hdev->min_enc_key_size; + + /* On FIPS security level, key size must be 16 bytes */ +- if (hcon->sec_level == BT_SECURITY_FIPS) ++ if (chan->sec_level == BT_SECURITY_FIPS) + min_key_size = 16; + + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || +@@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) + !__l2cap_no_conn_pending(chan)) + return; + +- if (l2cap_check_enc_key_size(conn->hcon)) ++ if (l2cap_check_enc_key_size(conn->hcon, chan)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); +@@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) + continue; + } + +- if (l2cap_check_enc_key_size(conn->hcon)) ++ if (l2cap_check_enc_key_size(conn->hcon, chan)) + l2cap_start_connection(chan); + else + l2cap_chan_close(chan, ECONNREFUSED); +@@ -3957,7 +3958,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, + /* Check if the ACL is secure enough (if not SDP) */ + if (psm != cpu_to_le16(L2CAP_PSM_SDP) && + (!hci_conn_check_link_mode(conn->hcon) || +- !l2cap_check_enc_key_size(conn->hcon))) { ++ !l2cap_check_enc_key_size(conn->hcon, pchan))) { + conn->disc_reason = HCI_ERROR_AUTH_FAILURE; + result = L2CAP_CR_SEC_BLOCK; + goto response; +@@ -7317,7 +7318,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) + } + + if (chan->state == BT_CONNECT) { +- if (!status && l2cap_check_enc_key_size(hcon)) ++ if (!status && l2cap_check_enc_key_size(hcon, chan)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); +@@ -7327,7 +7328,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) + struct l2cap_conn_rsp rsp; + __u16 res, stat; + +- if (!status && l2cap_check_enc_key_size(hcon)) { ++ if (!status && l2cap_check_enc_key_size(hcon, chan)) { + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + res = L2CAP_CR_PEND; + stat = L2CAP_CS_AUTHOR_PEND; +-- +2.39.5 + diff --git a/queue-6.12/bridge-netfilter-fix-forwarding-of-fragmented-packet.patch b/queue-6.12/bridge-netfilter-fix-forwarding-of-fragmented-packet.patch new file mode 100644 index 0000000000..66e194c734 --- /dev/null +++ b/queue-6.12/bridge-netfilter-fix-forwarding-of-fragmented-packet.patch @@ -0,0 +1,95 @@ +From d35c1027d227a26be1442632a58430249df8edf9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 15 May 2025 11:48:48 +0300 
+Subject: bridge: netfilter: Fix forwarding of fragmented packets + +From: Ido Schimmel + +[ Upstream commit 91b6dbced0ef1d680afdd69b14fc83d50ebafaf3 ] + +When netfilter defrag hooks are loaded (due to the presence of conntrack +rules, for example), fragmented packets entering the bridge will be +defragged by the bridge's pre-routing hook (br_nf_pre_routing() -> +ipv4_conntrack_defrag()). + +Later on, in the bridge's post-routing hook, the defragged packet will +be fragmented again. If the size of the largest fragment is larger than +what the kernel has determined as the destination MTU (using +ip_skb_dst_mtu()), the defragged packet will be dropped. + +Before commit ac6627a28dbf ("net: ipv4: Consolidate ipv4_mtu and +ip_dst_mtu_maybe_forward"), ip_skb_dst_mtu() would return dst_mtu() as +the destination MTU. Assuming the dst entry attached to the packet is +the bridge's fake rtable one, this would simply be the bridge's MTU (see +fake_mtu()). + +However, after above mentioned commit, ip_skb_dst_mtu() ends up +returning the route's MTU stored in the dst entry's metrics. Ideally, in +case the dst entry is the bridge's fake rtable one, this should be the +bridge's MTU as the bridge takes care of updating this metric when its +MTU changes (see br_change_mtu()). + +Unfortunately, the last operation is a no-op given the metrics attached +to the fake rtable entry are marked as read-only. Therefore, +ip_skb_dst_mtu() ends up returning 1500 (the initial MTU value) and +defragged packets are dropped during fragmentation when dealing with +large fragments and high MTU (e.g., 9k). + +Fix by moving the fake rtable entry's metrics to be per-bridge (in a +similar fashion to the fake rtable entry itself) and marking them as +writable, thereby allowing MTU changes to be reflected. + +Fixes: 62fa8a846d7d ("net: Implement read-only protection and COW'ing of metrics.") +Fixes: 33eb9873a283 ("bridge: initialize fake_rtable metrics") +Reported-by: Venkat Venkatsubra +Closes: https://lore.kernel.org/netdev/PH0PR10MB4504888284FF4CBA648197D0ACB82@PH0PR10MB4504.namprd10.prod.outlook.com/ +Tested-by: Venkat Venkatsubra +Signed-off-by: Ido Schimmel +Acked-by: Nikolay Aleksandrov +Link: https://patch.msgid.link/20250515084848.727706-1-idosch@nvidia.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/bridge/br_nf_core.c | 7 ++----- + net/bridge/br_private.h | 1 + + 2 files changed, 3 insertions(+), 5 deletions(-) + +diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c +index 98aea5485aaef..a8c67035e23c0 100644 +--- a/net/bridge/br_nf_core.c ++++ b/net/bridge/br_nf_core.c +@@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { + * ipt_REJECT needs it. Future netfilter modules might + * require us to fill additional fields. 
+ */ +-static const u32 br_dst_default_metrics[RTAX_MAX] = { +- [RTAX_MTU - 1] = 1500, +-}; +- + void br_netfilter_rtable_init(struct net_bridge *br) + { + struct rtable *rt = &br->fake_rtable; + + rcuref_init(&rt->dst.__rcuref, 1); + rt->dst.dev = br->dev; +- dst_init_metrics(&rt->dst, br_dst_default_metrics, true); ++ dst_init_metrics(&rt->dst, br->metrics, false); ++ dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); + rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; + rt->dst.ops = &fake_dst_ops; + } +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index 041f6e571a209..df502cc1191c3 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -505,6 +505,7 @@ struct net_bridge { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; ++ u32 metrics[RTAX_MAX]; + #endif + u16 group_fwd_mask; + u16 group_fwd_mask_required; +-- +2.39.5 + diff --git a/queue-6.12/clk-sunxi-ng-d1-add-missing-divider-for-mmc-mod-cloc.patch b/queue-6.12/clk-sunxi-ng-d1-add-missing-divider-for-mmc-mod-cloc.patch new file mode 100644 index 0000000000..7a20422511 --- /dev/null +++ b/queue-6.12/clk-sunxi-ng-d1-add-missing-divider-for-mmc-mod-cloc.patch @@ -0,0 +1,131 @@ +From 2cafda8ec47adb182b2a5f2c2599a332e481b203 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 May 2025 13:06:31 +0100 +Subject: clk: sunxi-ng: d1: Add missing divider for MMC mod clocks +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Andre Przywara + +[ Upstream commit 98e6da673cc6dd46ca9a599802bd2c8f83606710 ] + +The D1/R528/T113 SoCs have a hidden divider of 2 in the MMC mod clocks, +just as other recent SoCs. So far we did not describe that, which led +to the resulting MMC clock rate to be only half of its intended value. + +Use a macro that allows to describe a fixed post-divider, to compensate +for that divisor. + +This brings the MMC performance on those SoCs to its expected level, +so about 23 MB/s for SD cards, instead of the 11 MB/s measured so far. 
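A short worked example of the effect (the numbers are illustrative, not taken from the commit): with a 600 MHz parent and divider fields programmed for an intended 200 MHz mod clock, the undocumented /2 stage makes the real output 100 MHz while the clock framework still reports 200 MHz, so the card ends up clocked at half the negotiated rate. Describing the stage as a fixed post-divider of 2 (fixed_post_div = 2 together with CCU_FEATURE_FIXED_POSTDIV, as the new macro below does) lets the framework fold the /2 into its rate calculation, so the rate the MMC driver requests is what actually reaches the controller.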
+ +Fixes: 35b97bb94111 ("clk: sunxi-ng: Add support for the D1 SoC clocks") +Reported-by: Kuba Szczodrzyński +Signed-off-by: Andre Przywara +Link: https://patch.msgid.link/20250501120631.837186-1-andre.przywara@arm.com +Signed-off-by: Chen-Yu Tsai +Signed-off-by: Sasha Levin +--- + drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 44 ++++++++++++++++------------ + drivers/clk/sunxi-ng/ccu_mp.h | 22 ++++++++++++++ + 2 files changed, 47 insertions(+), 19 deletions(-) + +diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +index 3f095515f54f9..54d2c7f0ed632 100644 +--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c ++++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { + { .hw = &pll_periph0_2x_clk.common.hw }, + { .hw = &pll_audio1_div2_clk.common.hw }, + }; +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); +- +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", ++ mmc0_mmc1_parents, 0x830, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); ++ ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", ++ mmc0_mmc1_parents, 0x834, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); + + static const struct clk_parent_data mmc2_parents[] = { + { .fw_name = "hosc" }, +@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { + { .hw = &pll_periph0_800M_clk.common.hw }, + { .hw = &pll_audio1_div2_clk.common.hw }, + }; +-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, +- 0, 4, /* M */ +- 8, 2, /* P */ +- 24, 3, /* mux */ +- BIT(31), /* gate */ +- 0); ++static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, ++ 0x838, ++ 0, 4, /* M */ ++ 8, 2, /* P */ ++ 24, 3, /* mux */ ++ BIT(31), /* gate */ ++ 2, /* post-div */ ++ 0); + + static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, + 0x84c, BIT(0), 0); +diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h +index 6e50f3728fb5f..7d836a9fb3db3 100644 +--- a/drivers/clk/sunxi-ng/ccu_mp.h ++++ b/drivers/clk/sunxi-ng/ccu_mp.h +@@ -52,6 +52,28 @@ struct ccu_mp { + } \ + } + ++#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ ++ _reg, \ ++ _mshift, _mwidth, \ ++ _pshift, _pwidth, \ ++ _muxshift, _muxwidth, \ ++ _gate, _postdiv, _flags)\ ++ struct ccu_mp _struct = { \ ++ .enable = _gate, \ ++ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ ++ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ ++ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ ++ .fixed_post_div = _postdiv, \ ++ .common = { \ ++ .reg = _reg, \ ++ .features = CCU_FEATURE_FIXED_POSTDIV, \ ++ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ ++ _parents, \ ++ &ccu_mp_ops, \ ++ _flags), \ ++ } \ ++ } ++ + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ +-- +2.39.5 + diff --git a/queue-6.12/devres-introduce-devm_kmemdup_array.patch b/queue-6.12/devres-introduce-devm_kmemdup_array.patch new file mode 100644 index 0000000000..3ed964c4e3 --- /dev/null +++ b/queue-6.12/devres-introduce-devm_kmemdup_array.patch @@ -0,0 +1,42 @@ +From 
717b6d697bf0bf6e37ecd39b6f161dec79d8a9f8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 Feb 2025 11:55:05 +0530 +Subject: devres: Introduce devm_kmemdup_array() + +From: Raag Jadav + +[ Upstream commit a103b833ac3806b816bc993cba77d0b17cf801f1 ] + +Introduce '_array' variant of devm_kmemdup() which is more robust and +consistent with alloc family of helpers. + +Suggested-by: Andy Shevchenko +Signed-off-by: Raag Jadav +Reviewed-by: Dmitry Torokhov +Reviewed-by: Linus Walleij +Signed-off-by: Andy Shevchenko +Stable-dep-of: 7dd7f39fce00 ("ASoC: SOF: Intel: hda: Fix UAF when reloading module") +Signed-off-by: Sasha Levin +--- + include/linux/device/devres.h | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h +index 6b0b265058bcc..9b49f99158508 100644 +--- a/include/linux/device/devres.h ++++ b/include/linux/device/devres.h +@@ -79,6 +79,11 @@ void devm_kfree(struct device *dev, const void *p); + + void * __realloc_size(3) + devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); ++static inline void *devm_kmemdup_array(struct device *dev, const void *src, ++ size_t n, size_t size, gfp_t flags) ++{ ++ return devm_kmemdup(dev, src, size_mul(size, n), flags); ++} + + char * __malloc + devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +-- +2.39.5 + diff --git a/queue-6.12/dmaengine-fsl-edma-fix-return-code-for-unhandled-int.patch b/queue-6.12/dmaengine-fsl-edma-fix-return-code-for-unhandled-int.patch new file mode 100644 index 0000000000..c674ee22cc --- /dev/null +++ b/queue-6.12/dmaengine-fsl-edma-fix-return-code-for-unhandled-int.patch @@ -0,0 +1,40 @@ +From bd32173bfb118760057edd44ad1c81007434cf43 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 24 Apr 2025 13:48:29 +0200 +Subject: dmaengine: fsl-edma: Fix return code for unhandled interrupts + +From: Stefan Wahren + +[ Upstream commit 5e27af0514e2249a9ccc9a762abd3b74e03a1f90 ] + +For fsl,imx93-edma4 two DMA channels share the same interrupt. +So in case fsl_edma3_tx_handler is called for the "wrong" +channel, the return code must be IRQ_NONE. This signalize that +the interrupt wasn't handled. 
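An illustrative aside on the shared-interrupt convention (a minimal sketch with hypothetical register and structure names, not the driver code): a handler on a shared line must return IRQ_NONE when its own device shows nothing pending, so the core can run the other channel's handler for the same line and still detect genuinely spurious interrupts.

  #include <linux/interrupt.h>
  #include <linux/io.h>

  #define EX_CH_INT 0x0                  /* hypothetical per-channel status register */

  struct example_chan {
          void __iomem *regs;
  };

  static irqreturn_t example_tx_handler(int irq, void *dev_id)
  {
          struct example_chan *chan = dev_id;
          u32 intr = readl(chan->regs + EX_CH_INT);

          if (!intr)
                  return IRQ_NONE;        /* not this channel: let the sharing handler run */

          writel(1, chan->regs + EX_CH_INT);      /* acknowledge */
          /* ... complete finished descriptors ... */
          return IRQ_HANDLED;
  }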
+ +Fixes: 72f5801a4e2b ("dmaengine: fsl-edma: integrate v3 support") +Signed-off-by: Stefan Wahren +Reviewed-by: Joy Zou +Link: https://lore.kernel.org/r/20250424114829.9055-1-wahrenst@gmx.net +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/fsl-edma-main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c +index 27645606f900b..4794d58dab556 100644 +--- a/drivers/dma/fsl-edma-main.c ++++ b/drivers/dma/fsl-edma-main.c +@@ -56,7 +56,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id) + + intr = edma_readl_chreg(fsl_chan, ch_int); + if (!intr) +- return IRQ_HANDLED; ++ return IRQ_NONE; + + edma_writel_chreg(fsl_chan, 1, ch_int); + +-- +2.39.5 + diff --git a/queue-6.12/dmaengine-idxd-fix-allowing-write-from-different-add.patch b/queue-6.12/dmaengine-idxd-fix-allowing-write-from-different-add.patch new file mode 100644 index 0000000000..5632311c1d --- /dev/null +++ b/queue-6.12/dmaengine-idxd-fix-allowing-write-from-different-add.patch @@ -0,0 +1,59 @@ +From 56326d96defb6b710206589808a4834e02eedb22 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 21 Apr 2025 10:03:37 -0700 +Subject: dmaengine: idxd: Fix allowing write() from different address spaces + +From: Vinicius Costa Gomes + +[ Upstream commit 8dfa57aabff625bf445548257f7711ef294cd30e ] + +Check if the process submitting the descriptor belongs to the same +address space as the one that opened the file, reject otherwise. + +Fixes: 6827738dc684 ("dmaengine: idxd: add a write() method for applications to submit work") +Signed-off-by: Vinicius Costa Gomes +Signed-off-by: Dave Jiang +Link: https://lore.kernel.org/r/20250421170337.3008875-1-dave.jiang@intel.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/idxd/cdev.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c +index 57f1bf2ab20be..928b8ab8499a7 100644 +--- a/drivers/dma/idxd/cdev.c ++++ b/drivers/dma/idxd/cdev.c +@@ -412,6 +412,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) + if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) + return -EPERM; + ++ if (current->mm != ctx->mm) ++ return -EPERM; ++ + rc = check_vma(wq, vma, __func__); + if (rc < 0) + return rc; +@@ -478,6 +481,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t + ssize_t written = 0; + int i; + ++ if (current->mm != ctx->mm) ++ return -EPERM; ++ + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { + int rc = idxd_submit_user_descriptor(ctx, udesc + i); + +@@ -498,6 +504,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, + struct idxd_device *idxd = wq->idxd; + __poll_t out = 0; + ++ if (current->mm != ctx->mm) ++ return -EPERM; ++ + poll_wait(filp, &wq->err_queue, wait); + spin_lock(&idxd->dev_lock); + if (idxd->sw_err.valid) +-- +2.39.5 + diff --git a/queue-6.12/dmaengine-idxd-fix-poll-return-value.patch b/queue-6.12/dmaengine-idxd-fix-poll-return-value.patch new file mode 100644 index 0000000000..c62e916d00 --- /dev/null +++ b/queue-6.12/dmaengine-idxd-fix-poll-return-value.patch @@ -0,0 +1,41 @@ +From 135fb89563a16df355ef517b5b35b6b3d42bb68f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 May 2025 10:05:48 -0700 +Subject: dmaengine: idxd: Fix ->poll() return value + +From: Dave Jiang + +[ Upstream commit ae74cd15ade833adc289279b5c6f12e78f64d4d7 ] + +The fix to block access from different address space did not return 
a +correct value for ->poll() change. kernel test bot reported that a +return value of type __poll_t is expected rather than int. Fix to return +POLLNVAL to indicate invalid request. + +Fixes: 8dfa57aabff6 ("dmaengine: idxd: Fix allowing write() from different address spaces") +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202505081851.rwD7jVxg-lkp@intel.com/ +Signed-off-by: Dave Jiang +Link: https://lore.kernel.org/r/20250508170548.2747425-1-dave.jiang@intel.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/dma/idxd/cdev.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c +index 928b8ab8499a7..22aa2bab3693c 100644 +--- a/drivers/dma/idxd/cdev.c ++++ b/drivers/dma/idxd/cdev.c +@@ -505,7 +505,7 @@ static __poll_t idxd_cdev_poll(struct file *filp, + __poll_t out = 0; + + if (current->mm != ctx->mm) +- return -EPERM; ++ return POLLNVAL; + + poll_wait(filp, &wq->err_queue, wait); + spin_lock(&idxd->dev_lock); +-- +2.39.5 + diff --git a/queue-6.12/driver-core-split-devres-apis-to-device-devres.h.patch b/queue-6.12/driver-core-split-devres-apis-to-device-devres.h.patch new file mode 100644 index 0000000000..36f397ff18 --- /dev/null +++ b/queue-6.12/driver-core-split-devres-apis-to-device-devres.h.patch @@ -0,0 +1,302 @@ +From fc702426d232dfe8562ebc9ceb4350d2ca41cb3f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 Feb 2025 11:55:03 +0530 +Subject: driver core: Split devres APIs to device/devres.h + +From: Andy Shevchenko + +[ Upstream commit a21cad9312767d26b5257ce0662699bb202cdda1 ] + +device.h is a huge header which is hard to follow and easy to miss +something. Improve that by splitting devres APIs to device/devres.h. + +In particular this helps to speedup the build of the code that includes +device.h solely for a devres APIs. + +While at it, cast the error pointers to __iomem using IOMEM_ERR_PTR() +and fix sparse warnings. 
+ +Signed-off-by: Raag Jadav +Acked-by: Arnd Bergmann +Reviewed-by: Greg Kroah-Hartman +Signed-off-by: Andy Shevchenko +Stable-dep-of: 7dd7f39fce00 ("ASoC: SOF: Intel: hda: Fix UAF when reloading module") +Signed-off-by: Sasha Levin +--- + include/linux/device.h | 119 +------------------------------- + include/linux/device/devres.h | 124 ++++++++++++++++++++++++++++++++++ + 2 files changed, 125 insertions(+), 118 deletions(-) + create mode 100644 include/linux/device/devres.h + +diff --git a/include/linux/device.h b/include/linux/device.h +index 667cb6db90193..39120b172992e 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -26,9 +26,9 @@ + #include + #include + #include +-#include + #include + #include ++#include + #include + #include + #include +@@ -281,123 +281,6 @@ int __must_check device_create_bin_file(struct device *dev, + void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); + +-/* device resource management */ +-typedef void (*dr_release_t)(struct device *dev, void *res); +-typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); +- +-void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, +- int nid, const char *name) __malloc; +-#define devres_alloc(release, size, gfp) \ +- __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) +-#define devres_alloc_node(release, size, gfp, nid) \ +- __devres_alloc_node(release, size, gfp, nid, #release) +- +-void devres_for_each_res(struct device *dev, dr_release_t release, +- dr_match_t match, void *match_data, +- void (*fn)(struct device *, void *, void *), +- void *data); +-void devres_free(void *res); +-void devres_add(struct device *dev, void *res); +-void *devres_find(struct device *dev, dr_release_t release, +- dr_match_t match, void *match_data); +-void *devres_get(struct device *dev, void *new_res, +- dr_match_t match, void *match_data); +-void *devres_remove(struct device *dev, dr_release_t release, +- dr_match_t match, void *match_data); +-int devres_destroy(struct device *dev, dr_release_t release, +- dr_match_t match, void *match_data); +-int devres_release(struct device *dev, dr_release_t release, +- dr_match_t match, void *match_data); +- +-/* devres group */ +-void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); +-void devres_close_group(struct device *dev, void *id); +-void devres_remove_group(struct device *dev, void *id); +-int devres_release_group(struct device *dev, void *id); +- +-/* managed devm_k.alloc/kfree for device drivers */ +-void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2); +-void *devm_krealloc(struct device *dev, void *ptr, size_t size, +- gfp_t gfp) __must_check __realloc_size(3); +-__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, +- const char *fmt, va_list ap) __malloc; +-__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, +- const char *fmt, ...) 
__malloc; +-static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +-{ +- return devm_kmalloc(dev, size, gfp | __GFP_ZERO); +-} +-static inline void *devm_kmalloc_array(struct device *dev, +- size_t n, size_t size, gfp_t flags) +-{ +- size_t bytes; +- +- if (unlikely(check_mul_overflow(n, size, &bytes))) +- return NULL; +- +- return devm_kmalloc(dev, bytes, flags); +-} +-static inline void *devm_kcalloc(struct device *dev, +- size_t n, size_t size, gfp_t flags) +-{ +- return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); +-} +-static inline __realloc_size(3, 4) void * __must_check +-devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags) +-{ +- size_t bytes; +- +- if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) +- return NULL; +- +- return devm_krealloc(dev, p, bytes, flags); +-} +- +-void devm_kfree(struct device *dev, const void *p); +-char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +-const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); +-void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) +- __realloc_size(3); +- +-unsigned long devm_get_free_pages(struct device *dev, +- gfp_t gfp_mask, unsigned int order); +-void devm_free_pages(struct device *dev, unsigned long addr); +- +-#ifdef CONFIG_HAS_IOMEM +-void __iomem *devm_ioremap_resource(struct device *dev, +- const struct resource *res); +-void __iomem *devm_ioremap_resource_wc(struct device *dev, +- const struct resource *res); +- +-void __iomem *devm_of_iomap(struct device *dev, +- struct device_node *node, int index, +- resource_size_t *size); +-#else +- +-static inline +-void __iomem *devm_ioremap_resource(struct device *dev, +- const struct resource *res) +-{ +- return ERR_PTR(-EINVAL); +-} +- +-static inline +-void __iomem *devm_ioremap_resource_wc(struct device *dev, +- const struct resource *res) +-{ +- return ERR_PTR(-EINVAL); +-} +- +-static inline +-void __iomem *devm_of_iomap(struct device *dev, +- struct device_node *node, int index, +- resource_size_t *size) +-{ +- return ERR_PTR(-EINVAL); +-} +- +-#endif +- + /* allows to add/remove a custom action to devres stack */ + void devm_remove_action(struct device *dev, void (*action)(void *), void *data); + void devm_release_action(struct device *dev, void (*action)(void *), void *data); +diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h +new file mode 100644 +index 0000000000000..6b0b265058bcc +--- /dev/null ++++ b/include/linux/device/devres.h +@@ -0,0 +1,124 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _DEVICE_DEVRES_H_ ++#define _DEVICE_DEVRES_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct device; ++struct device_node; ++struct resource; ++ ++/* device resource management */ ++typedef void (*dr_release_t)(struct device *dev, void *res); ++typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); ++ ++void * __malloc ++__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name); ++#define devres_alloc(release, size, gfp) \ ++ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) ++#define devres_alloc_node(release, size, gfp, nid) \ ++ __devres_alloc_node(release, size, gfp, nid, #release) ++ ++void devres_for_each_res(struct device *dev, dr_release_t release, ++ dr_match_t match, void *match_data, ++ void (*fn)(struct device *, void *, void *), ++ void *data); ++void devres_free(void *res); 
++void devres_add(struct device *dev, void *res); ++void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); ++void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data); ++void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); ++int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); ++int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); ++ ++/* devres group */ ++void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); ++void devres_close_group(struct device *dev, void *id); ++void devres_remove_group(struct device *dev, void *id); ++int devres_release_group(struct device *dev, void *id); ++ ++/* managed devm_k.alloc/kfree for device drivers */ ++void * __alloc_size(2) ++devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); ++void * __must_check __realloc_size(3) ++devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp); ++static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) ++{ ++ return devm_kmalloc(dev, size, gfp | __GFP_ZERO); ++} ++static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) ++{ ++ size_t bytes; ++ ++ if (unlikely(check_mul_overflow(n, size, &bytes))) ++ return NULL; ++ ++ return devm_kmalloc(dev, bytes, flags); ++} ++static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) ++{ ++ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); ++} ++static inline __realloc_size(3, 4) void * __must_check ++devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags) ++{ ++ size_t bytes; ++ ++ if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) ++ return NULL; ++ ++ return devm_krealloc(dev, p, bytes, flags); ++} ++ ++void devm_kfree(struct device *dev, const void *p); ++ ++void * __realloc_size(3) ++devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); ++ ++char * __malloc ++devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); ++const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); ++char * __printf(3, 0) __malloc ++devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); ++char * __printf(3, 4) __malloc ++devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); ++ ++unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); ++void devm_free_pages(struct device *dev, unsigned long addr); ++ ++#ifdef CONFIG_HAS_IOMEM ++ ++void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); ++void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res); ++ ++void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, ++ resource_size_t *size); ++#else ++ ++static inline ++void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res) ++{ ++ return IOMEM_ERR_PTR(-EINVAL); ++} ++ ++static inline ++void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res) ++{ ++ return IOMEM_ERR_PTR(-EINVAL); ++} ++ ++static inline ++void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, ++ resource_size_t *size) ++{ ++ return IOMEM_ERR_PTR(-EINVAL); ++} ++ ++#endif ++ ++#endif /* _DEVICE_DEVRES_H_ */ +-- +2.39.5 + diff --git a/queue-6.12/espintcp-fix-skb-leaks.patch 
b/queue-6.12/espintcp-fix-skb-leaks.patch new file mode 100644 index 0000000000..69f445d3a3 --- /dev/null +++ b/queue-6.12/espintcp-fix-skb-leaks.patch @@ -0,0 +1,73 @@ +From 95805e70d0c9ce56aec6d13018222ec583471e39 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 9 Apr 2025 15:59:56 +0200 +Subject: espintcp: fix skb leaks + +From: Sabrina Dubroca + +[ Upstream commit 63c1f19a3be3169e51a5812d22a6d0c879414076 ] + +A few error paths are missing a kfree_skb. + +Fixes: e27cca96cd68 ("xfrm: add espintcp (RFC 8229)") +Signed-off-by: Sabrina Dubroca +Reviewed-by: Simon Horman +Signed-off-by: Steffen Klassert +Signed-off-by: Sasha Levin +--- + net/ipv4/esp4.c | 4 +++- + net/ipv6/esp6.c | 4 +++- + net/xfrm/espintcp.c | 4 +++- + 3 files changed, 9 insertions(+), 3 deletions(-) + +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index f3281312eb5eb..f0a7f06df3ade 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -199,8 +199,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + + sk = esp_find_tcp_sk(x); + err = PTR_ERR_OR_ZERO(sk); +- if (err) ++ if (err) { ++ kfree_skb(skb); + goto out; ++ } + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index b2400c226a325..3810cfbc44103 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -216,8 +216,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + + sk = esp6_find_tcp_sk(x); + err = PTR_ERR_OR_ZERO(sk); +- if (err) ++ if (err) { ++ kfree_skb(skb); + goto out; ++ } + + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) +diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c +index fe82e2d073006..fc7a603b04f13 100644 +--- a/net/xfrm/espintcp.c ++++ b/net/xfrm/espintcp.c +@@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) + struct espintcp_ctx *ctx = espintcp_getctx(sk); + + if (skb_queue_len(&ctx->out_queue) >= +- READ_ONCE(net_hotdata.max_backlog)) ++ READ_ONCE(net_hotdata.max_backlog)) { ++ kfree_skb(skb); + return -ENOBUFS; ++ } + + __skb_queue_tail(&ctx->out_queue, skb); + +-- +2.39.5 + diff --git a/queue-6.12/espintcp-remove-encap-socket-caching-to-avoid-refere.patch b/queue-6.12/espintcp-remove-encap-socket-caching-to-avoid-refere.patch new file mode 100644 index 0000000000..edb7fc1381 --- /dev/null +++ b/queue-6.12/espintcp-remove-encap-socket-caching-to-avoid-refere.patch @@ -0,0 +1,252 @@ +From 0ba2b9a29e7a24e5f6c1d4bed17f06a0bffb8578 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 9 Apr 2025 15:59:57 +0200 +Subject: espintcp: remove encap socket caching to avoid reference leak + +From: Sabrina Dubroca + +[ Upstream commit 028363685bd0b7a19b4a820f82dd905b1dc83999 ] + +The current scheme for caching the encap socket can lead to reference +leaks when we try to delete the netns. + +The reference chain is: xfrm_state -> enacp_sk -> netns + +Since the encap socket is a userspace socket, it holds a reference on +the netns. If we delete the espintcp state (through flush or +individual delete) before removing the netns, the reference on the +socket is dropped and the netns is correctly deleted. Otherwise, the +netns may not be reachable anymore (if all processes within the ns +have terminated), so we cannot delete the xfrm state to drop its +reference on the socket. + +This patch results in a small (~2% in my tests) performance +regression. 
+ +A GC-type mechanism could be added for the socket cache, to clear +references if the state hasn't been used "recently", but it's a lot +more complex than just not caching the socket. + +Fixes: e27cca96cd68 ("xfrm: add espintcp (RFC 8229)") +Signed-off-by: Sabrina Dubroca +Reviewed-by: Simon Horman +Signed-off-by: Steffen Klassert +Signed-off-by: Sasha Levin +--- + include/net/xfrm.h | 1 - + net/ipv4/esp4.c | 49 ++++--------------------------------------- + net/ipv6/esp6.c | 49 ++++--------------------------------------- + net/xfrm/xfrm_state.c | 3 --- + 4 files changed, 8 insertions(+), 94 deletions(-) + +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index 83e9ef25b8d0d..1484dd15a3694 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -233,7 +233,6 @@ struct xfrm_state { + + /* Data for encapsulator */ + struct xfrm_encap_tmpl *encap; +- struct sock __rcu *encap_sk; + + /* NAT keepalive */ + u32 nat_keepalive_interval; /* seconds */ +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c +index f0a7f06df3ade..cbe4c6fc8b8e9 100644 +--- a/net/ipv4/esp4.c ++++ b/net/ipv4/esp4.c +@@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) + } + + #ifdef CONFIG_INET_ESPINTCP +-struct esp_tcp_sk { +- struct sock *sk; +- struct rcu_head rcu; +-}; +- +-static void esp_free_tcp_sk(struct rcu_head *head) +-{ +- struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); +- +- sock_put(esk->sk); +- kfree(esk); +-} +- + static struct sock *esp_find_tcp_sk(struct xfrm_state *x) + { + struct xfrm_encap_tmpl *encap = x->encap; + struct net *net = xs_net(x); +- struct esp_tcp_sk *esk; + __be16 sport, dport; +- struct sock *nsk; + struct sock *sk; + +- sk = rcu_dereference(x->encap_sk); +- if (sk && sk->sk_state == TCP_ESTABLISHED) +- return sk; +- + spin_lock_bh(&x->lock); + sport = encap->encap_sport; + dport = encap->encap_dport; +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (sk && sk == nsk) { +- esk = kmalloc(sizeof(*esk), GFP_ATOMIC); +- if (!esk) { +- spin_unlock_bh(&x->lock); +- return ERR_PTR(-ENOMEM); +- } +- RCU_INIT_POINTER(x->encap_sk, NULL); +- esk->sk = sk; +- call_rcu(&esk->rcu, esp_free_tcp_sk); +- } + spin_unlock_bh(&x->lock); + + sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, +@@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x) + return ERR_PTR(-EINVAL); + } + +- spin_lock_bh(&x->lock); +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (encap->encap_sport != sport || +- encap->encap_dport != dport) { +- sock_put(sk); +- sk = nsk ?: ERR_PTR(-EREMCHG); +- } else if (sk == nsk) { +- sock_put(sk); +- } else { +- rcu_assign_pointer(x->encap_sk, sk); +- } +- spin_unlock_bh(&x->lock); +- + return sk; + } + +@@ -211,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + err = espintcp_push_skb(sk, skb); + bh_unlock_sock(sk); + ++ sock_put(sk); ++ + out: + rcu_read_unlock(); + return err; +@@ -394,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, + if (IS_ERR(sk)) + return ERR_CAST(sk); + ++ sock_put(sk); ++ + *lenp = htons(len); + esph = (struct ip_esp_hdr *)(lenp + 1); + +diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c +index 3810cfbc44103..62d17d7f6d9a9 100644 +--- a/net/ipv6/esp6.c ++++ b/net/ipv6/esp6.c +@@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) + } + + #ifdef 
CONFIG_INET6_ESPINTCP +-struct esp_tcp_sk { +- struct sock *sk; +- struct rcu_head rcu; +-}; +- +-static void esp_free_tcp_sk(struct rcu_head *head) +-{ +- struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); +- +- sock_put(esk->sk); +- kfree(esk); +-} +- + static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) + { + struct xfrm_encap_tmpl *encap = x->encap; + struct net *net = xs_net(x); +- struct esp_tcp_sk *esk; + __be16 sport, dport; +- struct sock *nsk; + struct sock *sk; + +- sk = rcu_dereference(x->encap_sk); +- if (sk && sk->sk_state == TCP_ESTABLISHED) +- return sk; +- + spin_lock_bh(&x->lock); + sport = encap->encap_sport; + dport = encap->encap_dport; +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (sk && sk == nsk) { +- esk = kmalloc(sizeof(*esk), GFP_ATOMIC); +- if (!esk) { +- spin_unlock_bh(&x->lock); +- return ERR_PTR(-ENOMEM); +- } +- RCU_INIT_POINTER(x->encap_sk, NULL); +- esk->sk = sk; +- call_rcu(&esk->rcu, esp_free_tcp_sk); +- } + spin_unlock_bh(&x->lock); + + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, +@@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) + return ERR_PTR(-EINVAL); + } + +- spin_lock_bh(&x->lock); +- nsk = rcu_dereference_protected(x->encap_sk, +- lockdep_is_held(&x->lock)); +- if (encap->encap_sport != sport || +- encap->encap_dport != dport) { +- sock_put(sk); +- sk = nsk ?: ERR_PTR(-EREMCHG); +- } else if (sk == nsk) { +- sock_put(sk); +- } else { +- rcu_assign_pointer(x->encap_sk, sk); +- } +- spin_unlock_bh(&x->lock); +- + return sk; + } + +@@ -228,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) + err = espintcp_push_skb(sk, skb); + bh_unlock_sock(sk); + ++ sock_put(sk); ++ + out: + rcu_read_unlock(); + return err; +@@ -424,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, + if (IS_ERR(sk)) + return ERR_CAST(sk); + ++ sock_put(sk); ++ + *lenp = htons(len); + esph = (struct ip_esp_hdr *)(lenp + 1); + +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index 711e816fc4041..a526b3bb8b88e 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -773,9 +773,6 @@ int __xfrm_state_delete(struct xfrm_state *x) + xfrm_nat_keepalive_state_updated(x); + spin_unlock(&net->xfrm.xfrm_state_lock); + +- if (x->encap_sk) +- sock_put(rcu_dereference_raw(x->encap_sk)); +- + xfrm_dev_state_delete(x); + + /* All xfrm_state objects are created by xfrm_state_alloc. +-- +2.39.5 + diff --git a/queue-6.12/ice-fix-lacp-bonds-without-sriov-environment.patch b/queue-6.12/ice-fix-lacp-bonds-without-sriov-environment.patch new file mode 100644 index 0000000000..8241ea74f5 --- /dev/null +++ b/queue-6.12/ice-fix-lacp-bonds-without-sriov-environment.patch @@ -0,0 +1,68 @@ +From 2e4a23bc2b9f2c9e9f4ec2dfb1d4c682985dc90e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 28 Apr 2025 15:33:39 -0400 +Subject: ice: Fix LACP bonds without SRIOV environment + +From: Dave Ertman + +[ Upstream commit 6c778f1b839b63525b30046c9d1899424a62be0a ] + +If an aggregate has the following conditions: +- The SRIOV LAG DDP package has been enabled +- The bond is in 802.3ad LACP mode +- The bond is disqualified from supporting SRIOV VF LAG +- Both interfaces were added simultaneously to the bond (same command) + +Then there is a chance that the two interfaces will be assigned different +LACP Aggregator ID's. This will cause a failure of the LACP control over +the bond. 
+ +To fix this, we can detect if the primary interface for the bond (as +defined by the driver) is not in switchdev mode, and exit the setup flow +if so. + +Reproduction steps: + +%> ip link add bond0 type bond mode 802.3ad miimon 100 +%> ip link set bond0 up +%> ifenslave bond0 eth0 eth1 +%> cat /proc/net/bonding/bond0 | grep Agg + +Check for Aggregator IDs that differ. + +Fixes: ec5a6c5f79ed ("ice: process events created by lag netdev event handler") +Reviewed-by: Aleksandr Loktionov +Signed-off-by: Dave Ertman +Tested-by: Sujai Buvaneswaran +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_lag.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c +index 22371011c2492..2410aee59fb2d 100644 +--- a/drivers/net/ethernet/intel/ice/ice_lag.c ++++ b/drivers/net/ethernet/intel/ice/ice_lag.c +@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) + */ + if (!primary_lag) { + lag->primary = true; ++ if (!ice_is_switchdev_running(lag->pf)) ++ return; ++ + /* Configure primary's SWID to be shared */ + ice_lag_primary_swid(lag, true); + primary_lag = lag; + } else { + u16 swid; + ++ if (!ice_is_switchdev_running(primary_lag->pf)) ++ return; ++ + swid = primary_lag->pf->hw.port_info->sw_id; + ice_lag_set_swid(swid, lag, true); + ice_lag_add_prune_list(primary_lag, lag->pf); +-- +2.39.5 + diff --git a/queue-6.12/ice-fix-vf-num_mac-count-with-port-representors.patch b/queue-6.12/ice-fix-vf-num_mac-count-with-port-representors.patch new file mode 100644 index 0000000000..f8a1c24546 --- /dev/null +++ b/queue-6.12/ice-fix-vf-num_mac-count-with-port-representors.patch @@ -0,0 +1,53 @@ +From 401950f8fdcc2432a84c2b69c27167093aca9e71 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 10 Apr 2025 11:13:52 -0700 +Subject: ice: fix vf->num_mac count with port representors + +From: Jacob Keller + +[ Upstream commit bbd95160a03dbfcd01a541f25c27ddb730dfbbd5 ] + +The ice_vc_repr_add_mac() function indicates that it does not store the MAC +address filters in the firmware. However, it still increments vf->num_mac. +This is incorrect, as vf->num_mac should represent the number of MAC +filters currently programmed to firmware. + +Indeed, we only perform this increment if the requested filter is a unicast +address that doesn't match the existing vf->hw_lan_addr. In addition, +ice_vc_repr_del_mac() does not decrement the vf->num_mac counter. This +results in the counter becoming out of sync with the actual count. + +As it turns out, vf->num_mac is currently only used in legacy made without +port representors. The single place where the value is checked is for +enforcing a filter limit on untrusted VFs. + +Upcoming patches to support VF Live Migration will use this value when +determining the size of the TLV for MAC address filters. Fix the +representor mode function to stop incrementing the counter incorrectly. 
+ +Fixes: ac19e03ef780 ("ice: allow process VF opcodes in different ways") +Signed-off-by: Jacob Keller +Reviewed-by: Michal Swiatkowski +Reviewed-by: Simon Horman +Tested-by: Sujai Buvaneswaran +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_virtchnl.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c +index c8c1d48ff793d..87ffd25b268a2 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c +@@ -3877,7 +3877,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) + } + + ice_vfhw_mac_add(vf, &al->list[i]); +- vf->num_mac++; + break; + } + +-- +2.39.5 + diff --git a/queue-6.12/idpf-fix-idpf_vport_splitq_napi_poll.patch b/queue-6.12/idpf-fix-idpf_vport_splitq_napi_poll.patch new file mode 100644 index 0000000000..4d7f6086a3 --- /dev/null +++ b/queue-6.12/idpf-fix-idpf_vport_splitq_napi_poll.patch @@ -0,0 +1,72 @@ +From 446643ea17c91331cadc3b5b7766ccf938d4a6e1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 20 May 2025 12:40:30 +0000 +Subject: idpf: fix idpf_vport_splitq_napi_poll() + +From: Eric Dumazet + +[ Upstream commit 407e0efdf8baf1672876d5948b75049860a93e59 ] + +idpf_vport_splitq_napi_poll() can incorrectly return @budget +after napi_complete_done() has been called. + +This violates NAPI rules, because after napi_complete_done(), +current thread lost napi ownership. + +Move the test against POLL_MODE before the napi_complete_done(). + +Fixes: c2d548cad150 ("idpf: add TX splitq napi poll support") +Reported-by: Peter Newman +Closes: https://lore.kernel.org/netdev/20250520121908.1805732-1-edumazet@google.com/T/#u +Signed-off-by: Eric Dumazet +Cc: Joshua Hay +Cc: Alan Brady +Cc: Madhu Chittim +Cc: Phani Burra +Cc: Pavan Kumar Linga +Link: https://patch.msgid.link/20250520124030.1983936-1-edumazet@google.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c +index afc902ae4763e..623bf17f87f9c 100644 +--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c ++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c +@@ -4022,6 +4022,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) + return budget; + } + ++ /* Switch to poll mode in the tear-down path after sending disable ++ * queues virtchnl message, as the interrupts will be disabled after ++ * that. 
++ */ ++ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, ++ q_vector->tx[0]))) ++ return budget; ++ + work_done = min_t(int, work_done, budget - 1); + + /* Exit the polling mode, but don't re-enable interrupts if stack might +@@ -4032,15 +4040,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) + else + idpf_vport_intr_set_wb_on_itr(q_vector); + +- /* Switch to poll mode in the tear-down path after sending disable +- * queues virtchnl message, as the interrupts will be disabled after +- * that +- */ +- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, +- q_vector->tx[0]))) +- return budget; +- else +- return work_done; ++ return work_done; + } + + /** +-- +2.39.5 + diff --git a/queue-6.12/idpf-fix-null-ptr-deref-in-idpf_features_check.patch b/queue-6.12/idpf-fix-null-ptr-deref-in-idpf_features_check.patch new file mode 100644 index 0000000000..7a18f27ba3 --- /dev/null +++ b/queue-6.12/idpf-fix-null-ptr-deref-in-idpf_features_check.patch @@ -0,0 +1,121 @@ +From 34c4d387ef995adb5030134689926e282e702160 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 11 Apr 2025 09:00:35 -0700 +Subject: idpf: fix null-ptr-deref in idpf_features_check + +From: Pavan Kumar Linga + +[ Upstream commit 2dabe349f7882ff1407a784d54d8541909329088 ] + +idpf_features_check is used to validate the TX packet. skb header +length is compared with the hardware supported value received from +the device control plane. The value is stored in the adapter structure +and to access it, vport pointer is used. During reset all the vports +are released and the vport pointer that the netdev private structure +points to is NULL. + +To avoid null-ptr-deref, store the max header length value in netdev +private structure. This also helps to cache the value and avoid +accessing adapter pointer in hot path. + +BUG: kernel NULL pointer dereference, address: 0000000000000068 +... +RIP: 0010:idpf_features_check+0x6d/0xe0 [idpf] +Call Trace: + + ? __die+0x23/0x70 + ? page_fault_oops+0x154/0x520 + ? exc_page_fault+0x76/0x190 + ? asm_exc_page_fault+0x26/0x30 + ? idpf_features_check+0x6d/0xe0 [idpf] + netif_skb_features+0x88/0x310 + validate_xmit_skb+0x2a/0x2b0 + validate_xmit_skb_list+0x4c/0x70 + sch_direct_xmit+0x19d/0x3a0 + __dev_queue_xmit+0xb74/0xe70 + ... 
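The fix described above boils down to caching a limit in data that stays valid for the netdev's lifetime instead of chasing a pointer that a reset can clear. A hedged userspace C sketch of that pattern follows, with invented names (nothing here is idpf code):

/*
 * Illustrative sketch only: read a limit from data that stays valid for the
 * lifetime of the object instead of chasing a pointer that a reset path may
 * have cleared.  Invented names; this is not idpf code.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_adapter {
	unsigned short max_tx_hdr_size;
};

struct fake_priv {
	struct fake_adapter *adapter;	/* may be NULL while a reset is running */
	unsigned short max_tx_hdr_size;	/* cached at configuration time */
};

/* Racy variant: dereferences adapter, which can be NULL during reset. */
static int hdr_ok_racy(const struct fake_priv *priv, size_t len)
{
	return len <= priv->adapter->max_tx_hdr_size;
}

/* Safe variant: uses the value cached when the netdev was configured. */
static int hdr_ok(const struct fake_priv *priv, size_t len)
{
	return len <= priv->max_tx_hdr_size;
}

int main(void)
{
	struct fake_priv priv = { .adapter = NULL, .max_tx_hdr_size = 128 };

	if (priv.adapter)	/* never true inside the reset window */
		printf("racy check: %d\n", hdr_ok_racy(&priv, 64));
	printf("64 byte header ok: %d\n", hdr_ok(&priv, 64));
	return 0;
}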
+ +Fixes: a251eee62133 ("idpf: add SRIOV support and other ndo_ops") +Reviewed-by: Madhu Chititm +Signed-off-by: Pavan Kumar Linga +Reviewed-by: Simon Horman +Tested-by: Samuel Salin +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/idpf/idpf.h | 2 ++ + drivers/net/ethernet/intel/idpf/idpf_lib.c | 10 ++++++---- + 2 files changed, 8 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h +index aef0e9775a330..70dbf80f3bb75 100644 +--- a/drivers/net/ethernet/intel/idpf/idpf.h ++++ b/drivers/net/ethernet/intel/idpf/idpf.h +@@ -143,6 +143,7 @@ enum idpf_vport_state { + * @vport_id: Vport identifier + * @link_speed_mbps: Link speed in mbps + * @vport_idx: Relative vport index ++ * @max_tx_hdr_size: Max header length hardware can support + * @state: See enum idpf_vport_state + * @netstats: Packet and byte stats + * @stats_lock: Lock to protect stats update +@@ -153,6 +154,7 @@ struct idpf_netdev_priv { + u32 vport_id; + u32 link_speed_mbps; + u16 vport_idx; ++ u16 max_tx_hdr_size; + enum idpf_vport_state state; + struct rtnl_link_stats64 netstats; + spinlock_t stats_lock; +diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c +index 5ce663d04de00..615e74d038457 100644 +--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c ++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c +@@ -723,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) + np->vport = vport; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; ++ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); + vport->netdev = netdev; + + return idpf_init_mac_addr(vport, netdev); +@@ -740,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) + np->adapter = adapter; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; ++ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); + + spin_lock_init(&np->stats_lock); + +@@ -2189,8 +2191,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) + { +- struct idpf_vport *vport = idpf_netdev_to_vport(netdev); +- struct idpf_adapter *adapter = vport->adapter; ++ struct idpf_netdev_priv *np = netdev_priv(netdev); ++ u16 max_tx_hdr_size = np->max_tx_hdr_size; + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are +@@ -2213,7 +2215,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, + goto unsupported; + + len = skb_network_header_len(skb); +- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) ++ if (unlikely(len > max_tx_hdr_size)) + goto unsupported; + + if (!skb->encapsulation) +@@ -2226,7 +2228,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, + + /* IPLEN can support at most 127 dwords */ + len = skb_inner_network_header_len(skb); +- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) ++ if (unlikely(len > max_tx_hdr_size)) + goto unsupported; + + /* No need to validate L4LEN as TCP is the only protocol with a +-- +2.39.5 + diff --git a/queue-6.12/io_uring-fix-overflow-resched-cqe-reordering.patch b/queue-6.12/io_uring-fix-overflow-resched-cqe-reordering.patch new file mode 100644 index 0000000000..ce8df7e714 --- /dev/null +++ b/queue-6.12/io_uring-fix-overflow-resched-cqe-reordering.patch @@ -0,0 +1,38 @@ +From 806f99af4bb288fd361c7e61f4f7265bb2ab9337 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 17 May 2025 13:27:37 +0100 +Subject: io_uring: fix 
overflow resched cqe reordering + +From: Pavel Begunkov + +[ Upstream commit a7d755ed9ce9738af3db602eb29d32774a180bc7 ] + +Leaving the CQ critical section in the middle of a overflow flushing +can cause cqe reordering since the cache cq pointers are reset and any +new cqe emitters that might get called in between are not going to be +forced into io_cqe_cache_refill(). + +Fixes: eac2ca2d682f9 ("io_uring: check if we need to reschedule during overflow flush") +Signed-off-by: Pavel Begunkov +Link: https://lore.kernel.org/r/90ba817f1a458f091f355f407de1c911d2b93bbf.1747483784.git.asml.silence@gmail.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + io_uring/io_uring.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c +index 985c87ea09a90..bd3b3f7a6f6ca 100644 +--- a/io_uring/io_uring.c ++++ b/io_uring/io_uring.c +@@ -630,6 +630,7 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) + * to care for a non-real case. + */ + if (need_resched()) { ++ ctx->cqe_sentinel = ctx->cqe_cached; + io_cq_unlock_post(ctx); + mutex_unlock(&ctx->uring_lock); + cond_resched(); +-- +2.39.5 + diff --git a/queue-6.12/irqchip-riscv-imsic-start-local-sync-timer-on-correc.patch b/queue-6.12/irqchip-riscv-imsic-start-local-sync-timer-on-correc.patch new file mode 100644 index 0000000000..4839160681 --- /dev/null +++ b/queue-6.12/irqchip-riscv-imsic-start-local-sync-timer-on-correc.patch @@ -0,0 +1,71 @@ +From 2581f8a6f76a64b0a93c9fab52f608317e3785aa Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 May 2025 10:13:20 -0700 +Subject: irqchip/riscv-imsic: Start local sync timer on correct CPU + +From: Andrew Bresticker + +[ Upstream commit 08fb624802d8786253994d8ebdbbcdaa186f04f5 ] + +When starting the local sync timer to synchronize the state of a remote +CPU it should be added on the CPU to be synchronized, not the initiating +CPU. This results in interrupt delivery being delayed until the timer +eventually runs (due to another mask/unmask/migrate operation) on the +target CPU. 
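The underlying pattern is simply that a helper which used to infer its target from the calling context now takes the target CPU as an explicit parameter. A tiny illustrative userspace C sketch, with made-up names (this is not the IMSIC driver):

/*
 * Illustrative sketch only: a helper that used to derive its target from the
 * calling context now takes the target CPU as an explicit parameter, so a
 * remote caller arms the work where it is actually needed.  Invented names;
 * this is not the IMSIC driver.
 */
#include <stdio.h>

struct fake_timer {
	int armed_on;		/* which "CPU" the deferred work will run on */
};

/* The target is passed in rather than taken from the caller's own CPU. */
static void timer_start_on(struct fake_timer *t, int cpu)
{
	t->armed_on = cpu;
}

static void sync_remote_cpu(struct fake_timer *target_timer, int target_cpu)
{
	/* the caller may be running anywhere; arm the timer on the target */
	timer_start_on(target_timer, target_cpu);
}

int main(void)
{
	struct fake_timer t = { .armed_on = -1 };

	sync_remote_cpu(&t, 3);
	printf("timer armed on cpu %d\n", t.armed_on);
	return 0;
}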
+ +Fixes: 0f67911e821c ("irqchip/riscv-imsic: Separate next and previous pointers in IMSIC vector") +Signed-off-by: Andrew Bresticker +Signed-off-by: Thomas Gleixner +Reviewed-by: Anup Patel +Link: https://lore.kernel.org/all/20250514171320.3494917-1-abrestic@rivosinc.com +Signed-off-by: Sasha Levin +--- + drivers/irqchip/irq-riscv-imsic-state.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c +index 1aeba76d72795..06ff0e17c0c33 100644 +--- a/drivers/irqchip/irq-riscv-imsic-state.c ++++ b/drivers/irqchip/irq-riscv-imsic-state.c +@@ -186,17 +186,17 @@ static bool __imsic_local_sync(struct imsic_local_priv *lpriv) + } + + #ifdef CONFIG_SMP +-static void __imsic_local_timer_start(struct imsic_local_priv *lpriv) ++static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) + { + lockdep_assert_held(&lpriv->lock); + + if (!timer_pending(&lpriv->timer)) { + lpriv->timer.expires = jiffies + 1; +- add_timer_on(&lpriv->timer, smp_processor_id()); ++ add_timer_on(&lpriv->timer, cpu); + } + } + #else +-static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv) ++static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) + { + } + #endif +@@ -211,7 +211,7 @@ void imsic_local_sync_all(bool force_all) + if (force_all) + bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); + if (!__imsic_local_sync(lpriv)) +- __imsic_local_timer_start(lpriv); ++ __imsic_local_timer_start(lpriv, smp_processor_id()); + + raw_spin_unlock_irqrestore(&lpriv->lock, flags); + } +@@ -256,7 +256,7 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu + return; + } + +- __imsic_local_timer_start(lpriv); ++ __imsic_local_timer_start(lpriv, cpu); + } + } + #else +-- +2.39.5 + diff --git a/queue-6.12/kernel-fork-only-call-untrack_pfn_clear-on-vmas-dupl.patch b/queue-6.12/kernel-fork-only-call-untrack_pfn_clear-on-vmas-dupl.patch new file mode 100644 index 0000000000..07c5620f89 --- /dev/null +++ b/queue-6.12/kernel-fork-only-call-untrack_pfn_clear-on-vmas-dupl.patch @@ -0,0 +1,98 @@ +From 870e364f0dc42dab6968ff09b2ae082e9b96999e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 22 Apr 2025 16:49:42 +0200 +Subject: kernel/fork: only call untrack_pfn_clear() on VMAs duplicated for + fork() + +From: David Hildenbrand + +[ Upstream commit e9f180d7cfde23b9f8eebd60272465176373ab2c ] + +Not intuitive, but vm_area_dup() located in kernel/fork.c is not only used +for duplicating VMAs during fork(), but also for duplicating VMAs when +splitting VMAs or when mremap()'ing them. + +VM_PFNMAP mappings can at least get ordinarily mremap()'ed (no change in +size) and apparently also shrunk during mremap(), which implies +duplicating the VMA in __split_vma() first. + +In case of ordinary mremap() (no change in size), we first duplicate the +VMA in copy_vma_and_data()->copy_vma() to then call untrack_pfn_clear() on +the old VMA: we effectively move the VM_PAT reservation. So the +untrack_pfn_clear() call on the new VMA duplicating is wrong in that +context. + +Splitting of VMAs seems problematic, because we don't duplicate/adjust the +reservation when splitting the VMA. Instead, in memtype_erase() -- called +during zapping/munmap -- we shrink a reservation in case only the end +address matches: Assume we split a VMA into A and B, both would share a +reservation until B is unmapped. 
+ +So when unmapping B, the reservation would be updated to cover only A. +When unmapping A, we would properly remove the now-shrunk reservation. +That scenario describes the mremap() shrinking (old_size > new_size), +where we split + unmap B, and the untrack_pfn_clear() on the new VMA when +is wrong. + +What if we manage to split a VM_PFNMAP VMA into A and B and unmap A first? +It would be broken because we would never free the reservation. Likely, +there are ways to trigger such a VMA split outside of mremap(). + +Affecting other VMA duplication was not intended, vm_area_dup() being used +outside of kernel/fork.c was an oversight. So let's fix that for; how to +handle VMA splits better should be investigated separately. + +With a simple reproducer that uses mprotect() to split such a VMA I can +trigger + +x86/PAT: pat_mremap:26448 freeing invalid memtype [mem 0x00000000-0x00000fff] + +Link: https://lkml.kernel.org/r/20250422144942.2871395-1-david@redhat.com +Fixes: dc84bc2aba85 ("x86/mm/pat: Fix VM_PAT handling when fork() fails in copy_page_range()") +Signed-off-by: David Hildenbrand +Reviewed-by: Lorenzo Stoakes +Cc: Ingo Molnar +Cc: Dave Hansen +Cc: Andy Lutomirski +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: Borislav Petkov +Cc: Rik van Riel +Cc: "H. Peter Anvin" +Cc: Linus Torvalds +Signed-off-by: Andrew Morton +Signed-off-by: Sasha Levin +--- + kernel/fork.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/kernel/fork.c b/kernel/fork.c +index 12decadff468f..97c9afe3efc38 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -505,10 +505,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) + vma_numab_state_init(new); + dup_anon_vma_name(orig, new); + +- /* track_pfn_copy() will later take care of copying internal state. */ +- if (unlikely(new->vm_flags & VM_PFNMAP)) +- untrack_pfn_clear(new); +- + return new; + } + +@@ -699,6 +695,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, + tmp = vm_area_dup(mpnt); + if (!tmp) + goto fail_nomem; ++ ++ /* track_pfn_copy() will later take care of copying internal state. */ ++ if (unlikely(tmp->vm_flags & VM_PFNMAP)) ++ untrack_pfn_clear(tmp); ++ + retval = vma_dup_policy(mpnt, tmp); + if (retval) + goto fail_nomem_policy; +-- +2.39.5 + diff --git a/queue-6.12/loop-don-t-require-write_iter-for-writable-files-in-.patch b/queue-6.12/loop-don-t-require-write_iter-for-writable-files-in-.patch new file mode 100644 index 0000000000..43dd008238 --- /dev/null +++ b/queue-6.12/loop-don-t-require-write_iter-for-writable-files-in-.patch @@ -0,0 +1,43 @@ +From 900da003d440b57c7f6208c7fb9b3c4ad07448be Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 20 May 2025 15:54:20 +0200 +Subject: loop: don't require ->write_iter for writable files in loop_configure + +From: Christoph Hellwig + +[ Upstream commit 355341e4359b2d5edf0ed5e117f7e9e7a0a5dac0 ] + +Block devices can be opened read-write even if they can't be written to +for historic reasons. Remove the check requiring file->f_op->write_iter +when the block devices was opened in loop_configure. The call to +loop_check_backing_file just below ensures the ->write_iter is present +for backing files opened for writing, which is the only check that is +actually needed. 
+ +Fixes: f5c84eff634b ("loop: Add sanity check for read/write_iter") +Reported-by: Christian Hesse +Signed-off-by: Christoph Hellwig +Link: https://lore.kernel.org/r/20250520135420.1177312-1-hch@lst.de +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + drivers/block/loop.c | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index fa9c77b8f4d23..0843d229b0f76 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -969,9 +969,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, + if (!file) + return -EBADF; + +- if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter) +- return -EINVAL; +- + error = loop_check_backing_file(file); + if (error) + return error; +-- +2.39.5 + diff --git a/queue-6.12/net-dwmac-sun8i-use-parsed-internal-phy-address-inst.patch b/queue-6.12/net-dwmac-sun8i-use-parsed-internal-phy-address-inst.patch new file mode 100644 index 0000000000..24bf802c90 --- /dev/null +++ b/queue-6.12/net-dwmac-sun8i-use-parsed-internal-phy-address-inst.patch @@ -0,0 +1,48 @@ +From d04bb166bb797332ba718ede3bce2b12cb219d8a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 19 May 2025 18:49:36 +0200 +Subject: net: dwmac-sun8i: Use parsed internal PHY address instead of 1 + +From: Paul Kocialkowski + +[ Upstream commit 47653e4243f2b0a26372e481ca098936b51ec3a8 ] + +While the MDIO address of the internal PHY on Allwinner sun8i chips is +generally 1, of_mdio_parse_addr is used to cleanly parse the address +from the device-tree instead of hardcoding it. + +A commit reworking the code ditched the parsed value and hardcoded the +value 1 instead, which didn't really break anything but is more fragile +and not future-proof. + +Restore the initial behavior using the parsed address returned from the +helper. + +Fixes: 634db83b8265 ("net: stmmac: dwmac-sun8i: Handle integrated/external MDIOs") +Signed-off-by: Paul Kocialkowski +Reviewed-by: Andrew Lunn +Acked-by: Corentin LABBE +Tested-by: Corentin LABBE +Link: https://patch.msgid.link/20250519164936.4172658-1-paulk@sys-base.io +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +index 4a0ae92b3055c..ce8367b63823a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, + /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY + * address. No need to mask it again. + */ +- reg |= 1 << H3_EPHY_ADDR_SHIFT; ++ reg |= ret << H3_EPHY_ADDR_SHIFT; + } else { + /* For SoCs without internal PHY the PHY selection bit should be + * set to 0 (external PHY). 
+-- +2.39.5 + diff --git a/queue-6.12/net-lan743x-restore-sgmii-ctrl-register-on-resume.patch b/queue-6.12/net-lan743x-restore-sgmii-ctrl-register-on-resume.patch new file mode 100644 index 0000000000..f5141d073f --- /dev/null +++ b/queue-6.12/net-lan743x-restore-sgmii-ctrl-register-on-resume.patch @@ -0,0 +1,92 @@ +From e457a9449d4d2b55316667e6f30a1d596e5b6d69 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 May 2025 09:27:19 +0530 +Subject: net: lan743x: Restore SGMII CTRL register on resume + +From: Thangaraj Samynathan + +[ Upstream commit 293e38ff4e4c2ba53f3fd47d8a4a9f0f0414a7a6 ] + +SGMII_CTRL register, which specifies the active interface, was not +properly restored when resuming from suspend. This led to incorrect +interface selection after resume particularly in scenarios involving +the FPGA. + +To fix this: +- Move the SGMII_CTRL setup out of the probe function. +- Initialize the register in the hardware initialization helper function, +which is called during both device initialization and resume. + +This ensures the interface configuration is consistently restored after +suspend/resume cycles. + +Fixes: a46d9d37c4f4f ("net: lan743x: Add support for SGMII interface") +Signed-off-by: Thangaraj Samynathan +Link: https://patch.msgid.link/20250516035719.117960-1-thangaraj.s@microchip.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/microchip/lan743x_main.c | 19 ++++++++++--------- + 1 file changed, 10 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c +index 547255ca1c4ef..812ad9d61676a 100644 +--- a/drivers/net/ethernet/microchip/lan743x_main.c ++++ b/drivers/net/ethernet/microchip/lan743x_main.c +@@ -3466,6 +3466,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) + { + struct lan743x_tx *tx; ++ u32 sgmii_ctl; + int index; + int ret; + +@@ -3478,6 +3479,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + spin_lock_init(&adapter->eth_syslock_spinlock); + mutex_init(&adapter->sgmii_rw_lock); + pci11x1x_set_rfe_rd_fifo_threshold(adapter); ++ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); ++ if (adapter->is_sgmii_en) { ++ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; ++ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; ++ } else { ++ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; ++ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; ++ } ++ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + } else { + adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; + adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; +@@ -3526,7 +3536,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, + + static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + { +- u32 sgmii_ctl; + int ret; + + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); +@@ -3538,10 +3547,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + adapter->mdiobus->priv = (void *)adapter; + if (adapter->is_pci11x1x) { + if (adapter->is_sgmii_en) { +- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); +- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; +- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; +- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "SGMII operation\n"); + adapter->mdiobus->read = lan743x_mdiobus_read_c22; +@@ -3552,10 +3557,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) + netif_dbg(adapter, drv, adapter->netdev, + "lan743x-mdiobus-c45\n"); + } 
else { +- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); +- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; +- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; +- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); + netif_dbg(adapter, drv, adapter->netdev, + "RGMII operation\n"); + // Only C22 support when RGMII I/F +-- +2.39.5 + diff --git a/queue-6.12/net-tipc-fix-slab-use-after-free-read-in-tipc_aead_e.patch b/queue-6.12/net-tipc-fix-slab-use-after-free-read-in-tipc_aead_e.patch new file mode 100644 index 0000000000..d3b9cafd6a --- /dev/null +++ b/queue-6.12/net-tipc-fix-slab-use-after-free-read-in-tipc_aead_e.patch @@ -0,0 +1,125 @@ +From c1b35a8595f730c4188f0889e79605dcd9c9b45e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 20 May 2025 18:14:04 +0800 +Subject: net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done + +From: Wang Liang + +[ Upstream commit e279024617134c94fd3e37470156534d5f2b3472 ] + +Syzbot reported a slab-use-after-free with the following call trace: + + ================================================================== + BUG: KASAN: slab-use-after-free in tipc_aead_encrypt_done+0x4bd/0x510 net/tipc/crypto.c:840 + Read of size 8 at addr ffff88807a733000 by task kworker/1:0/25 + + Call Trace: + kasan_report+0xd9/0x110 mm/kasan/report.c:601 + tipc_aead_encrypt_done+0x4bd/0x510 net/tipc/crypto.c:840 + crypto_request_complete include/crypto/algapi.h:266 + aead_request_complete include/crypto/internal/aead.h:85 + cryptd_aead_crypt+0x3b8/0x750 crypto/cryptd.c:772 + crypto_request_complete include/crypto/algapi.h:266 + cryptd_queue_worker+0x131/0x200 crypto/cryptd.c:181 + process_one_work+0x9fb/0x1b60 kernel/workqueue.c:3231 + + Allocated by task 8355: + kzalloc_noprof include/linux/slab.h:778 + tipc_crypto_start+0xcc/0x9e0 net/tipc/crypto.c:1466 + tipc_init_net+0x2dd/0x430 net/tipc/core.c:72 + ops_init+0xb9/0x650 net/core/net_namespace.c:139 + setup_net+0x435/0xb40 net/core/net_namespace.c:343 + copy_net_ns+0x2f0/0x670 net/core/net_namespace.c:508 + create_new_namespaces+0x3ea/0xb10 kernel/nsproxy.c:110 + unshare_nsproxy_namespaces+0xc0/0x1f0 kernel/nsproxy.c:228 + ksys_unshare+0x419/0x970 kernel/fork.c:3323 + __do_sys_unshare kernel/fork.c:3394 + + Freed by task 63: + kfree+0x12a/0x3b0 mm/slub.c:4557 + tipc_crypto_stop+0x23c/0x500 net/tipc/crypto.c:1539 + tipc_exit_net+0x8c/0x110 net/tipc/core.c:119 + ops_exit_list+0xb0/0x180 net/core/net_namespace.c:173 + cleanup_net+0x5b7/0xbf0 net/core/net_namespace.c:640 + process_one_work+0x9fb/0x1b60 kernel/workqueue.c:3231 + +After freed the tipc_crypto tx by delete namespace, tipc_aead_encrypt_done +may still visit it in cryptd_queue_worker workqueue. + +I reproduce this issue by: + ip netns add ns1 + ip link add veth1 type veth peer name veth2 + ip link set veth1 netns ns1 + ip netns exec ns1 tipc bearer enable media eth dev veth1 + ip netns exec ns1 tipc node set key this_is_a_master_key master + ip netns exec ns1 tipc bearer disable media eth dev veth1 + ip netns del ns1 + +The key of reproduction is that, simd_aead_encrypt is interrupted, leading +to crypto_simd_usable() return false. Thus, the cryptd_queue_worker is +triggered, and the tipc_crypto tx will be visited. 
+ + tipc_disc_timeout + tipc_bearer_xmit_skb + tipc_crypto_xmit + tipc_aead_encrypt + crypto_aead_encrypt + // encrypt() + simd_aead_encrypt + // crypto_simd_usable() is false + child = &ctx->cryptd_tfm->base; + + simd_aead_encrypt + crypto_aead_encrypt + // encrypt() + cryptd_aead_encrypt_enqueue + cryptd_aead_enqueue + cryptd_enqueue_request + // trigger cryptd_queue_worker + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work) + +Fix this by holding net reference count before encrypt. + +Reported-by: syzbot+55c12726619ff85ce1f6@syzkaller.appspotmail.com +Closes: https://syzkaller.appspot.com/bug?extid=55c12726619ff85ce1f6 +Fixes: fc1b6d6de220 ("tipc: introduce TIPC encryption & authentication") +Signed-off-by: Wang Liang +Link: https://patch.msgid.link/20250520101404.1341730-1-wangliang74@huawei.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + net/tipc/crypto.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c +index c524421ec6525..8584893b47851 100644 +--- a/net/tipc/crypto.c ++++ b/net/tipc/crypto.c +@@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, + goto exit; + } + ++ /* Get net to avoid freed tipc_crypto when delete namespace */ ++ get_net(aead->crypto->net); ++ + /* Now, do encrypt */ + rc = crypto_aead_encrypt(req); + if (rc == -EINPROGRESS || rc == -EBUSY) + return rc; + + tipc_bearer_put(b); ++ put_net(aead->crypto->net); + + exit: + kfree(ctx); +@@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err) + kfree(tx_ctx); + tipc_bearer_put(b); + tipc_aead_put(aead); ++ put_net(net); + } + + /** +-- +2.39.5 + diff --git a/queue-6.12/octeontx2-af-fix-apr-entry-mapping-based-on-apr_lmt_.patch b/queue-6.12/octeontx2-af-fix-apr-entry-mapping-based-on-apr_lmt_.patch new file mode 100644 index 0000000000..347ca936b6 --- /dev/null +++ b/queue-6.12/octeontx2-af-fix-apr-entry-mapping-based-on-apr_lmt_.patch @@ -0,0 +1,111 @@ +From 51d0665d61294b26c7c7175f200aca2ae91ed8e5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 May 2025 11:38:34 +0530 +Subject: octeontx2-af: Fix APR entry mapping based on APR_LMT_CFG + +From: Geetha sowjanya + +[ Upstream commit a6ae7129819ad20788e610261246e71736543b8b ] + +The current implementation maps the APR table using a fixed size, +which can lead to incorrect mapping when the number of PFs and VFs +varies. +This patch corrects the mapping by calculating the APR table +size dynamically based on the values configured in the +APR_LMT_CFG register, ensuring accurate representation +of APR entries in debugfs. + +Fixes: 0daa55d033b0 ("octeontx2-af: cn10k: debugfs for dumping LMTST map table"). 
+Signed-off-by: Geetha sowjanya +Link: https://patch.msgid.link/20250521060834.19780-3-gakula@marvell.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 9 ++++++--- + .../net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 11 ++++++++--- + 2 files changed, 14 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +index 3838c04b78c22..4a3370a40dd88 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +@@ -13,7 +13,6 @@ + /* RVU LMTST */ + #define LMT_TBL_OP_READ 0 + #define LMT_TBL_OP_WRITE 1 +-#define LMT_MAP_TABLE_SIZE (128 * 1024) + #define LMT_MAPTBL_ENTRY_SIZE 16 + #define LMT_MAX_VFS 256 + +@@ -26,10 +25,14 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + { + void __iomem *lmt_map_base; + u64 tbl_base, cfg; ++ int pfs, vfs; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); ++ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); ++ vfs = 1 << (cfg & 0xF); ++ pfs = 1 << ((cfg >> 4) & 0x7); + +- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); ++ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; +@@ -80,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + + mutex_lock(&rvu->rsrc_lock); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); +- pf = rvu_get_pf(pcifunc) & 0x1F; ++ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +index 87ba77e5026a0..e24accfecb3fb 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +@@ -580,6 +580,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + u64 lmt_addr, val, tbl_base; + int pf, vf, num_vfs, hw_vfs; + void __iomem *lmt_map_base; ++ int apr_pfs, apr_vfs; + int buf_size = 10240; + size_t off = 0; + int index = 0; +@@ -595,8 +596,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + return -ENOMEM; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); ++ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); ++ apr_vfs = 1 << (val & 0xF); ++ apr_pfs = 1 << ((val >> 4) & 0x7); + +- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); ++ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * ++ LMT_MAPTBL_ENTRY_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + kfree(buf); +@@ -618,7 +623,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", + pf); + +- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; ++ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", + (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); +@@ -631,7 +636,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + /* Reading num of VFs per PF */ + rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); + for (vf = 0; vf < num_vfs; 
vf++) { +- index = (pf * rvu->hw->total_vfs * 16) + ++ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); + off += scnprintf(&buf[off], buf_size - 1 - off, + "PF%d:VF%d \t\t", pf, vf); +-- +2.39.5 + diff --git a/queue-6.12/octeontx2-af-set-lmt_ena-bit-for-apr-table-entries.patch b/queue-6.12/octeontx2-af-set-lmt_ena-bit-for-apr-table-entries.patch new file mode 100644 index 0000000000..f8e11d10ef --- /dev/null +++ b/queue-6.12/octeontx2-af-set-lmt_ena-bit-for-apr-table-entries.patch @@ -0,0 +1,76 @@ +From cc0b0d1954be4baf404ba27e9312a42a6de0d437 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 May 2025 11:38:33 +0530 +Subject: octeontx2-af: Set LMT_ENA bit for APR table entries + +From: Subbaraya Sundeep + +[ Upstream commit 0eefa27b493306928d88af6368193b134c98fd64 ] + +This patch enables the LMT line for a PF/VF by setting the +LMT_ENA bit in the APR_LMT_MAP_ENTRY_S structure. + +Additionally, it simplifies the logic for calculating the +LMTST table index by consistently using the maximum +number of hw supported VFs (i.e., 256). + +Fixes: 873a1e3d207a ("octeontx2-af: cn10k: Setting up lmtst map table"). +Signed-off-by: Subbaraya Sundeep +Signed-off-by: Geetha sowjanya +Reviewed-by: Michal Swiatkowski +Link: https://patch.msgid.link/20250521060834.19780-2-gakula@marvell.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + .../net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +index 7fa98aeb3663c..3838c04b78c22 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +@@ -15,13 +15,17 @@ + #define LMT_TBL_OP_WRITE 1 + #define LMT_MAP_TABLE_SIZE (128 * 1024) + #define LMT_MAPTBL_ENTRY_SIZE 16 ++#define LMT_MAX_VFS 256 ++ ++#define LMT_MAP_ENTRY_ENA BIT_ULL(20) ++#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) + + /* Function to perform operations (read/write) on lmtst map table */ + static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) + { + void __iomem *lmt_map_base; +- u64 tbl_base; ++ u64 tbl_base, cfg; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + +@@ -35,6 +39,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); ++ ++ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); ++ /* 2048 LMTLINES */ ++ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); ++ ++ writeq(cfg, (lmt_map_base + (index + 8))); ++ + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 for flush and read is being used as a + * barrier and sets up a data dependency. 
Write to 0 after a write +@@ -52,7 +63,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + #define LMT_MAP_TBL_W1_OFF 8 + static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) + { +- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + ++ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; + } + +-- +2.39.5 + diff --git a/queue-6.12/octeontx2-pf-add-af_xdp-non-zero-copy-support.patch b/queue-6.12/octeontx2-pf-add-af_xdp-non-zero-copy-support.patch new file mode 100644 index 0000000000..4395926e86 --- /dev/null +++ b/queue-6.12/octeontx2-pf-add-af_xdp-non-zero-copy-support.patch @@ -0,0 +1,51 @@ +From 988c2ac49f5ffa49fc6c72555eb34426f2836195 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 13 Feb 2025 11:01:37 +0530 +Subject: octeontx2-pf: Add AF_XDP non-zero copy support + +From: Suman Ghosh + +[ Upstream commit b4164de5041b51cda3438e75bce668e2556057c3 ] + +Set xdp rx ring memory type as MEM_TYPE_PAGE_POOL for +af-xdp to work. This is needed since xdp_return_frame +internally will use page pools. + +Fixes: 06059a1a9a4a ("octeontx2-pf: Add XDP support to netdev PF") +Signed-off-by: Suman Ghosh +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +index 7510a918d942c..f75afcf5f5aef 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +@@ -988,6 +988,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) + int err, pool_id, non_xdp_queues; + struct nix_aq_enq_req *aq; + struct otx2_cq_queue *cq; ++ struct otx2_pool *pool; + + cq = &qset->cq[qidx]; + cq->cq_idx = qidx; +@@ -996,8 +997,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) + cq->cq_type = CQ_RX; + cq->cint_idx = qidx; + cq->cqe_cnt = qset->rqe_cnt; +- if (pfvf->xdp_prog) ++ if (pfvf->xdp_prog) { ++ pool = &qset->pool[qidx]; + xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); ++ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, ++ MEM_TYPE_PAGE_POOL, ++ pool->page_pool); ++ } + } else if (qidx < non_xdp_queues) { + cq->cq_type = CQ_TX; + cq->cint_idx = qidx - pfvf->hw.rx_queues; +-- +2.39.5 + diff --git a/queue-6.12/perf-x86-intel-fix-segfault-with-pebs-via-pt-with-sa.patch b/queue-6.12/perf-x86-intel-fix-segfault-with-pebs-via-pt-with-sa.patch new file mode 100644 index 0000000000..0b1f6fc8a7 --- /dev/null +++ b/queue-6.12/perf-x86-intel-fix-segfault-with-pebs-via-pt-with-sa.patch @@ -0,0 +1,101 @@ +From aa55b559f67e3d7b47f798b7e2f1e7d9eccdd30b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 8 May 2025 16:44:52 +0300 +Subject: perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq + +From: Adrian Hunter + +[ Upstream commit 99bcd91fabada0dbb1d5f0de44532d8008db93c6 ] + +Currently, using PEBS-via-PT with a sample frequency instead of a sample +period, causes a segfault. For example: + + BUG: kernel NULL pointer dereference, address: 0000000000000195 + + ? __die_body.cold+0x19/0x27 + ? page_fault_oops+0xca/0x290 + ? exc_page_fault+0x7e/0x1b0 + ? asm_exc_page_fault+0x26/0x30 + ? intel_pmu_pebs_event_update_no_drain+0x40/0x60 + ? 
intel_pmu_pebs_event_update_no_drain+0x32/0x60 + intel_pmu_drain_pebs_icl+0x333/0x350 + handle_pmi_common+0x272/0x3c0 + intel_pmu_handle_irq+0x10a/0x2e0 + perf_event_nmi_handler+0x2a/0x50 + +That happens because intel_pmu_pebs_event_update_no_drain() assumes all the +pebs_enabled bits represent counter indexes, which is not always the case. +In this particular case, bits 60 and 61 are set for PEBS-via-PT purposes. + +The behaviour of PEBS-via-PT with sample frequency is questionable because +although a PMI is generated (PEBS_PMI_AFTER_EACH_RECORD), the period is not +adjusted anyway. + +Putting that aside, fix intel_pmu_pebs_event_update_no_drain() by passing +the mask of counter bits instead of 'size'. Note, prior to the Fixes +commit, 'size' would be limited to the maximum counter index, so the issue +was not hit. + +Fixes: 722e42e45c2f1 ("perf/x86: Support counter mask") +Signed-off-by: Adrian Hunter +Signed-off-by: Ingo Molnar +Reviewed-by: Kan Liang +Cc: Peter Zijlstra +Cc: Ingo Molnar +Cc: Alexander Shishkin +Cc: Arnaldo Carvalho de Melo +Cc: Jiri Olsa +Cc: Namhyung Kim +Cc: Ian Rogers +Cc: linux-perf-users@vger.kernel.org +Link: https://lore.kernel.org/r/20250508134452.73960-1-adrian.hunter@intel.com +Signed-off-by: Sasha Levin +--- + arch/x86/events/intel/ds.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c +index 1b82bcc6fa556..54007174c15b5 100644 +--- a/arch/x86/events/intel/ds.c ++++ b/arch/x86/events/intel/ds.c +@@ -2240,8 +2240,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_ + setup_pebs_fixed_sample_data); + } + +-static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) ++static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) + { ++ u64 pebs_enabled = cpuc->pebs_enabled & mask; + struct perf_event *event; + int bit; + +@@ -2252,7 +2253,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int + * It needs to call intel_pmu_save_and_restart_reload() to + * update the event->count for this case. 
+ */ +- for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { ++ for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); +@@ -2287,7 +2288,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d + } + + if (unlikely(base >= top)) { +- intel_pmu_pebs_event_update_no_drain(cpuc, size); ++ intel_pmu_pebs_event_update_no_drain(cpuc, mask); + return; + } + +@@ -2397,7 +2398,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d + (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); + + if (unlikely(base >= top)) { +- intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); ++ intel_pmu_pebs_event_update_no_drain(cpuc, mask); + return; + } + +-- +2.39.5 + diff --git a/queue-6.12/pinctrl-qcom-switch-to-devm_register_sys_off_handler.patch b/queue-6.12/pinctrl-qcom-switch-to-devm_register_sys_off_handler.patch new file mode 100644 index 0000000000..d4ffa16858 --- /dev/null +++ b/queue-6.12/pinctrl-qcom-switch-to-devm_register_sys_off_handler.patch @@ -0,0 +1,96 @@ +From 9e2a6c97f2e62c195f26a94985e514d53210e850 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 13 May 2025 21:38:58 +0300 +Subject: pinctrl: qcom: switch to devm_register_sys_off_handler() + +From: Dmitry Baryshkov + +[ Upstream commit 41e452e6933d14146381ea25cff5e4d1ac2abea1 ] + +Error-handling paths in msm_pinctrl_probe() don't call +a function required to unroll restart handler registration, +unregister_restart_handler(). Instead of adding calls to this function, +switch the msm pinctrl code into using devm_register_sys_off_handler(). + +Fixes: cf1fc1876289 ("pinctrl: qcom: use restart_notifier mechanism for ps_hold") +Signed-off-by: Dmitry Baryshkov +Link: https://lore.kernel.org/20250513-pinctrl-msm-fix-v2-2-249999af0fc1@oss.qualcomm.com +Signed-off-by: Linus Walleij +Signed-off-by: Sasha Levin +--- + drivers/pinctrl/qcom/pinctrl-msm.c | 23 ++++++++++++----------- + 1 file changed, 12 insertions(+), 11 deletions(-) + +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index a6bdff7a0bb25..018e96d921c05 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -43,7 +43,6 @@ + * @pctrl: pinctrl handle. + * @chip: gpiochip handle. + * @desc: pin controller descriptor +- * @restart_nb: restart notifier block. + * @irq: parent irq for the TLMM irq_chip. 
+ * @intr_target_use_scm: route irq to application cpu using scm calls + * @lock: Spinlock to protect register resources as well +@@ -63,7 +62,6 @@ struct msm_pinctrl { + struct pinctrl_dev *pctrl; + struct gpio_chip chip; + struct pinctrl_desc desc; +- struct notifier_block restart_nb; + + int irq; + +@@ -1470,10 +1468,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + return 0; + } + +-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, +- void *data) ++static int msm_ps_hold_restart(struct sys_off_data *data) + { +- struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); ++ struct msm_pinctrl *pctrl = data->cb_data; + + writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); + mdelay(1000); +@@ -1484,7 +1481,11 @@ static struct msm_pinctrl *poweroff_pctrl; + + static void msm_ps_hold_poweroff(void) + { +- msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); ++ struct sys_off_data data = { ++ .cb_data = poweroff_pctrl, ++ }; ++ ++ msm_ps_hold_restart(&data); + } + + static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) +@@ -1494,9 +1495,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) + + for (i = 0; i < pctrl->soc->nfunctions; i++) + if (!strcmp(func[i].name, "ps_hold")) { +- pctrl->restart_nb.notifier_call = msm_ps_hold_restart; +- pctrl->restart_nb.priority = 128; +- if (register_restart_handler(&pctrl->restart_nb)) ++ if (devm_register_sys_off_handler(pctrl->dev, ++ SYS_OFF_MODE_RESTART, ++ 128, ++ msm_ps_hold_restart, ++ pctrl)) + dev_err(pctrl->dev, + "failed to setup restart handler.\n"); + poweroff_pctrl = pctrl; +@@ -1598,8 +1601,6 @@ void msm_pinctrl_remove(struct platform_device *pdev) + struct msm_pinctrl *pctrl = platform_get_drvdata(pdev); + + gpiochip_remove(&pctrl->chip); +- +- unregister_restart_handler(&pctrl->restart_nb); + } + EXPORT_SYMBOL(msm_pinctrl_remove); + +-- +2.39.5 + diff --git a/queue-6.12/ptp-ocp-limit-signal-freq-counts-in-summary-output-f.patch b/queue-6.12/ptp-ocp-limit-signal-freq-counts-in-summary-output-f.patch new file mode 100644 index 0000000000..2d2be78998 --- /dev/null +++ b/queue-6.12/ptp-ocp-limit-signal-freq-counts-in-summary-output-f.patch @@ -0,0 +1,131 @@ +From 0c513d0bf27525715ba4cc18c670f36ad6508165 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 May 2025 10:35:41 +0300 +Subject: ptp: ocp: Limit signal/freq counts in summary output functions + +From: Sagi Maimon + +[ Upstream commit c9e455581e2ba87ee38c126e8dc49a424b9df0cf ] + +The debugfs summary output could access uninitialized elements in +the freq_in[] and signal_out[] arrays, causing NULL pointer +dereferences and triggering a kernel Oops (page_fault_oops). +This patch adds u8 fields (nr_freq_in, nr_signal_out) to track the +number of initialized elements, with a maximum of 4 per array. +The summary output functions are updated to respect these limits, +preventing out-of-bounds access and ensuring safe array handling. + +Widen the label variables because the change confuses GCC about +max length of the strings. 
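The shape of the fix is a fixed-size table paired with a count of how many entries were actually initialized, and dump loops that honour that count. A small self-contained userspace C sketch of the idea, with invented names (this is not ptp_ocp code):

/*
 * Illustrative sketch only: a fixed-size table paired with a count of the
 * entries that were actually initialized, so dump loops never touch the
 * uninitialized tail.  Invented names; this is not ptp_ocp code.
 */
#include <stdio.h>

#define MAX_SIGNALS 4

struct fake_card {
	const char *signal_out[MAX_SIGNALS];
	int signals_nr;			/* how many entries are valid */
};

static void summary_show(const struct fake_card *card)
{
	int i;

	/* iterate over the populated entries only, never the full array */
	for (i = 0; i < card->signals_nr; i++)
		printf("signal %d: %s\n", i, card->signal_out[i]);
}

int main(void)
{
	struct fake_card card = {
		.signal_out = { "GEN1", "GEN2" },	/* entries 2 and 3 stay NULL */
		.signals_nr = 2,
	};

	summary_show(&card);
	return 0;
}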
+ +Fixes: ef61f5528fca ("ptp: ocp: add Adva timecard support") +Signed-off-by: Sagi Maimon +Reviewed-by: Simon Horman +Reviewed-by: Vadim Fedorenko +Link: https://patch.msgid.link/20250514073541.35817-1-maimon.sagi@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/ptp/ptp_ocp.c | 24 +++++++++++++++++------- + 1 file changed, 17 insertions(+), 7 deletions(-) + +diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c +index 1a936829975e1..efbd80db778d6 100644 +--- a/drivers/ptp/ptp_ocp.c ++++ b/drivers/ptp/ptp_ocp.c +@@ -315,6 +315,8 @@ struct ptp_ocp_serial_port { + #define OCP_BOARD_ID_LEN 13 + #define OCP_SERIAL_LEN 6 + #define OCP_SMA_NUM 4 ++#define OCP_SIGNAL_NUM 4 ++#define OCP_FREQ_NUM 4 + + enum { + PORT_GNSS, +@@ -342,8 +344,8 @@ struct ptp_ocp { + struct dcf_master_reg __iomem *dcf_out; + struct dcf_slave_reg __iomem *dcf_in; + struct tod_reg __iomem *nmea_out; +- struct frequency_reg __iomem *freq_in[4]; +- struct ptp_ocp_ext_src *signal_out[4]; ++ struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM]; ++ struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM]; + struct ptp_ocp_ext_src *pps; + struct ptp_ocp_ext_src *ts0; + struct ptp_ocp_ext_src *ts1; +@@ -378,10 +380,12 @@ struct ptp_ocp { + u32 utc_tai_offset; + u32 ts_window_adjust; + u64 fw_cap; +- struct ptp_ocp_signal signal[4]; ++ struct ptp_ocp_signal signal[OCP_SIGNAL_NUM]; + struct ptp_ocp_sma_connector sma[OCP_SMA_NUM]; + const struct ocp_sma_op *sma_op; + struct dpll_device *dpll; ++ int signals_nr; ++ int freq_in_nr; + }; + + #define OCP_REQ_TIMESTAMP BIT(0) +@@ -2693,6 +2697,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) + bp->eeprom_map = fb_eeprom_map; + bp->fw_version = ioread32(&bp->image->version); + bp->sma_op = &ocp_fb_sma_op; ++ bp->signals_nr = 4; ++ bp->freq_in_nr = 4; + + ptp_ocp_fb_set_version(bp); + +@@ -2858,6 +2864,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) + bp->fw_version = ioread32(&bp->reg->version); + bp->fw_tag = 2; + bp->sma_op = &ocp_art_sma_op; ++ bp->signals_nr = 4; ++ bp->freq_in_nr = 4; + + /* Enable MAC serial port during initialisation */ + iowrite32(1, &bp->board_config->mro50_serial_activate); +@@ -2884,6 +2892,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r) + bp->flash_start = 0xA00000; + bp->eeprom_map = fb_eeprom_map; + bp->sma_op = &ocp_adva_sma_op; ++ bp->signals_nr = 2; ++ bp->freq_in_nr = 2; + + version = ioread32(&bp->image->version); + /* if lower 16 bits are empty, this is the fw loader. 
*/ +@@ -4004,7 +4014,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) + { + struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; + struct ptp_ocp_signal *signal = &bp->signal[nr]; +- char label[8]; ++ char label[16]; + bool on; + u32 val; + +@@ -4030,7 +4040,7 @@ static void + _frequency_summary_show(struct seq_file *s, int nr, + struct frequency_reg __iomem *reg) + { +- char label[8]; ++ char label[16]; + bool on; + u32 val; + +@@ -4174,11 +4184,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) + } + + if (bp->fw_cap & OCP_CAP_SIGNAL) +- for (i = 0; i < 4; i++) ++ for (i = 0; i < bp->signals_nr; i++) + _signal_summary_show(s, bp, i); + + if (bp->fw_cap & OCP_CAP_FREQ) +- for (i = 0; i < 4; i++) ++ for (i = 0; i < bp->freq_in_nr; i++) + _frequency_summary_show(s, i, bp->freq_in[i]); + + if (bp->irig_out) { +-- +2.39.5 + diff --git a/queue-6.12/remoteproc-qcom_wcnss-fix-on-platforms-without-fallb.patch b/queue-6.12/remoteproc-qcom_wcnss-fix-on-platforms-without-fallb.patch new file mode 100644 index 0000000000..d92ebc589e --- /dev/null +++ b/queue-6.12/remoteproc-qcom_wcnss-fix-on-platforms-without-fallb.patch @@ -0,0 +1,45 @@ +From 0955dec8529edc9306f361ea039cfa638e02104a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 May 2025 02:40:15 +0300 +Subject: remoteproc: qcom_wcnss: Fix on platforms without fallback regulators +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Matti Lehtimäki + +[ Upstream commit 4ca45af0a56d00b86285d6fdd720dca3215059a7 ] + +Recent change to handle platforms with only single power domain broke +pronto-v3 which requires power domains and doesn't have fallback voltage +regulators in case power domains are missing. Add a check to verify +the number of fallback voltage regulators before using the code which +handles single power domain situation. 
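In essence the fix guards an adjustment so it is only applied when the optional fallback entries really exist, keeping the count from going negative. A short illustrative userspace C sketch with invented names (this is not the wcnss driver):

/*
 * Illustrative sketch only: apply an adjustment only when the optional
 * fallback entries actually exist, so the count can never go negative.
 * Invented names; this is not the wcnss driver.
 */
#include <stdio.h>

static int total_regulators(int num_pds, int num_pd_vregs, int num_vregs)
{
	if (num_pds) {
		/* only fold in fallback regulators that are really present */
		if (num_pds < num_pd_vregs)
			num_vregs += num_pd_vregs - num_pds;
	} else {
		num_vregs += num_pd_vregs;
	}
	return num_vregs;
}

int main(void)
{
	/* power domains present, no fallback regulators (pronto-v3 style) */
	printf("with power domains: %d\n", total_regulators(2, 0, 1));
	/* no power domains, fallback regulators only */
	printf("fallback only:      %d\n", total_regulators(0, 2, 1));
	return 0;
}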
+ +Fixes: 65991ea8a6d1 ("remoteproc: qcom_wcnss: Handle platforms with only single power domain") +Signed-off-by: Matti Lehtimäki +Tested-by: Luca Weiss # sdm632-fairphone-fp3 +Link: https://lore.kernel.org/r/20250511234026.94735-1-matti.lehtimaki@gmail.com +Signed-off-by: Bjorn Andersson +Signed-off-by: Sasha Levin +--- + drivers/remoteproc/qcom_wcnss.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c +index 735d373a9f636..a2ae6adf0053a 100644 +--- a/drivers/remoteproc/qcom_wcnss.c ++++ b/drivers/remoteproc/qcom_wcnss.c +@@ -456,7 +456,8 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, + if (wcnss->num_pds) { + info += wcnss->num_pds; + /* Handle single power domain case */ +- num_vregs += num_pd_vregs - wcnss->num_pds; ++ if (wcnss->num_pds < num_pd_vregs) ++ num_vregs += num_pd_vregs - wcnss->num_pds; + } else { + num_vregs += num_pd_vregs; + } +-- +2.39.5 + diff --git a/queue-6.12/sch_hfsc-fix-qlen-accounting-bug-when-using-peek-in-.patch b/queue-6.12/sch_hfsc-fix-qlen-accounting-bug-when-using-peek-in-.patch new file mode 100644 index 0000000000..ff944d4158 --- /dev/null +++ b/queue-6.12/sch_hfsc-fix-qlen-accounting-bug-when-using-peek-in-.patch @@ -0,0 +1,62 @@ +From f6cbb940aee04154b5241594fd6cdaf7d028a56f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 18 May 2025 15:20:37 -0700 +Subject: sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue() + +From: Cong Wang + +[ Upstream commit 3f981138109f63232a5fb7165938d4c945cc1b9d ] + +When enqueuing the first packet to an HFSC class, hfsc_enqueue() calls the +child qdisc's peek() operation before incrementing sch->q.qlen and +sch->qstats.backlog. If the child qdisc uses qdisc_peek_dequeued(), this may +trigger an immediate dequeue and potential packet drop. In such cases, +qdisc_tree_reduce_backlog() is called, but the HFSC qdisc's qlen and backlog +have not yet been updated, leading to inconsistent queue accounting. This +can leave an empty HFSC class in the active list, causing further +consequences like use-after-free. + +This patch fixes the bug by moving the increment of sch->q.qlen and +sch->qstats.backlog before the call to the child qdisc's peek() operation. +This ensures that queue length and backlog are always accurate when packet +drops or dequeues are triggered during the peek. 
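The general lesson is to update the parent's accounting before invoking a child step that may immediately drop the packet and walk that accounting back down. A minimal userspace C sketch of the ordering, with invented names (this is not sch_hfsc code):

/*
 * Illustrative sketch only: bump the parent's queue accounting before calling
 * into a child step that may immediately drop the packet and walk that
 * accounting back down.  Invented names; this is not sch_hfsc code.
 */
#include <stdio.h>

struct fake_qdisc {
	int qlen;
	int backlog;
};

/* child step that can "drop" the packet and reduce the parent's counters */
static void child_peek_may_drop(struct fake_qdisc *parent, int len, int drop)
{
	if (drop) {
		parent->qlen--;
		parent->backlog -= len;
	}
}

static void enqueue(struct fake_qdisc *sch, int len, int child_drops)
{
	/* account for the packet first ... */
	sch->qlen++;
	sch->backlog += len;

	/* ... so a drop triggered from the child leaves consistent counters */
	child_peek_may_drop(sch, len, child_drops);
}

int main(void)
{
	struct fake_qdisc sch = { 0, 0 };

	enqueue(&sch, 100, 1);	/* child drops the packet right away */
	printf("qlen=%d backlog=%d\n", sch.qlen, sch.backlog);	/* 0 0, not -1 -100 */
	return 0;
}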
+ +Fixes: 12d0ad3be9c3 ("net/sched/sch_hfsc.c: handle corner cases where head may change invalidating calculated deadline") +Reported-by: Mingi Cho +Signed-off-by: Cong Wang +Reviewed-by: Simon Horman +Link: https://patch.msgid.link/20250518222038.58538-2-xiyou.wangcong@gmail.com +Reviewed-by: Jamal Hadi Salim +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + net/sched/sch_hfsc.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c +index cb8c525ea20ea..7986145a527cb 100644 +--- a/net/sched/sch_hfsc.c ++++ b/net/sched/sch_hfsc.c +@@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + return err; + } + ++ sch->qstats.backlog += len; ++ sch->q.qlen++; ++ + if (first && !cl->cl_nactive) { + if (cl->cl_flags & HFSC_RSC) + init_ed(cl, len); +@@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) + + } + +- sch->qstats.backlog += len; +- sch->q.qlen++; +- + return NET_XMIT_SUCCESS; + } + +-- +2.39.5 + diff --git a/queue-6.12/series b/queue-6.12/series index 00e30e6649..1aad823ed0 100644 --- a/queue-6.12/series +++ b/queue-6.12/series @@ -537,3 +537,38 @@ x86-kconfig-make-cfi_auto_default-depend-on-rust-or-.patch xenbus-allow-pvh-dom0-a-non-local-xenstore.patch drm-amd-display-call-fp-protect-before-mode-programm.patch __legitimize_mnt-check-for-mnt_sync_umount-should-be.patch +soundwire-bus-fix-race-on-the-creation-of-the-irq-do.patch +espintcp-fix-skb-leaks.patch +espintcp-remove-encap-socket-caching-to-avoid-refere.patch +xfrm-fix-udp-gro-handling-for-some-corner-cases.patch +dmaengine-idxd-fix-allowing-write-from-different-add.patch +x86-sev-fix-operator-precedence-in-ghcb_msr_vmpl_req.patch +kernel-fork-only-call-untrack_pfn_clear-on-vmas-dupl.patch +remoteproc-qcom_wcnss-fix-on-platforms-without-fallb.patch +clk-sunxi-ng-d1-add-missing-divider-for-mmc-mod-cloc.patch +xfrm-sanitize-marks-before-insert.patch +dmaengine-idxd-fix-poll-return-value.patch +dmaengine-fsl-edma-fix-return-code-for-unhandled-int.patch +driver-core-split-devres-apis-to-device-devres.h.patch +devres-introduce-devm_kmemdup_array.patch +asoc-sof-intel-hda-fix-uaf-when-reloading-module.patch +irqchip-riscv-imsic-start-local-sync-timer-on-correc.patch +perf-x86-intel-fix-segfault-with-pebs-via-pt-with-sa.patch +bluetooth-l2cap-fix-not-checking-l2cap_chan-security.patch +bluetooth-btusb-use-skb_pull-to-avoid-unsafe-access-.patch +ptp-ocp-limit-signal-freq-counts-in-summary-output-f.patch +bridge-netfilter-fix-forwarding-of-fragmented-packet.patch +ice-fix-vf-num_mac-count-with-port-representors.patch +ice-fix-lacp-bonds-without-sriov-environment.patch +idpf-fix-null-ptr-deref-in-idpf_features_check.patch +loop-don-t-require-write_iter-for-writable-files-in-.patch +pinctrl-qcom-switch-to-devm_register_sys_off_handler.patch +net-dwmac-sun8i-use-parsed-internal-phy-address-inst.patch +net-lan743x-restore-sgmii-ctrl-register-on-resume.patch +io_uring-fix-overflow-resched-cqe-reordering.patch +idpf-fix-idpf_vport_splitq_napi_poll.patch +sch_hfsc-fix-qlen-accounting-bug-when-using-peek-in-.patch +octeontx2-pf-add-af_xdp-non-zero-copy-support.patch +net-tipc-fix-slab-use-after-free-read-in-tipc_aead_e.patch +octeontx2-af-set-lmt_ena-bit-for-apr-table-entries.patch +octeontx2-af-fix-apr-entry-mapping-based-on-apr_lmt_.patch diff --git a/queue-6.12/soundwire-bus-fix-race-on-the-creation-of-the-irq-do.patch 
b/queue-6.12/soundwire-bus-fix-race-on-the-creation-of-the-irq-do.patch new file mode 100644 index 0000000000..df0add5bf3 --- /dev/null +++ b/queue-6.12/soundwire-bus-fix-race-on-the-creation-of-the-irq-do.patch @@ -0,0 +1,60 @@ +From 9ec6ca3aa8ffc301ab93fa6102604ca9c7938bb1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 9 Apr 2025 13:22:39 +0100 +Subject: soundwire: bus: Fix race on the creation of the IRQ domain + +From: Charles Keepax + +[ Upstream commit fd15594ba7d559d9da741504c322b9f57c4981e5 ] + +The SoundWire IRQ domain needs to be created before any slaves are added +to the bus, such that the domain is always available when needed. Move +the call to sdw_irq_create() before the calls to sdw_acpi_find_slaves() +and sdw_of_find_slaves(). + +Fixes: 12a95123bfe1 ("soundwire: bus: Allow SoundWire peripherals to register IRQ handlers") +Signed-off-by: Charles Keepax +Link: https://lore.kernel.org/r/20250409122239.1396489-1-ckeepax@opensource.cirrus.com +Signed-off-by: Vinod Koul +Signed-off-by: Sasha Levin +--- + drivers/soundwire/bus.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c +index 263ca32f0c5c3..6ca06cce41d3c 100644 +--- a/drivers/soundwire/bus.c ++++ b/drivers/soundwire/bus.c +@@ -121,6 +121,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + set_bit(SDW_GROUP13_DEV_NUM, bus->assigned); + set_bit(SDW_MASTER_DEV_NUM, bus->assigned); + ++ ret = sdw_irq_create(bus, fwnode); ++ if (ret) ++ return ret; ++ + /* + * SDW is an enumerable bus, but devices can be powered off. So, + * they won't be able to report as present. +@@ -137,6 +141,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + + if (ret < 0) { + dev_err(bus->dev, "Finding slaves failed:%d\n", ret); ++ sdw_irq_delete(bus); + return ret; + } + +@@ -155,10 +160,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + bus->params.curr_bank = SDW_BANK0; + bus->params.next_bank = SDW_BANK1; + +- ret = sdw_irq_create(bus, fwnode); +- if (ret) +- return ret; +- + return 0; + } + EXPORT_SYMBOL(sdw_bus_master_add); +-- +2.39.5 + diff --git a/queue-6.12/x86-sev-fix-operator-precedence-in-ghcb_msr_vmpl_req.patch b/queue-6.12/x86-sev-fix-operator-precedence-in-ghcb_msr_vmpl_req.patch new file mode 100644 index 0000000000..3009505202 --- /dev/null +++ b/queue-6.12/x86-sev-fix-operator-precedence-in-ghcb_msr_vmpl_req.patch @@ -0,0 +1,45 @@ +From 7cf55d6208ed5784fde52ff37c0ceaa36d21d088 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 11 May 2025 18:23:28 +0900 +Subject: x86/sev: Fix operator precedence in GHCB_MSR_VMPL_REQ_LEVEL macro + +From: Seongman Lee + +[ Upstream commit f7387eff4bad33d12719c66c43541c095556ae4e ] + +The GHCB_MSR_VMPL_REQ_LEVEL macro lacked parentheses around the bitmask +expression, causing the shift operation to bind too early. As a result, +when requesting VMPL1 (e.g., GHCB_MSR_VMPL_REQ_LEVEL(1)), incorrect +values such as 0x000000016 were generated instead of the intended +0x100000016 (the requested VMPL level is specified in GHCBData[39:32]). + +Fix the precedence issue by grouping the masked value before applying +the shift. + + [ bp: Massage commit message. 
] + +Fixes: 34ff65901735 ("x86/sev: Use kernel provided SVSM Calling Areas") +Signed-off-by: Seongman Lee +Signed-off-by: Borislav Petkov (AMD) +Link: https://lore.kernel.org/20250511092329.12680-1-cloudlee1719@gmail.com +Signed-off-by: Sasha Levin +--- + arch/x86/include/asm/sev-common.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h +index 98726c2b04f85..ddaf3c62efb52 100644 +--- a/arch/x86/include/asm/sev-common.h ++++ b/arch/x86/include/asm/sev-common.h +@@ -116,7 +116,7 @@ enum psc_op { + #define GHCB_MSR_VMPL_REQ 0x016 + #define GHCB_MSR_VMPL_REQ_LEVEL(v) \ + /* GHCBData[39:32] */ \ +- (((u64)(v) & GENMASK_ULL(7, 0) << 32) | \ ++ ((((u64)(v) & GENMASK_ULL(7, 0)) << 32) | \ + /* GHCBDdata[11:0] */ \ + GHCB_MSR_VMPL_REQ) + +-- +2.39.5 + diff --git a/queue-6.12/xfrm-fix-udp-gro-handling-for-some-corner-cases.patch b/queue-6.12/xfrm-fix-udp-gro-handling-for-some-corner-cases.patch new file mode 100644 index 0000000000..aca5e4f5e2 --- /dev/null +++ b/queue-6.12/xfrm-fix-udp-gro-handling-for-some-corner-cases.patch @@ -0,0 +1,143 @@ +From 2cff9e6c34c82c30db46320b392fbbd7f2b248c2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 15 Apr 2025 13:13:18 +0200 +Subject: xfrm: Fix UDP GRO handling for some corner cases + +From: Tobias Brunner + +[ Upstream commit e3fd0577768584ece824c8b661c40fb3d912812a ] + +This fixes an issue that's caused if there is a mismatch between the data +offset in the GRO header and the length fields in the regular sk_buff due +to the pskb_pull()/skb_push() calls. That's because the UDP GRO layer +stripped off the UDP header via skb_gro_pull() already while the UDP +header was explicitly not pulled/pushed in this function. + +For example, an IKE packet that triggered this had len=data_len=1268 and +the data_offset in the GRO header was 28 (IPv4 + UDP). So pskb_pull() +was called with an offset of 28-8=20, which reduced len to 1248 and via +pskb_may_pull() and __pskb_pull_tail() it also set data_len to 1248. +As the ESP offload module was not loaded, the function bailed out and +called skb_push(), which restored len to 1268, however, data_len remained +at 1248. + +So while skb_headlen() was 0 before, it was now 20. The latter caused a +difference of 8 instead of 28 (or 0 if pskb_pull()/skb_push() was called +with the complete GRO data_offset) in gro_try_pull_from_frag0() that +triggered a call to gro_pull_from_frag0() that corrupted the packet. + +This change uses a more GRO-like approach seen in other GRO receivers +via skb_gro_header() to just read the actual data we are interested in +and does not try to "restore" the UDP header at this point to call the +existing function. If the offload module is not loaded, it immediately +bails out, otherwise, it only does a quick check to see if the packet +is an IKE or keepalive packet instead of calling the existing function. 
+ +Fixes: 172bf009c18d ("xfrm: Support GRO for IPv4 ESP in UDP encapsulation") +Fixes: 221ddb723d90 ("xfrm: Support GRO for IPv6 ESP in UDP encapsulation") +Signed-off-by: Tobias Brunner +Signed-off-by: Steffen Klassert +Signed-off-by: Sasha Levin +--- + net/ipv4/xfrm4_input.c | 18 ++++++++++-------- + net/ipv6/xfrm6_input.c | 18 ++++++++++-------- + 2 files changed, 20 insertions(+), 16 deletions(-) + +diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c +index a620618cc568a..17d3fc2fab4cc 100644 +--- a/net/ipv4/xfrm4_input.c ++++ b/net/ipv4/xfrm4_input.c +@@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + int offset = skb_gro_offset(skb); + const struct net_offload *ops; + struct sk_buff *pp = NULL; +- int ret; +- +- offset = offset - sizeof(struct udphdr); ++ int len, dlen; ++ __u8 *udpdata; ++ __be32 *udpdata32; + +- if (!pskb_pull(skb, offset)) ++ len = skb->len - offset; ++ dlen = offset + min(len, 8); ++ udpdata = skb_gro_header(skb, dlen, offset); ++ udpdata32 = (__be32 *)udpdata; ++ if (unlikely(!udpdata)) + return NULL; + + rcu_read_lock(); +@@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + if (!ops || !ops->callbacks.gro_receive) + goto out; + +- ret = __xfrm4_udp_encap_rcv(sk, skb, false); +- if (ret) ++ /* check if it is a keepalive or IKE packet */ ++ if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) + goto out; + +- skb_push(skb, offset); + NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; + + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); +@@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + + out: + rcu_read_unlock(); +- skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + +diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c +index 4abc5e9d63227..841c81abaaf4f 100644 +--- a/net/ipv6/xfrm6_input.c ++++ b/net/ipv6/xfrm6_input.c +@@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + int offset = skb_gro_offset(skb); + const struct net_offload *ops; + struct sk_buff *pp = NULL; +- int ret; ++ int len, dlen; ++ __u8 *udpdata; ++ __be32 *udpdata32; + + if (skb->protocol == htons(ETH_P_IP)) + return xfrm4_gro_udp_encap_rcv(sk, head, skb); + +- offset = offset - sizeof(struct udphdr); +- +- if (!pskb_pull(skb, offset)) ++ len = skb->len - offset; ++ dlen = offset + min(len, 8); ++ udpdata = skb_gro_header(skb, dlen, offset); ++ udpdata32 = (__be32 *)udpdata; ++ if (unlikely(!udpdata)) + return NULL; + + rcu_read_lock(); +@@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + if (!ops || !ops->callbacks.gro_receive) + goto out; + +- ret = __xfrm6_udp_encap_rcv(sk, skb, false); +- if (ret) ++ /* check if it is a keepalive or IKE packet */ ++ if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) + goto out; + +- skb_push(skb, offset); + NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; + + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); +@@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + + out: + rcu_read_unlock(); +- skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + +-- +2.39.5 + diff --git a/queue-6.12/xfrm-sanitize-marks-before-insert.patch b/queue-6.12/xfrm-sanitize-marks-before-insert.patch new file mode 100644 index 0000000000..e2f9906142 --- /dev/null +++ 
b/queue-6.12/xfrm-sanitize-marks-before-insert.patch @@ -0,0 +1,71 @@ +From d9639875eae06bd51308511a0d500b16fa7a999e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 May 2025 13:31:58 +0200 +Subject: xfrm: Sanitize marks before insert + +From: Paul Chaignon + +[ Upstream commit 0b91fda3a1f044141e1e615456ff62508c32b202 ] + +Prior to this patch, the mark is sanitized (applying the state's mask to +the state's value) only on inserts when checking if a conflicting XFRM +state or policy exists. + +We discovered in Cilium that this same sanitization does not occur +in the hot-path __xfrm_state_lookup. In the hot-path, the sk_buff's mark +is simply compared to the state's value: + + if ((mark & x->mark.m) != x->mark.v) + continue; + +Therefore, users can define unsanitized marks (ex. 0xf42/0xf00) which will +never match any packet. + +This commit updates __xfrm_state_insert and xfrm_policy_insert to store +the sanitized marks, thus removing this footgun. + +This has the side effect of changing the ip output, as the +returned mark will have the mask applied to it when printed. + +Fixes: 3d6acfa7641f ("xfrm: SA lookups with mark") +Signed-off-by: Paul Chaignon +Signed-off-by: Louis DeLosSantos +Co-developed-by: Louis DeLosSantos +Signed-off-by: Steffen Klassert +Signed-off-by: Sasha Levin +--- + net/xfrm/xfrm_policy.c | 3 +++ + net/xfrm/xfrm_state.c | 3 +++ + 2 files changed, 6 insertions(+) + +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 8a1b83191a6cd..2c42d83fbaa2d 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) + struct xfrm_policy *delpol; + struct hlist_head *chain; + ++ /* Sanitize mark before store */ ++ policy->mark.v &= policy->mark.m; ++ + spin_lock_bh(&net->xfrm.xfrm_policy_lock); + chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); + if (chain) +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index a526b3bb8b88e..abd725386cb60 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -1653,6 +1653,9 @@ static void __xfrm_state_insert(struct xfrm_state *x) + + list_add(&x->km.all, &net->xfrm.state_all); + ++ /* Sanitize mark before store */ ++ x->mark.v &= x->mark.m; ++ + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, + x->props.reqid, x->props.family); + XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, +-- +2.39.5 +
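
Editor's note (not part of the queued series): the mark-matching behaviour described in the xfrm-sanitize-marks-before-insert.patch commit message above can be illustrated with a minimal standalone sketch. This is a hypothetical userspace program, not kernel code; the variable names and the 0xf42/0xf00 example are taken from the commit message, and the comparison mirrors the hot-path check quoted there.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* state configured with an unsanitized mark, e.g. 0xf42/0xf00 */
	uint32_t state_v = 0xf42, state_m = 0xf00;
	uint32_t skb_mark = 0xf42;	/* packet mark identical to the configured value */

	/* hot-path style check: the mask is applied to the packet mark only,
	 * so 0xf42 & 0xf00 == 0xf00 != 0xf42 and the state never matches */
	printf("unsanitized: %s\n",
	       (skb_mark & state_m) != state_v ? "no match" : "match");

	/* what the fix does at insert time: store the value with the mask applied */
	state_v &= state_m;
	printf("sanitized:   %s\n",
	       (skb_mark & state_m) != state_v ? "no match" : "match");

	return 0;
}

Running this prints "no match" for the unsanitized state and "match" once the value has been masked, which is the footgun the patch removes by sanitizing marks in xfrm_policy_insert() and __xfrm_state_insert().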