From: Greg Kroah-Hartman Date: Sun, 11 Jun 2023 13:26:18 +0000 (+0200) Subject: 6.1-stable patches X-Git-Tag: v4.14.318~42 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=04986dcd425a7fb6e4a493236bb244f713210a7a;p=thirdparty%2Fkernel%2Fstable-queue.git 6.1-stable patches added patches: bluetooth-fix-debugfs-registration.patch bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch bluetooth-hci_qca-fix-debugfs-registration.patch can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch can-j1939-change-j1939_netdev_lock-type-to-mutex.patch can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch drm-amd-pm-fix-power-context-allocation-in-smu13.patch mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch mptcp-add-address-into-userspace-pm-list.patch mptcp-only-send-rm_addr-in-nl_cmd_remove.patch mptcp-update-userspace-pm-infos.patch pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch selftests-mptcp-update-userspace-pm-addr-tests.patch selftests-mptcp-update-userspace-pm-subflow-tests.patch soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch usb-usbfs-enforce-page-requirements-for-mmap.patch usb-usbfs-use-consistent-mmap-functions.patch virtio_net-use-control_buf-for-coalesce-params.patch wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch --- diff --git a/queue-6.1/bluetooth-fix-debugfs-registration.patch b/queue-6.1/bluetooth-fix-debugfs-registration.patch new file mode 100644 index 00000000000..ad304e65dfe --- /dev/null +++ b/queue-6.1/bluetooth-fix-debugfs-registration.patch @@ -0,0 +1,59 @@ +From fe2ccc6c29d53e14d3c8b3ddf8ad965a92e074ee Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Wed, 31 May 2023 10:57:58 +0200 +Subject: Bluetooth: fix debugfs registration + +From: Johan Hovold + +commit fe2ccc6c29d53e14d3c8b3ddf8ad965a92e074ee upstream. + +Since commit ec6cef9cd98d ("Bluetooth: Fix SMP channel registration for +unconfigured controllers") the debugfs interface for unconfigured +controllers will be created when the controller is configured. + +There is however currently nothing preventing a controller from being +configured multiple time (e.g. setting the device address using btmgmt) +which results in failed attempts to register the already registered +debugfs entries: + + debugfs: File 'features' in directory 'hci0' already present! + debugfs: File 'manufacturer' in directory 'hci0' already present! + debugfs: File 'hci_version' in directory 'hci0' already present! + ... + debugfs: File 'quirk_simultaneous_discovery' in directory 'hci0' already present! + +Add a controller flag to avoid trying to register the debugfs interface +more than once. 
+ +Fixes: ec6cef9cd98d ("Bluetooth: Fix SMP channel registration for unconfigured controllers") +Cc: stable@vger.kernel.org # 4.0 +Signed-off-by: Johan Hovold +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Greg Kroah-Hartman +--- + include/net/bluetooth/hci.h | 1 + + net/bluetooth/hci_sync.c | 3 +++ + 2 files changed, 4 insertions(+) + +--- a/include/net/bluetooth/hci.h ++++ b/include/net/bluetooth/hci.h +@@ -350,6 +350,7 @@ enum { + enum { + HCI_SETUP, + HCI_CONFIG, ++ HCI_DEBUGFS_CREATED, + HCI_AUTO_OFF, + HCI_RFKILLED, + HCI_MGMT, +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -4492,6 +4492,9 @@ static int hci_init_sync(struct hci_dev + !hci_dev_test_flag(hdev, HCI_CONFIG)) + return 0; + ++ if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) ++ return 0; ++ + hci_debugfs_create_common(hdev); + + if (lmp_bredr_capable(hdev)) diff --git a/queue-6.1/bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch b/queue-6.1/bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch new file mode 100644 index 00000000000..4d9dd579756 --- /dev/null +++ b/queue-6.1/bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch @@ -0,0 +1,48 @@ +From c5d2b6fa26b5b8386a9cc902cdece3a46bef2bd2 Mon Sep 17 00:00:00 2001 +From: Luiz Augusto von Dentz +Date: Tue, 30 May 2023 13:48:44 -0700 +Subject: Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk + +From: Luiz Augusto von Dentz + +commit c5d2b6fa26b5b8386a9cc902cdece3a46bef2bd2 upstream. + +Similar to commit 0f7d9b31ce7a ("netfilter: nf_tables: fix use-after-free +in nft_set_catchall_destroy()"). We can not access k after kfree_rcu() +call. + +Cc: stable@vger.kernel.org +Signed-off-by: Min Li +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Greg Kroah-Hartman +--- + net/bluetooth/hci_core.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -1416,10 +1416,10 @@ int hci_remove_link_key(struct hci_dev * + + int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) + { +- struct smp_ltk *k; ++ struct smp_ltk *k, *tmp; + int removed = 0; + +- list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { ++ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) + continue; + +@@ -1435,9 +1435,9 @@ int hci_remove_ltk(struct hci_dev *hdev, + + void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) + { +- struct smp_irk *k; ++ struct smp_irk *k, *tmp; + +- list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { ++ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) + continue; + diff --git a/queue-6.1/bluetooth-hci_qca-fix-debugfs-registration.patch b/queue-6.1/bluetooth-hci_qca-fix-debugfs-registration.patch new file mode 100644 index 00000000000..e10c7ab80bd --- /dev/null +++ b/queue-6.1/bluetooth-hci_qca-fix-debugfs-registration.patch @@ -0,0 +1,52 @@ +From 47c5d829a3e326b7395352a10fc8a6effe7afa15 Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Wed, 31 May 2023 10:57:59 +0200 +Subject: Bluetooth: hci_qca: fix debugfs registration + +From: Johan Hovold + +commit 47c5d829a3e326b7395352a10fc8a6effe7afa15 upstream. + +Since commit 3e4be65eb82c ("Bluetooth: hci_qca: Add poweroff support +during hci down for wcn3990"), the setup callback which registers the +debugfs interface can be called multiple times. 
+ +This specifically leads to the following error when powering on the +controller: + + debugfs: Directory 'ibs' with parent 'hci0' already present! + +Add a driver flag to avoid trying to register the debugfs interface more +than once. + +Fixes: 3e4be65eb82c ("Bluetooth: hci_qca: Add poweroff support during hci down for wcn3990") +Cc: stable@vger.kernel.org # 4.20 +Signed-off-by: Johan Hovold +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Greg Kroah-Hartman +--- + drivers/bluetooth/hci_qca.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/bluetooth/hci_qca.c ++++ b/drivers/bluetooth/hci_qca.c +@@ -78,7 +78,8 @@ enum qca_flags { + QCA_HW_ERROR_EVENT, + QCA_SSR_TRIGGERED, + QCA_BT_OFF, +- QCA_ROM_FW ++ QCA_ROM_FW, ++ QCA_DEBUGFS_CREATED, + }; + + enum qca_capabilities { +@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_ + if (!hdev->debugfs) + return; + ++ if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) ++ return; ++ + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); + + /* read only */ diff --git a/queue-6.1/can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch b/queue-6.1/can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch new file mode 100644 index 00000000000..6bc60ca3511 --- /dev/null +++ b/queue-6.1/can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch @@ -0,0 +1,144 @@ +From 9f16eb106aa5fce15904625661312623ec783ed3 Mon Sep 17 00:00:00 2001 +From: Fedor Pchelkin +Date: Fri, 26 May 2023 20:19:10 +0300 +Subject: can: j1939: avoid possible use-after-free when j1939_can_rx_register fails + +From: Fedor Pchelkin + +commit 9f16eb106aa5fce15904625661312623ec783ed3 upstream. + +Syzkaller reports the following failure: + +BUG: KASAN: use-after-free in kref_put include/linux/kref.h:64 [inline] +BUG: KASAN: use-after-free in j1939_priv_put+0x25/0xa0 net/can/j1939/main.c:172 +Write of size 4 at addr ffff888141c15058 by task swapper/3/0 + +CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.10.144-syzkaller #0 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 +Call Trace: + + __dump_stack lib/dump_stack.c:77 [inline] + dump_stack+0x107/0x167 lib/dump_stack.c:118 + print_address_description.constprop.0+0x1c/0x220 mm/kasan/report.c:385 + __kasan_report mm/kasan/report.c:545 [inline] + kasan_report.cold+0x1f/0x37 mm/kasan/report.c:562 + check_memory_region_inline mm/kasan/generic.c:186 [inline] + check_memory_region+0x145/0x190 mm/kasan/generic.c:192 + instrument_atomic_read_write include/linux/instrumented.h:101 [inline] + atomic_fetch_sub_release include/asm-generic/atomic-instrumented.h:220 [inline] + __refcount_sub_and_test include/linux/refcount.h:272 [inline] + __refcount_dec_and_test include/linux/refcount.h:315 [inline] + refcount_dec_and_test include/linux/refcount.h:333 [inline] + kref_put include/linux/kref.h:64 [inline] + j1939_priv_put+0x25/0xa0 net/can/j1939/main.c:172 + j1939_sk_sock_destruct+0x44/0x90 net/can/j1939/socket.c:374 + __sk_destruct+0x4e/0x820 net/core/sock.c:1784 + rcu_do_batch kernel/rcu/tree.c:2485 [inline] + rcu_core+0xb35/0x1a30 kernel/rcu/tree.c:2726 + __do_softirq+0x289/0x9a3 kernel/softirq.c:298 + asm_call_irq_on_stack+0x12/0x20 + + __run_on_irqstack arch/x86/include/asm/irq_stack.h:26 [inline] + run_on_irqstack_cond arch/x86/include/asm/irq_stack.h:77 [inline] + do_softirq_own_stack+0xaa/0xe0 arch/x86/kernel/irq_64.c:77 + invoke_softirq kernel/softirq.c:393 [inline] + __irq_exit_rcu kernel/softirq.c:423 
[inline] + irq_exit_rcu+0x136/0x200 kernel/softirq.c:435 + sysvec_apic_timer_interrupt+0x4d/0x100 arch/x86/kernel/apic/apic.c:1095 + asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:635 + +Allocated by task 1141: + kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 + kasan_set_track mm/kasan/common.c:56 [inline] + __kasan_kmalloc.constprop.0+0xc9/0xd0 mm/kasan/common.c:461 + kmalloc include/linux/slab.h:552 [inline] + kzalloc include/linux/slab.h:664 [inline] + j1939_priv_create net/can/j1939/main.c:131 [inline] + j1939_netdev_start+0x111/0x860 net/can/j1939/main.c:268 + j1939_sk_bind+0x8ea/0xd30 net/can/j1939/socket.c:485 + __sys_bind+0x1f2/0x260 net/socket.c:1645 + __do_sys_bind net/socket.c:1656 [inline] + __se_sys_bind net/socket.c:1654 [inline] + __x64_sys_bind+0x6f/0xb0 net/socket.c:1654 + do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46 + entry_SYSCALL_64_after_hwframe+0x61/0xc6 + +Freed by task 1141: + kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 + kasan_set_track+0x1c/0x30 mm/kasan/common.c:56 + kasan_set_free_info+0x1b/0x30 mm/kasan/generic.c:355 + __kasan_slab_free+0x112/0x170 mm/kasan/common.c:422 + slab_free_hook mm/slub.c:1542 [inline] + slab_free_freelist_hook+0xad/0x190 mm/slub.c:1576 + slab_free mm/slub.c:3149 [inline] + kfree+0xd9/0x3b0 mm/slub.c:4125 + j1939_netdev_start+0x5ee/0x860 net/can/j1939/main.c:300 + j1939_sk_bind+0x8ea/0xd30 net/can/j1939/socket.c:485 + __sys_bind+0x1f2/0x260 net/socket.c:1645 + __do_sys_bind net/socket.c:1656 [inline] + __se_sys_bind net/socket.c:1654 [inline] + __x64_sys_bind+0x6f/0xb0 net/socket.c:1654 + do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46 + entry_SYSCALL_64_after_hwframe+0x61/0xc6 + +It can be caused by this scenario: + +CPU0 CPU1 +j1939_sk_bind(socket0, ndev0, ...) + j1939_netdev_start() + j1939_sk_bind(socket1, ndev0, ...) + j1939_netdev_start() + mutex_lock(&j1939_netdev_lock) + j1939_priv_set(ndev0, priv) + mutex_unlock(&j1939_netdev_lock) + if (priv_new) + kref_get(&priv_new->rx_kref) + return priv_new; + /* inside j1939_sk_bind() */ + jsk->priv = priv + j1939_can_rx_register(priv) // fails + j1939_priv_set(ndev, NULL) + kfree(priv) + j1939_sk_sock_destruct() + j1939_priv_put() // <- uaf + +To avoid this, call j1939_can_rx_register() under j1939_netdev_lock so +that a concurrent thread cannot process j1939_priv before +j1939_can_rx_register() returns. + +Found by Linux Verification Center (linuxtesting.org) with Syzkaller. 
+ +Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol") +Signed-off-by: Fedor Pchelkin +Tested-by: Oleksij Rempel +Acked-by: Oleksij Rempel +Link: https://lore.kernel.org/r/20230526171910.227615-3-pchelkin@ispras.ru +Cc: stable@vger.kernel.org +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Greg Kroah-Hartman +--- + net/can/j1939/main.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/net/can/j1939/main.c ++++ b/net/can/j1939/main.c +@@ -290,16 +290,18 @@ struct j1939_priv *j1939_netdev_start(st + return priv_new; + } + j1939_priv_set(ndev, priv); +- mutex_unlock(&j1939_netdev_lock); + + ret = j1939_can_rx_register(priv); + if (ret < 0) + goto out_priv_put; + ++ mutex_unlock(&j1939_netdev_lock); + return priv; + + out_priv_put: + j1939_priv_set(ndev, NULL); ++ mutex_unlock(&j1939_netdev_lock); ++ + dev_put(ndev); + kfree(priv); + diff --git a/queue-6.1/can-j1939-change-j1939_netdev_lock-type-to-mutex.patch b/queue-6.1/can-j1939-change-j1939_netdev_lock-type-to-mutex.patch new file mode 100644 index 00000000000..78eef6f20ae --- /dev/null +++ b/queue-6.1/can-j1939-change-j1939_netdev_lock-type-to-mutex.patch @@ -0,0 +1,119 @@ +From cd9c790de2088b0d797dc4d244b4f174f9962554 Mon Sep 17 00:00:00 2001 +From: Fedor Pchelkin +Date: Fri, 26 May 2023 20:19:09 +0300 +Subject: can: j1939: change j1939_netdev_lock type to mutex + +From: Fedor Pchelkin + +commit cd9c790de2088b0d797dc4d244b4f174f9962554 upstream. + +It turns out access to j1939_can_rx_register() needs to be serialized, +otherwise j1939_priv can be corrupted when parallel threads call +j1939_netdev_start() and j1939_can_rx_register() fails. This issue is +thoroughly covered in other commit which serializes access to +j1939_can_rx_register(). + +Change j1939_netdev_lock type to mutex so that we do not need to remove +GFP_KERNEL from can_rx_register(). + +j1939_netdev_lock seems to be used in normal contexts where mutex usage +is not prohibited. + +Found by Linux Verification Center (linuxtesting.org) with Syzkaller. 
+ +Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol") +Suggested-by: Alexey Khoroshilov +Signed-off-by: Fedor Pchelkin +Tested-by: Oleksij Rempel +Acked-by: Oleksij Rempel +Link: https://lore.kernel.org/r/20230526171910.227615-2-pchelkin@ispras.ru +Cc: stable@vger.kernel.org +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Greg Kroah-Hartman +--- + net/can/j1939/main.c | 22 +++++++++++----------- + 1 file changed, 11 insertions(+), 11 deletions(-) + +--- a/net/can/j1939/main.c ++++ b/net/can/j1939/main.c +@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buf + #define J1939_CAN_ID CAN_EFF_FLAG + #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG) + +-static DEFINE_SPINLOCK(j1939_netdev_lock); ++static DEFINE_MUTEX(j1939_netdev_lock); + + static struct j1939_priv *j1939_priv_create(struct net_device *ndev) + { +@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kr + j1939_can_rx_unregister(priv); + j1939_ecu_unmap_all(priv); + j1939_priv_set(priv->ndev, NULL); +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + } + + /* get pointer to priv without increasing ref counter */ +@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get + { + struct j1939_priv *priv; + +- spin_lock(&j1939_netdev_lock); ++ mutex_lock(&j1939_netdev_lock); + priv = j1939_priv_get_by_ndev_locked(ndev); +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + + return priv; + } +@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(st + struct j1939_priv *priv, *priv_new; + int ret; + +- spin_lock(&j1939_netdev_lock); ++ mutex_lock(&j1939_netdev_lock); + priv = j1939_priv_get_by_ndev_locked(ndev); + if (priv) { + kref_get(&priv->rx_kref); +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + return priv; + } +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + + priv = j1939_priv_create(ndev); + if (!priv) +@@ -277,20 +277,20 @@ struct j1939_priv *j1939_netdev_start(st + spin_lock_init(&priv->j1939_socks_lock); + INIT_LIST_HEAD(&priv->j1939_socks); + +- spin_lock(&j1939_netdev_lock); ++ mutex_lock(&j1939_netdev_lock); + priv_new = j1939_priv_get_by_ndev_locked(ndev); + if (priv_new) { + /* Someone was faster than us, use their priv and roll + * back our's. 
+ */ + kref_get(&priv_new->rx_kref); +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + dev_put(ndev); + kfree(priv); + return priv_new; + } + j1939_priv_set(ndev, priv); +- spin_unlock(&j1939_netdev_lock); ++ mutex_unlock(&j1939_netdev_lock); + + ret = j1939_can_rx_register(priv); + if (ret < 0) +@@ -308,7 +308,7 @@ struct j1939_priv *j1939_netdev_start(st + + void j1939_netdev_stop(struct j1939_priv *priv) + { +- kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); ++ kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); + j1939_priv_put(priv); + } + diff --git a/queue-6.1/can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch b/queue-6.1/can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch new file mode 100644 index 00000000000..8a2b49fe4cc --- /dev/null +++ b/queue-6.1/can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch @@ -0,0 +1,63 @@ +From 2a84aea80e925ecba6349090559754f8e8eb68ef Mon Sep 17 00:00:00 2001 +From: Oleksij Rempel +Date: Fri, 26 May 2023 10:19:46 +0200 +Subject: can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in J1939 Socket + +From: Oleksij Rempel + +commit 2a84aea80e925ecba6349090559754f8e8eb68ef upstream. + +This patch addresses an issue within the j1939_sk_send_loop_abort() +function in the j1939/socket.c file, specifically in the context of +Transport Protocol (TP) sessions. + +Without this patch, when a TP session is initiated and a Clear To Send +(CTS) frame is received from the remote side requesting one data packet, +the kernel dispatches the first Data Transport (DT) frame and then waits +for the next CTS. If the remote side doesn't respond with another CTS, +the kernel aborts due to a timeout. This leads to the user-space +receiving an EPOLLERR on the socket, and the socket becomes active. + +However, when trying to read the error queue from the socket with +sock.recvmsg(, , socket.MSG_ERRQUEUE), it returns -EAGAIN, +given that the socket is non-blocking. This situation results in an +infinite loop: the user-space repeatedly calls epoll(), epoll() returns +the socket file descriptor with EPOLLERR, but the socket then blocks on +the recv() of ERRQUEUE. + +This patch introduces an additional check for the J1939_SOCK_ERRQUEUE +flag within the j1939_sk_send_loop_abort() function. If the flag is set, +it indicates that the application has subscribed to receive error queue +messages. In such cases, the kernel can communicate the current transfer +state via the error queue. This allows for the function to return early, +preventing the unnecessary setting of the socket into an error state, +and breaking the infinite loop. It is crucial to note that a socket +error is only needed if the application isn't using the error queue, as, +without it, the application wouldn't be aware of transfer issues. 
+ +Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol") +Reported-by: David Jander +Tested-by: David Jander +Signed-off-by: Oleksij Rempel +Link: https://lore.kernel.org/r/20230526081946.715190-1-o.rempel@pengutronix.de +Cc: stable@vger.kernel.org +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Greg Kroah-Hartman +--- + net/can/j1939/socket.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/net/can/j1939/socket.c ++++ b/net/can/j1939/socket.c +@@ -1088,6 +1088,11 @@ void j1939_sk_errqueue(struct j1939_sess + + void j1939_sk_send_loop_abort(struct sock *sk, int err) + { ++ struct j1939_sock *jsk = j1939_sk(sk); ++ ++ if (jsk->state & J1939_SOCK_ERRQUEUE) ++ return; ++ + sk->sk_err = err; + + sk_error_report(sk); diff --git a/queue-6.1/ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch b/queue-6.1/ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch new file mode 100644 index 00000000000..037d4f4ca0a --- /dev/null +++ b/queue-6.1/ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch @@ -0,0 +1,90 @@ +From 409e873ea3c1fd3079909718bbeb06ac1ec7f38b Mon Sep 17 00:00:00 2001 +From: Xiubo Li +Date: Thu, 1 Jun 2023 08:59:31 +0800 +Subject: ceph: fix use-after-free bug for inodes when flushing capsnaps + +From: Xiubo Li + +commit 409e873ea3c1fd3079909718bbeb06ac1ec7f38b upstream. + +There is a race between capsnaps flush and removing the inode from +'mdsc->snap_flush_list' list: + + == Thread A == == Thread B == +ceph_queue_cap_snap() + -> allocate 'capsnapA' + ->ihold('&ci->vfs_inode') + ->add 'capsnapA' to 'ci->i_cap_snaps' + ->add 'ci' to 'mdsc->snap_flush_list' + ... + == Thread C == +ceph_flush_snaps() + ->__ceph_flush_snaps() + ->__send_flush_snap() + handle_cap_flushsnap_ack() + ->iput('&ci->vfs_inode') + this also will release 'ci' + ... + == Thread D == + ceph_handle_snap() + ->flush_snaps() + ->iterate 'mdsc->snap_flush_list' + ->get the stale 'ci' + ->remove 'ci' from ->ihold(&ci->vfs_inode) this + 'mdsc->snap_flush_list' will WARNING + +To fix this we will increase the inode's i_count ref when adding 'ci' +to the 'mdsc->snap_flush_list' list. 
+ +[ idryomov: need_put int -> bool ] + +Cc: stable@vger.kernel.org +Link: https://bugzilla.redhat.com/show_bug.cgi?id=2209299 +Signed-off-by: Xiubo Li +Reviewed-by: Milind Changire +Reviewed-by: Ilya Dryomov +Signed-off-by: Ilya Dryomov +Signed-off-by: Greg Kroah-Hartman +--- + fs/ceph/caps.c | 6 ++++++ + fs/ceph/snap.c | 4 +++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -1626,6 +1626,7 @@ void ceph_flush_snaps(struct ceph_inode_ + struct inode *inode = &ci->netfs.inode; + struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; + struct ceph_mds_session *session = NULL; ++ bool need_put = false; + int mds; + + dout("ceph_flush_snaps %p\n", inode); +@@ -1670,8 +1671,13 @@ out: + ceph_put_mds_session(session); + /* we flushed them all; remove this inode from the queue */ + spin_lock(&mdsc->snap_flush_lock); ++ if (!list_empty(&ci->i_snap_flush_item)) ++ need_put = true; + list_del_init(&ci->i_snap_flush_item); + spin_unlock(&mdsc->snap_flush_lock); ++ ++ if (need_put) ++ iput(inode); + } + + /* +--- a/fs/ceph/snap.c ++++ b/fs/ceph/snap.c +@@ -693,8 +693,10 @@ int __ceph_finish_cap_snap(struct ceph_i + capsnap->size); + + spin_lock(&mdsc->snap_flush_lock); +- if (list_empty(&ci->i_snap_flush_item)) ++ if (list_empty(&ci->i_snap_flush_item)) { ++ ihold(inode); + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); ++ } + spin_unlock(&mdsc->snap_flush_lock); + return 1; /* caller may want to ceph_flush_snaps */ + } diff --git a/queue-6.1/drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch b/queue-6.1/drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch new file mode 100644 index 00000000000..5ee01c28565 --- /dev/null +++ b/queue-6.1/drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch @@ -0,0 +1,37 @@ +From e1a600208286c197c2696e51fc313e49889315bd Mon Sep 17 00:00:00 2001 +From: Alvin Lee +Date: Fri, 19 May 2023 11:38:15 -0400 +Subject: drm/amd/display: Reduce sdp bw after urgent to 90% + +From: Alvin Lee + +commit e1a600208286c197c2696e51fc313e49889315bd upstream. 
+ +[Description] +Reduce expected SDP bandwidth due to poor QoS and +arbitration issues on high bandwidth configs + +Cc: Mario Limonciello +Cc: Alex Deucher +Cc: stable@vger.kernel.org +Acked-by: Stylon Wang +Signed-off-by: Alvin Lee +Reviewed-by: Nevenko Stupar +Tested-by: Daniel Wheeler +Signed-off-by: Alex Deucher +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +@@ -137,7 +137,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3 + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, +- .pct_ideal_sdp_bw_after_urgent = 100.0, ++ .pct_ideal_sdp_bw_after_urgent = 90.0, + .pct_ideal_fabric_bw_after_urgent = 67.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented diff --git a/queue-6.1/drm-amd-pm-fix-power-context-allocation-in-smu13.patch b/queue-6.1/drm-amd-pm-fix-power-context-allocation-in-smu13.patch new file mode 100644 index 00000000000..cfa5ed57141 --- /dev/null +++ b/queue-6.1/drm-amd-pm-fix-power-context-allocation-in-smu13.patch @@ -0,0 +1,36 @@ +From 1d13c49cf4e246b218d71873f1bb1bbd376aa10e Mon Sep 17 00:00:00 2001 +From: Lijo Lazar +Date: Fri, 31 Mar 2023 16:30:01 +0530 +Subject: drm/amd/pm: Fix power context allocation in SMU13 + +From: Lijo Lazar + +commit 1d13c49cf4e246b218d71873f1bb1bbd376aa10e upstream. + +Use the right data structure for allocation. + +Signed-off-by: Lijo Lazar +Reviewed-by: Hawking Zhang +Signed-off-by: Alex Deucher +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +@@ -582,11 +582,11 @@ int smu_v13_0_init_power(struct smu_cont + if (smu_power->power_context || smu_power->power_context_size != 0) + return -EINVAL; + +- smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), ++ smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), + GFP_KERNEL); + if (!smu_power->power_context) + return -ENOMEM; +- smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); ++ smu_power->power_context_size = sizeof(struct smu_13_0_power_context); + + return 0; + } diff --git a/queue-6.1/mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch b/queue-6.1/mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch new file mode 100644 index 00000000000..6b87ba96c08 --- /dev/null +++ b/queue-6.1/mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch @@ -0,0 +1,77 @@ +From 44d0fb387b53e56c8a050bac5c7d460e21eb226f Mon Sep 17 00:00:00 2001 +From: Ruihan Li +Date: Mon, 15 May 2023 21:09:58 +0800 +Subject: mm: page_table_check: Ensure user pages are not slab pages + +From: Ruihan Li + +commit 44d0fb387b53e56c8a050bac5c7d460e21eb226f upstream. + +The current uses of PageAnon in page table check functions can lead to +type confusion bugs between struct page and slab [1], if slab pages are +accidentally mapped into the user space. 
This is because slab reuses the +bits in struct page to store its internal states, which renders PageAnon +ineffective on slab pages. + +Since slab pages are not expected to be mapped into the user space, this +patch adds BUG_ON(PageSlab(page)) checks to make sure that slab pages +are not inadvertently mapped. Otherwise, there must be some bugs in the +kernel. + +Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.com +Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1] +Fixes: df4e817b7108 ("mm: page table check") +Cc: # 5.17 +Signed-off-by: Ruihan Li +Acked-by: Pasha Tatashin +Link: https://lore.kernel.org/r/20230515130958.32471-5-lrh2000@pku.edu.cn +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/page-flags.h | 6 ++++++ + mm/page_table_check.c | 6 ++++++ + 2 files changed, 12 insertions(+) + +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -631,6 +631,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemma + * Please note that, confusingly, "page_mapping" refers to the inode + * address_space which maps the page from disk; whereas "page_mapped" + * refers to user virtual address space into which the page is mapped. ++ * ++ * For slab pages, since slab reuses the bits in struct page to store its ++ * internal states, the page->mapping does not exist as such, nor do these ++ * flags below. So in order to avoid testing non-existent bits, please ++ * make sure that PageSlab(page) actually evaluates to false before calling ++ * the following functions (e.g., PageAnon). See mm/slab.h. + */ + #define PAGE_MAPPING_ANON 0x1 + #define PAGE_MAPPING_MOVABLE 0x2 +--- a/mm/page_table_check.c ++++ b/mm/page_table_check.c +@@ -69,6 +69,8 @@ static void page_table_check_clear(struc + + page = pfn_to_page(pfn); + page_ext = page_ext_get(page); ++ ++ BUG_ON(PageSlab(page)); + anon = PageAnon(page); + + for (i = 0; i < pgcnt; i++) { +@@ -105,6 +107,8 @@ static void page_table_check_set(struct + + page = pfn_to_page(pfn); + page_ext = page_ext_get(page); ++ ++ BUG_ON(PageSlab(page)); + anon = PageAnon(page); + + for (i = 0; i < pgcnt; i++) { +@@ -131,6 +135,8 @@ void __page_table_check_zero(struct page + struct page_ext *page_ext; + unsigned long i; + ++ BUG_ON(PageSlab(page)); ++ + page_ext = page_ext_get(page); + BUG_ON(!page_ext); + for (i = 0; i < (1ul << order); i++) { diff --git a/queue-6.1/mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch b/queue-6.1/mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch new file mode 100644 index 00000000000..1550b5e2c85 --- /dev/null +++ b/queue-6.1/mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch @@ -0,0 +1,71 @@ +From 81a31a860bb61d54eb688af2568d9332ed9b8942 Mon Sep 17 00:00:00 2001 +From: Ruihan Li +Date: Mon, 15 May 2023 21:09:57 +0800 +Subject: mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM + +From: Ruihan Li + +commit 81a31a860bb61d54eb688af2568d9332ed9b8942 upstream. + +Without EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary +physical memory regions into the userspace via /dev/mem. At the same +time, pages may change their properties (e.g., from anonymous pages to +named pages) while they are still being mapped in the userspace, leading +to "corruption" detected by the page table check. + +To avoid these false positives, this patch makes PAGE_TABLE_CHECK +depends on EXCLUSIVE_SYSTEM_RAM. 
This dependency is understandable +because PAGE_TABLE_CHECK is a hardening technique but /dev/mem without +STRICT_DEVMEM (i.e., !EXCLUSIVE_SYSTEM_RAM) is itself a security +problem. + +Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be +mapped via /dev/mem. However, these pages are always considered as named +pages, so they won't break the logic used in the page table check. + +Cc: # 5.17 +Signed-off-by: Ruihan Li +Acked-by: David Hildenbrand +Acked-by: Pasha Tatashin +Link: https://lore.kernel.org/r/20230515130958.32471-4-lrh2000@pku.edu.cn +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/mm/page_table_check.rst | 19 +++++++++++++++++++ + mm/Kconfig.debug | 1 + + 2 files changed, 20 insertions(+) + +--- a/Documentation/mm/page_table_check.rst ++++ b/Documentation/mm/page_table_check.rst +@@ -54,3 +54,22 @@ Build kernel with: + + Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page + table support without extra kernel parameter. ++ ++Implementation notes ++==================== ++ ++We specifically decided not to use VMA information in order to avoid relying on ++MM states (except for limited "struct page" info). The page table check is a ++separate from Linux-MM state machine that verifies that the user accessible ++pages are not falsely shared. ++ ++PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without ++EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory ++regions into the userspace via /dev/mem. At the same time, pages may change ++their properties (e.g., from anonymous pages to named pages) while they are ++still being mapped in the userspace, leading to "corruption" detected by the ++page table check. ++ ++Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via ++/dev/mem. However, these pages are always considered as named pages, so they ++won't break the logic used in the page table check. +--- a/mm/Kconfig.debug ++++ b/mm/Kconfig.debug +@@ -98,6 +98,7 @@ config PAGE_OWNER + config PAGE_TABLE_CHECK + bool "Check for invalid mappings in user page tables" + depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK ++ depends on EXCLUSIVE_SYSTEM_RAM + select PAGE_EXTENSION + help + Check that anonymous page is not being mapped twice with read write diff --git a/queue-6.1/mptcp-add-address-into-userspace-pm-list.patch b/queue-6.1/mptcp-add-address-into-userspace-pm-list.patch new file mode 100644 index 00000000000..db4f02e44c4 --- /dev/null +++ b/queue-6.1/mptcp-add-address-into-userspace-pm-list.patch @@ -0,0 +1,107 @@ +From 24430f8bf51655c5ab7ddc2fafe939dd3cd0dd47 Mon Sep 17 00:00:00 2001 +From: Geliang Tang +Date: Sun, 4 Jun 2023 20:25:19 -0700 +Subject: mptcp: add address into userspace pm list + +From: Geliang Tang + +commit 24430f8bf51655c5ab7ddc2fafe939dd3cd0dd47 upstream. + +Add the address into userspace_pm_local_addr_list when the subflow is +created. Make sure it can be found in mptcp_nl_cmd_remove(). And delete +it in the new helper mptcp_userspace_pm_delete_local_addr(). + +By doing this, the "REMOVE" command also works with subflows that have +been created via the "SUB_CREATE" command instead of restricting to +the addresses that have been announced via the "ANNOUNCE" command. + +Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE") +Link: https://github.com/multipath-tcp/mptcp_net-next/issues/379 +Cc: stable@vger.kernel.org +Reviewed-by: Matthieu Baerts +Signed-off-by: Geliang Tang +Signed-off-by: Mat Martineau +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/pm_userspace.c | 41 +++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 41 insertions(+) + +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -78,6 +78,30 @@ int mptcp_userspace_pm_append_new_local_ + return ret; + } + ++/* If the subflow is closed from the other peer (not via a ++ * subflow destroy command then), we want to keep the entry ++ * not to assign the same ID to another address and to be ++ * able to send RM_ADDR after the removal of the subflow. ++ */ ++static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk, ++ struct mptcp_pm_addr_entry *addr) ++{ ++ struct mptcp_pm_addr_entry *entry, *tmp; ++ ++ list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) { ++ if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) { ++ /* TODO: a refcount is needed because the entry can ++ * be used multiple times (e.g. fullmesh mode). ++ */ ++ list_del_rcu(&entry->list); ++ kfree(entry); ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ + int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, + unsigned int id, + u8 *flags, int *ifindex) +@@ -250,6 +274,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buf + struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; + struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; + struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; ++ struct mptcp_pm_addr_entry local = { 0 }; + struct mptcp_addr_info addr_r; + struct mptcp_addr_info addr_l; + struct mptcp_sock *msk; +@@ -301,12 +326,24 @@ int mptcp_nl_cmd_sf_create(struct sk_buf + goto create_err; + } + ++ local.addr = addr_l; ++ err = mptcp_userspace_pm_append_new_local_addr(msk, &local); ++ if (err < 0) { ++ GENL_SET_ERR_MSG(info, "did not match address and id"); ++ goto create_err; ++ } ++ + lock_sock(sk); + + err = __mptcp_subflow_connect(sk, &addr_l, &addr_r); + + release_sock(sk); + ++ spin_lock_bh(&msk->pm.lock); ++ if (err) ++ mptcp_userspace_pm_delete_local_addr(msk, &local); ++ spin_unlock_bh(&msk->pm.lock); ++ + create_err: + sock_put((struct sock *)msk); + return err; +@@ -419,7 +456,11 @@ int mptcp_nl_cmd_sf_destroy(struct sk_bu + ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r); + if (ssk) { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); ++ struct mptcp_pm_addr_entry entry = { .addr = addr_l }; + ++ spin_lock_bh(&msk->pm.lock); ++ mptcp_userspace_pm_delete_local_addr(msk, &entry); ++ spin_unlock_bh(&msk->pm.lock); + mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN); + mptcp_close_ssk(sk, ssk, subflow); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW); diff --git a/queue-6.1/mptcp-only-send-rm_addr-in-nl_cmd_remove.patch b/queue-6.1/mptcp-only-send-rm_addr-in-nl_cmd_remove.patch new file mode 100644 index 00000000000..23ef1a2ffb4 --- /dev/null +++ b/queue-6.1/mptcp-only-send-rm_addr-in-nl_cmd_remove.patch @@ -0,0 +1,85 @@ +From 8b1c94da1e481090f24127b2c420b0c0b0421ce3 Mon Sep 17 00:00:00 2001 +From: Geliang Tang +Date: Sun, 4 Jun 2023 20:25:17 -0700 +Subject: mptcp: only send RM_ADDR in nl_cmd_remove + +From: Geliang Tang + +commit 8b1c94da1e481090f24127b2c420b0c0b0421ce3 upstream. + +The specifications from [1] about the "REMOVE" command say: + + Announce that an address has been lost to the peer + +It was then only supposed to send a RM_ADDR and not trying to delete +associated subflows. 
+ +A new helper mptcp_pm_remove_addrs() is then introduced to do just +that, compared to mptcp_pm_remove_addrs_and_subflows() also removing +subflows. + +To delete a subflow, the userspace daemon can use the "SUB_DESTROY" +command, see mptcp_nl_cmd_sf_destroy(). + +Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE") +Link: https://github.com/multipath-tcp/mptcp/blob/mptcp_v0.96/include/uapi/linux/mptcp.h [1] +Cc: stable@vger.kernel.org +Reviewed-by: Matthieu Baerts +Signed-off-by: Geliang Tang +Signed-off-by: Mat Martineau +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/pm_netlink.c | 18 ++++++++++++++++++ + net/mptcp/pm_userspace.c | 2 +- + net/mptcp/protocol.h | 1 + + 3 files changed, 20 insertions(+), 1 deletion(-) + +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -1550,6 +1550,24 @@ static int mptcp_nl_cmd_del_addr(struct + return ret; + } + ++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) ++{ ++ struct mptcp_rm_list alist = { .nr = 0 }; ++ struct mptcp_pm_addr_entry *entry; ++ ++ list_for_each_entry(entry, rm_list, list) { ++ remove_anno_list_by_saddr(msk, &entry->addr); ++ if (alist.nr < MPTCP_RM_IDS_MAX) ++ alist.ids[alist.nr++] = entry->addr.id; ++ } ++ ++ if (alist.nr) { ++ spin_lock_bh(&msk->pm.lock); ++ mptcp_pm_remove_addr(msk, &alist); ++ spin_unlock_bh(&msk->pm.lock); ++ } ++} ++ + void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list) + { +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -231,7 +231,7 @@ int mptcp_nl_cmd_remove(struct sk_buff * + + list_move(&match->list, &free_list); + +- mptcp_pm_remove_addrs_and_subflows(msk, &free_list); ++ mptcp_pm_remove_addrs(msk, &free_list); + + release_sock((struct sock *)msk); + +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -820,6 +820,7 @@ int mptcp_pm_announce_addr(struct mptcp_ + bool echo); + int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); + int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); ++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list); + void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list); + diff --git a/queue-6.1/mptcp-update-userspace-pm-infos.patch b/queue-6.1/mptcp-update-userspace-pm-infos.patch new file mode 100644 index 00000000000..0de447491d0 --- /dev/null +++ b/queue-6.1/mptcp-update-userspace-pm-infos.patch @@ -0,0 +1,107 @@ +From 77e4b94a3de692a09b79945ecac5b8e6b77f10c1 Mon Sep 17 00:00:00 2001 +From: Geliang Tang +Date: Sun, 4 Jun 2023 20:25:21 -0700 +Subject: mptcp: update userspace pm infos + +From: Geliang Tang + +commit 77e4b94a3de692a09b79945ecac5b8e6b77f10c1 upstream. + +Increase pm subflows counter on both server side and client side when +userspace pm creates a new subflow, and decrease the counter when it +closes a subflow. + +Increase add_addr_signaled counter in mptcp_nl_cmd_announce() when the +address is announced by userspace PM. + +This modification is similar to how the in-kernel PM is updating the +counter: when additional subflows are created/removed. 
+ +Fixes: 9ab4807c84a4 ("mptcp: netlink: Add MPTCP_PM_CMD_ANNOUNCE") +Fixes: 702c2f646d42 ("mptcp: netlink: allow userspace-driven subflow establishment") +Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/329 +Cc: stable@vger.kernel.org +Reviewed-by: Matthieu Baerts +Signed-off-by: Geliang Tang +Signed-off-by: Mat Martineau +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/pm.c | 23 +++++++++++++++++++---- + net/mptcp/pm_userspace.c | 5 +++++ + 2 files changed, 24 insertions(+), 4 deletions(-) + +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -87,8 +87,15 @@ bool mptcp_pm_allow_new_subflow(struct m + unsigned int subflows_max; + int ret = 0; + +- if (mptcp_pm_is_userspace(msk)) +- return mptcp_userspace_pm_active(msk); ++ if (mptcp_pm_is_userspace(msk)) { ++ if (mptcp_userspace_pm_active(msk)) { ++ spin_lock_bh(&pm->lock); ++ pm->subflows++; ++ spin_unlock_bh(&pm->lock); ++ return true; ++ } ++ return false; ++ } + + subflows_max = mptcp_pm_get_subflows_max(msk); + +@@ -181,8 +188,16 @@ void mptcp_pm_subflow_check_next(struct + struct mptcp_pm_data *pm = &msk->pm; + bool update_subflows; + +- update_subflows = (subflow->request_join || subflow->mp_join) && +- mptcp_pm_is_kernel(msk); ++ update_subflows = subflow->request_join || subflow->mp_join; ++ if (mptcp_pm_is_userspace(msk)) { ++ if (update_subflows) { ++ spin_lock_bh(&pm->lock); ++ pm->subflows--; ++ spin_unlock_bh(&pm->lock); ++ } ++ return; ++ } ++ + if (!READ_ONCE(pm->work_pending) && !update_subflows) + return; + +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -69,6 +69,7 @@ int mptcp_userspace_pm_append_new_local_ + MPTCP_PM_MAX_ADDR_ID + 1, + 1); + list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list); ++ msk->pm.local_addr_used++; + ret = e->addr.id; + } else if (match) { + ret = entry->addr.id; +@@ -95,6 +96,7 @@ static int mptcp_userspace_pm_delete_loc + */ + list_del_rcu(&entry->list); + kfree(entry); ++ msk->pm.local_addr_used--; + return 0; + } + } +@@ -194,6 +196,7 @@ int mptcp_nl_cmd_announce(struct sk_buff + spin_lock_bh(&msk->pm.lock); + + if (mptcp_pm_alloc_anno_list(msk, &addr_val)) { ++ msk->pm.add_addr_signaled++; + mptcp_pm_announce_addr(msk, &addr_val.addr, false); + mptcp_pm_nl_addr_send_ack(msk); + } +@@ -342,6 +345,8 @@ int mptcp_nl_cmd_sf_create(struct sk_buf + spin_lock_bh(&msk->pm.lock); + if (err) + mptcp_userspace_pm_delete_local_addr(msk, &local); ++ else ++ msk->pm.subflows++; + spin_unlock_bh(&msk->pm.lock); + + create_err: diff --git a/queue-6.1/pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch b/queue-6.1/pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch new file mode 100644 index 00000000000..562ad5c334e --- /dev/null +++ b/queue-6.1/pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch @@ -0,0 +1,36 @@ +From 5b10ff013e8a57f8845615ac2cc37edf7f6eef05 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Martin=20Hundeb=C3=B8ll?= +Date: Fri, 12 May 2023 08:49:25 +0200 +Subject: pinctrl: meson-axg: add missing GPIOA_18 gpio group +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Martin Hundebøll + +commit 5b10ff013e8a57f8845615ac2cc37edf7f6eef05 upstream. + +Without this, the gpio cannot be explicitly mux'ed to its gpio function. 
+ +Fixes: 83c566806a68a ("pinctrl: meson-axg: Add new pinctrl driver for Meson AXG SoC") +Cc: stable@vger.kernel.org +Signed-off-by: Martin Hundebøll +Reviewed-by: Neil Armstrong +Reviewed-by: Dmitry Rokosov +Link: https://lore.kernel.org/r/20230512064925.133516-1-martin@geanix.com +Signed-off-by: Linus Walleij +Signed-off-by: Greg Kroah-Hartman +--- + drivers/pinctrl/meson/pinctrl-meson-axg.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c ++++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c +@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_ + GPIO_GROUP(GPIOA_15), + GPIO_GROUP(GPIOA_16), + GPIO_GROUP(GPIOA_17), ++ GPIO_GROUP(GPIOA_18), + GPIO_GROUP(GPIOA_19), + GPIO_GROUP(GPIOA_20), + diff --git a/queue-6.1/rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch b/queue-6.1/rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch new file mode 100644 index 00000000000..1f797cce901 --- /dev/null +++ b/queue-6.1/rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch @@ -0,0 +1,119 @@ +From 870611e4877eff1e8413c3fb92a585e45d5291f6 Mon Sep 17 00:00:00 2001 +From: Ilya Dryomov +Date: Mon, 5 Jun 2023 16:33:35 +0200 +Subject: rbd: get snapshot context after exclusive lock is ensured to be held + +From: Ilya Dryomov + +commit 870611e4877eff1e8413c3fb92a585e45d5291f6 upstream. + +Move capturing the snapshot context into the image request state +machine, after exclusive lock is ensured to be held for the duration of +dealing with the image request. This is needed to ensure correctness +of fast-diff states (OBJECT_EXISTS vs OBJECT_EXISTS_CLEAN) and object +deltas computed based off of them. Otherwise the object map that is +forked for the snapshot isn't guaranteed to accurately reflect the +contents of the snapshot when the snapshot is taken under I/O. This +breaks differential backup and snapshot-based mirroring use cases with +fast-diff enabled: since some object deltas may be incomplete, the +destination image may get corrupted. + +Cc: stable@vger.kernel.org +Link: https://tracker.ceph.com/issues/61472 +Signed-off-by: Ilya Dryomov +Reviewed-by: Dongsheng Yang +Signed-off-by: Greg Kroah-Hartman +--- + drivers/block/rbd.c | 30 +++++++++++++++++++++++------- + 1 file changed, 23 insertions(+), 7 deletions(-) + +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -1336,6 +1336,8 @@ static bool rbd_obj_is_tail(struct rbd_o + */ + static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req) + { ++ rbd_assert(obj_req->img_request->snapc); ++ + if (obj_req->img_request->op_type == OBJ_OP_DISCARD) { + dout("%s %p objno %llu discard\n", __func__, obj_req, + obj_req->ex.oe_objno); +@@ -1456,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj + static struct ceph_osd_request * + rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops) + { ++ rbd_assert(obj_req->img_request->snapc); + return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc, + num_ops); + } +@@ -1592,15 +1595,18 @@ static void rbd_img_request_init(struct + mutex_init(&img_request->state_mutex); + } + ++/* ++ * Only snap_id is captured here, for reads. For writes, snapshot ++ * context is captured in rbd_img_object_requests() after exclusive ++ * lock is ensured to be held. 
++ */ + static void rbd_img_capture_header(struct rbd_img_request *img_req) + { + struct rbd_device *rbd_dev = img_req->rbd_dev; + + lockdep_assert_held(&rbd_dev->header_rwsem); + +- if (rbd_img_is_write(img_req)) +- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); +- else ++ if (!rbd_img_is_write(img_req)) + img_req->snap_id = rbd_dev->spec->snap_id; + + if (rbd_dev_parent_get(rbd_dev)) +@@ -3483,9 +3489,19 @@ static int rbd_img_exclusive_lock(struct + + static void rbd_img_object_requests(struct rbd_img_request *img_req) + { ++ struct rbd_device *rbd_dev = img_req->rbd_dev; + struct rbd_obj_request *obj_req; + + rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); ++ rbd_assert(!need_exclusive_lock(img_req) || ++ __rbd_is_lock_owner(rbd_dev)); ++ ++ if (rbd_img_is_write(img_req)) { ++ rbd_assert(!img_req->snapc); ++ down_read(&rbd_dev->header_rwsem); ++ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); ++ up_read(&rbd_dev->header_rwsem); ++ } + + for_each_obj_request(img_req, obj_req) { + int result = 0; +@@ -3503,7 +3519,6 @@ static void rbd_img_object_requests(stru + + static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) + { +- struct rbd_device *rbd_dev = img_req->rbd_dev; + int ret; + + again: +@@ -3524,9 +3539,6 @@ again: + if (*result) + return true; + +- rbd_assert(!need_exclusive_lock(img_req) || +- __rbd_is_lock_owner(rbd_dev)); +- + rbd_img_object_requests(img_req); + if (!img_req->pending.num_pending) { + *result = img_req->pending.result; +@@ -3988,6 +4000,10 @@ static int rbd_post_acquire_action(struc + { + int ret; + ++ ret = rbd_dev_refresh(rbd_dev); ++ if (ret) ++ return ret; ++ + if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { + ret = rbd_object_map_open(rbd_dev); + if (ret) diff --git a/queue-6.1/rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch b/queue-6.1/rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch new file mode 100644 index 00000000000..b57aa2ac319 --- /dev/null +++ b/queue-6.1/rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch @@ -0,0 +1,85 @@ +From 09fe05c57b5aaf23e2c35036c98ea9f282b19a77 Mon Sep 17 00:00:00 2001 +From: Ilya Dryomov +Date: Mon, 5 Jun 2023 16:33:35 +0200 +Subject: rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting + +From: Ilya Dryomov + +commit 09fe05c57b5aaf23e2c35036c98ea9f282b19a77 upstream. + +Move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting into the object request +state machine to allow for the snapshot context to be captured in the +image request state machine rather than in rbd_queue_workfn(). + +Cc: stable@vger.kernel.org +Signed-off-by: Ilya Dryomov +Reviewed-by: Dongsheng Yang +Signed-off-by: Greg Kroah-Hartman +--- + drivers/block/rbd.c | 32 +++++++++++++++++++++----------- + 1 file changed, 21 insertions(+), 11 deletions(-) + +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -1334,14 +1334,28 @@ static bool rbd_obj_is_tail(struct rbd_o + /* + * Must be called after rbd_obj_calc_img_extents(). 
+ */ +-static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) ++static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req) + { +- if (!obj_req->num_img_extents || +- (rbd_obj_is_entire(obj_req) && +- !obj_req->img_request->snapc->num_snaps)) +- return false; ++ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) { ++ dout("%s %p objno %llu discard\n", __func__, obj_req, ++ obj_req->ex.oe_objno); ++ return; ++ } + +- return true; ++ if (!obj_req->num_img_extents) { ++ dout("%s %p objno %llu not overlapping\n", __func__, obj_req, ++ obj_req->ex.oe_objno); ++ return; ++ } ++ ++ if (rbd_obj_is_entire(obj_req) && ++ !obj_req->img_request->snapc->num_snaps) { ++ dout("%s %p objno %llu entire\n", __func__, obj_req, ++ obj_req->ex.oe_objno); ++ return; ++ } ++ ++ obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; + } + + static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) +@@ -2233,9 +2247,6 @@ static int rbd_obj_init_write(struct rbd + if (ret) + return ret; + +- if (rbd_obj_copyup_enabled(obj_req)) +- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; +- + obj_req->write_state = RBD_OBJ_WRITE_START; + return 0; + } +@@ -2341,8 +2352,6 @@ static int rbd_obj_init_zeroout(struct r + if (ret) + return ret; + +- if (rbd_obj_copyup_enabled(obj_req)) +- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; + if (!obj_req->num_img_extents) { + obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; + if (rbd_obj_is_entire(obj_req)) +@@ -3287,6 +3296,7 @@ again: + case RBD_OBJ_WRITE_START: + rbd_assert(!*result); + ++ rbd_obj_set_copyup_enabled(obj_req); + if (rbd_obj_write_is_noop(obj_req)) + return true; + diff --git a/queue-6.1/s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch b/queue-6.1/s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch new file mode 100644 index 00000000000..90393b6c4b2 --- /dev/null +++ b/queue-6.1/s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch @@ -0,0 +1,51 @@ +From ccc45cb4e7271c74dbb27776ae8f73d84557f5c6 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20H=C3=B6ppner?= +Date: Fri, 9 Jun 2023 17:37:50 +0200 +Subject: s390/dasd: Use correct lock while counting channel queue length +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jan Höppner + +commit ccc45cb4e7271c74dbb27776ae8f73d84557f5c6 upstream. + +The lock around counting the channel queue length in the BIODASDINFO +ioctl was incorrectly changed to the dasd_block->queue_lock with commit +583d6535cb9d ("dasd: remove dead code"). This can lead to endless list +iterations and a subsequent crash. + +The queue_lock is supposed to be used only for queue lists belonging to +dasd_block. For dasd_device related queue lists the ccwdev lock must be +used. + +Fix the mentioned issues by correctly using the ccwdev lock instead of +the queue lock. 
+ +Fixes: 583d6535cb9d ("dasd: remove dead code") +Cc: stable@vger.kernel.org # v5.0+ +Signed-off-by: Jan Höppner +Reviewed-by: Stefan Haberland +Signed-off-by: Stefan Haberland +Link: https://lore.kernel.org/r/20230609153750.1258763-2-sth@linux.ibm.com +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman +--- + drivers/s390/block/dasd_ioctl.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/s390/block/dasd_ioctl.c ++++ b/drivers/s390/block/dasd_ioctl.c +@@ -552,10 +552,10 @@ static int __dasd_ioctl_information(stru + + memcpy(dasd_info->type, base->discipline->name, 4); + +- spin_lock_irqsave(&block->queue_lock, flags); ++ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); + list_for_each(l, &base->ccw_queue) + dasd_info->chanq_len++; +- spin_unlock_irqrestore(&block->queue_lock, flags); ++ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); + return 0; + } + diff --git a/queue-6.1/selftests-mptcp-update-userspace-pm-addr-tests.patch b/queue-6.1/selftests-mptcp-update-userspace-pm-addr-tests.patch new file mode 100644 index 00000000000..f14aa9be850 --- /dev/null +++ b/queue-6.1/selftests-mptcp-update-userspace-pm-addr-tests.patch @@ -0,0 +1,46 @@ +From 48d73f609dcceeb563b0d960e59bf0362581e39c Mon Sep 17 00:00:00 2001 +From: Geliang Tang +Date: Sun, 4 Jun 2023 20:25:18 -0700 +Subject: selftests: mptcp: update userspace pm addr tests + +From: Geliang Tang + +commit 48d73f609dcceeb563b0d960e59bf0362581e39c upstream. + +This patch is linked to the previous commit ("mptcp: only send RM_ADDR in +nl_cmd_remove"). + +To align with what is done by the in-kernel PM, update userspace pm addr +selftests, by sending a remove_subflows command together after the +remove_addrs command. + +Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE") +Fixes: 97040cf9806e ("selftests: mptcp: userspace pm address tests") +Cc: stable@vger.kernel.org +Reviewed-by: Matthieu Baerts +Signed-off-by: Geliang Tang +Signed-off-by: Mat Martineau +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/net/mptcp/mptcp_join.sh | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -851,7 +851,15 @@ do_transfer() + tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns1") + ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id + sleep 1 ++ sp=$(grep "type:10" "$evts_ns1" | ++ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') ++ da=$(grep "type:10" "$evts_ns1" | ++ sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q') ++ dp=$(grep "type:10" "$evts_ns1" | ++ sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q') + ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id ++ ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \ ++ lport $sp rip $da rport $dp token $tk + fi + + counter=$((counter + 1)) diff --git a/queue-6.1/selftests-mptcp-update-userspace-pm-subflow-tests.patch b/queue-6.1/selftests-mptcp-update-userspace-pm-subflow-tests.patch new file mode 100644 index 00000000000..bb4a41bdc5b --- /dev/null +++ b/queue-6.1/selftests-mptcp-update-userspace-pm-subflow-tests.patch @@ -0,0 +1,46 @@ +From 6c160b636c91e71e50c39134f78257cc35305ff0 Mon Sep 17 00:00:00 2001 +From: Geliang Tang +Date: Sun, 4 Jun 2023 20:25:20 -0700 +Subject: selftests: mptcp: update userspace pm subflow tests + +From: Geliang Tang + +commit 6c160b636c91e71e50c39134f78257cc35305ff0 upstream. 
+ +To align with what is done by the in-kernel PM, update userspace pm +subflow selftests, by sending the a remove_addrs command together +before the remove_subflows command. This will get a RM_ADDR in +chk_rm_nr(). + +Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE") +Fixes: 5e986ec46874 ("selftests: mptcp: userspace pm subflow tests") +Link: https://github.com/multipath-tcp/mptcp_net-next/issues/379 +Cc: stable@vger.kernel.org +Reviewed-by: Matthieu Baerts +Signed-off-by: Geliang Tang +Signed-off-by: Mat Martineau +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/net/mptcp/mptcp_join.sh | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -925,6 +925,7 @@ do_transfer() + sleep 1 + sp=$(grep "type:10" "$evts_ns2" | + sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') ++ ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id + ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \ + rip $da rport $dp token $tk + fi +@@ -3007,7 +3008,7 @@ userspace_tests() + pm_nl_set_limits $ns1 0 1 + run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow + chk_join_nr 1 1 1 +- chk_rm_nr 0 1 ++ chk_rm_nr 1 1 + fi + } + diff --git a/queue-6.1/series b/queue-6.1/series index 1bbe07180e8..a4d946255ac 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -72,3 +72,29 @@ drm-i915-gt-use-the-correct-error-value-when-kernel_context-fails.patch drm-amd-pm-conditionally-disable-pcie-lane-switching-for-some-sienna_cichlid-skus.patch drm-amdgpu-fix-xclk-freq-on-chip_stoney.patch drm-amdgpu-change-reserved-vram-info-print.patch +drm-amd-pm-fix-power-context-allocation-in-smu13.patch +drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch +wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch +can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch +can-j1939-change-j1939_netdev_lock-type-to-mutex.patch +can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch +mptcp-only-send-rm_addr-in-nl_cmd_remove.patch +mptcp-add-address-into-userspace-pm-list.patch +mptcp-update-userspace-pm-infos.patch +selftests-mptcp-update-userspace-pm-addr-tests.patch +selftests-mptcp-update-userspace-pm-subflow-tests.patch +ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch +s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch +bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch +bluetooth-fix-debugfs-registration.patch +bluetooth-hci_qca-fix-debugfs-registration.patch +tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch +rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch +rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch +virtio_net-use-control_buf-for-coalesce-params.patch +soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch +pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch +usb-usbfs-enforce-page-requirements-for-mmap.patch +usb-usbfs-use-consistent-mmap-functions.patch +mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch +mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch diff --git a/queue-6.1/soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch b/queue-6.1/soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch new file mode 100644 index 00000000000..cb57418f255 --- /dev/null +++ 
b/queue-6.1/soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch @@ -0,0 +1,49 @@ +From 3530167c6fe8001de6c026a3058eaca4c8a5329f Mon Sep 17 00:00:00 2001 +From: Krzysztof Kozlowski +Date: Sat, 13 May 2023 13:17:47 +0200 +Subject: soc: qcom: icc-bwmon: fix incorrect error code passed to dev_err_probe() + +From: Krzysztof Kozlowski + +commit 3530167c6fe8001de6c026a3058eaca4c8a5329f upstream. + +Pass to dev_err_probe() PTR_ERR from actual dev_pm_opp_find_bw_floor() +call which failed, instead of previous ret which at this point is 0. +Failure of dev_pm_opp_find_bw_floor() would result in prematurely ending +the probe with success. + +Fixes smatch warnings: + + drivers/soc/qcom/icc-bwmon.c:776 bwmon_probe() warn: passing zero to 'dev_err_probe' + drivers/soc/qcom/icc-bwmon.c:781 bwmon_probe() warn: passing zero to 'dev_err_probe' + +Reported-by: kernel test robot +Reported-by: Dan Carpenter +Link: https://lore.kernel.org/r/202305131657.76XeHDjF-lkp@intel.com/ +Cc: +Fixes: b9c2ae6cac40 ("soc: qcom: icc-bwmon: Add bandwidth monitoring driver") +Signed-off-by: Krzysztof Kozlowski +Signed-off-by: Bjorn Andersson +Link: https://lore.kernel.org/r/20230513111747.132532-1-krzysztof.kozlowski@linaro.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/soc/qcom/icc-bwmon.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/soc/qcom/icc-bwmon.c ++++ b/drivers/soc/qcom/icc-bwmon.c +@@ -603,12 +603,12 @@ static int bwmon_probe(struct platform_d + bwmon->max_bw_kbps = UINT_MAX; + opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); + if (IS_ERR(opp)) +- return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n"); ++ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); + + bwmon->min_bw_kbps = 0; + opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); + if (IS_ERR(opp)) +- return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n"); ++ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); + + bwmon->dev = dev; + diff --git a/queue-6.1/tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch b/queue-6.1/tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch new file mode 100644 index 00000000000..8ea06534a7e --- /dev/null +++ b/queue-6.1/tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch @@ -0,0 +1,95 @@ +From 436eeae0411acdfc54521ddea80ee76d4ae8a7ea Mon Sep 17 00:00:00 2001 +From: Rijo Thomas +Date: Tue, 9 May 2023 13:02:40 +0530 +Subject: tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta' + +From: Rijo Thomas + +commit 436eeae0411acdfc54521ddea80ee76d4ae8a7ea upstream. + +After TEE has completed processing of TEE_CMD_ID_LOAD_TA, set proper +value in 'return_origin' argument passed by open_session() call. To do +so, add 'return_origin' field to the structure tee_cmd_load_ta. The +Trusted OS shall update return_origin as part of TEE processing. + +This change to 'struct tee_cmd_load_ta' interface requires a similar update +in AMD-TEE Trusted OS's TEE_CMD_ID_LOAD_TA interface. + +This patch has been verified on Phoenix Birman setup. On older APUs, +return_origin value will be 0. 
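+
+For illustration only (a user-space sketch against the GlobalPlatform TEE
+Client API as shipped by libteec; the UUID is a placeholder), the origin
+reported by the Trusted OS surfaces through the returnOrigin argument of
+TEEC_OpenSession():
+
+  #include <stdio.h>
+  #include <tee_client_api.h>
+
+  TEEC_Context ctx;
+  TEEC_Session sess;
+  TEEC_UUID uuid = { 0 };        /* placeholder TA UUID */
+  uint32_t origin = 0;
+
+  TEEC_InitializeContext(NULL, &ctx);
+  if (TEEC_OpenSession(&ctx, &sess, &uuid, TEEC_LOGIN_PUBLIC,
+                       NULL, NULL, &origin) != TEEC_SUCCESS)
+          /* origin now reflects what the Trusted OS reported while
+           * loading the TA (0 on older APUs, as noted above) */
+          printf("open_session failed, origin 0x%x\n", origin);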
+ +Cc: stable@vger.kernel.org +Fixes: 757cc3e9ff1d ("tee: add AMD-TEE driver") +Tested-by: Sourabh Das +Signed-off-by: Rijo Thomas +Acked-by: Sumit Garg +Signed-off-by: Jens Wiklander +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tee/amdtee/amdtee_if.h | 10 ++++++---- + drivers/tee/amdtee/call.c | 28 ++++++++++++++++------------ + 2 files changed, 22 insertions(+), 16 deletions(-) + +--- a/drivers/tee/amdtee/amdtee_if.h ++++ b/drivers/tee/amdtee/amdtee_if.h +@@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem { + + /** + * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE +- * @low_addr: [in] bits [31:0] of the physical address of the TA binary +- * @hi_addr: [in] bits [63:32] of the physical address of the TA binary +- * @size: [in] size of TA binary in bytes +- * @ta_handle: [out] return handle of the loaded TA ++ * @low_addr: [in] bits [31:0] of the physical address of the TA binary ++ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary ++ * @size: [in] size of TA binary in bytes ++ * @ta_handle: [out] return handle of the loaded TA ++ * @return_origin: [out] origin of return code after TEE processing + */ + struct tee_cmd_load_ta { + u32 low_addr; + u32 hi_addr; + u32 size; + u32 ta_handle; ++ u32 return_origin; + }; + + /** +--- a/drivers/tee/amdtee/call.c ++++ b/drivers/tee/amdtee/call.c +@@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, + if (ret) { + arg->ret_origin = TEEC_ORIGIN_COMMS; + arg->ret = TEEC_ERROR_COMMUNICATION; +- } else if (arg->ret == TEEC_SUCCESS) { +- ret = get_ta_refcount(load_cmd.ta_handle); +- if (!ret) { +- arg->ret_origin = TEEC_ORIGIN_COMMS; +- arg->ret = TEEC_ERROR_OUT_OF_MEMORY; ++ } else { ++ arg->ret_origin = load_cmd.return_origin; + +- /* Unload the TA on error */ +- unload_cmd.ta_handle = load_cmd.ta_handle; +- psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, +- (void *)&unload_cmd, +- sizeof(unload_cmd), &ret); +- } else { +- set_session_id(load_cmd.ta_handle, 0, &arg->session); ++ if (arg->ret == TEEC_SUCCESS) { ++ ret = get_ta_refcount(load_cmd.ta_handle); ++ if (!ret) { ++ arg->ret_origin = TEEC_ORIGIN_COMMS; ++ arg->ret = TEEC_ERROR_OUT_OF_MEMORY; ++ ++ /* Unload the TA on error */ ++ unload_cmd.ta_handle = load_cmd.ta_handle; ++ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, ++ (void *)&unload_cmd, ++ sizeof(unload_cmd), &ret); ++ } else { ++ set_session_id(load_cmd.ta_handle, 0, &arg->session); ++ } + } + } + mutex_unlock(&ta_refcount_mutex); diff --git a/queue-6.1/usb-usbfs-enforce-page-requirements-for-mmap.patch b/queue-6.1/usb-usbfs-enforce-page-requirements-for-mmap.patch new file mode 100644 index 00000000000..8486bd502ae --- /dev/null +++ b/queue-6.1/usb-usbfs-enforce-page-requirements-for-mmap.patch @@ -0,0 +1,140 @@ +From 0143d148d1e882fb1538dc9974c94d63961719b9 Mon Sep 17 00:00:00 2001 +From: Ruihan Li +Date: Mon, 15 May 2023 21:09:55 +0800 +Subject: usb: usbfs: Enforce page requirements for mmap + +From: Ruihan Li + +commit 0143d148d1e882fb1538dc9974c94d63961719b9 upstream. + +The current implementation of usbdev_mmap uses usb_alloc_coherent to +allocate memory pages that will later be mapped into the user space. +Meanwhile, usb_alloc_coherent employs three different methods to +allocate memory, as outlined below: + * If hcd->localmem_pool is non-null, it uses gen_pool_dma_alloc to + allocate memory; + * If DMA is not available, it uses kmalloc to allocate memory; + * Otherwise, it uses dma_alloc_coherent. 
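+
+In rough pseudocode (a simplified sketch of that dispatch, not the exact
+helper body):
+
+  if (hcd->localmem_pool)                  /* device-local memory */
+          return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
+  if (!hcd_uses_dma(hcd))                  /* PIO-only host */
+          return kmalloc(size, mem_flags); /* may hand out a slab page */
+  return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);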
+ +However, it should be noted that gen_pool_dma_alloc does not guarantee +that the resulting memory will be page-aligned. Furthermore, trying to +map slab pages (i.e., memory allocated by kmalloc) into the user space +is not resonable and can lead to problems, such as a type confusion bug +when PAGE_TABLE_CHECK=y [1]. + +To address these issues, this patch introduces hcd_alloc_coherent_pages, +which addresses the above two problems. Specifically, +hcd_alloc_coherent_pages uses gen_pool_dma_alloc_align instead of +gen_pool_dma_alloc to ensure that the memory is page-aligned. To replace +kmalloc, hcd_alloc_coherent_pages directly allocates pages by calling +__get_free_pages. + +Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.comm +Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1] +Fixes: f7d34b445abc ("USB: Add support for usbfs zerocopy.") +Fixes: ff2437befd8f ("usb: host: Fix excessive alignment restriction for local memory allocations") +Cc: stable@vger.kernel.org +Signed-off-by: Ruihan Li +Acked-by: Alan Stern +Link: https://lore.kernel.org/r/20230515130958.32471-2-lrh2000@pku.edu.cn +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/core/buffer.c | 41 +++++++++++++++++++++++++++++++++++++++++ + drivers/usb/core/devio.c | 9 +++++---- + include/linux/usb/hcd.h | 5 +++++ + 3 files changed, 51 insertions(+), 4 deletions(-) + +--- a/drivers/usb/core/buffer.c ++++ b/drivers/usb/core/buffer.c +@@ -172,3 +172,44 @@ void hcd_buffer_free( + } + dma_free_coherent(hcd->self.sysdev, size, addr, dma); + } ++ ++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, ++ size_t size, gfp_t mem_flags, dma_addr_t *dma) ++{ ++ if (size == 0) ++ return NULL; ++ ++ if (hcd->localmem_pool) ++ return gen_pool_dma_alloc_align(hcd->localmem_pool, ++ size, dma, PAGE_SIZE); ++ ++ /* some USB hosts just use PIO */ ++ if (!hcd_uses_dma(hcd)) { ++ *dma = DMA_MAPPING_ERROR; ++ return (void *)__get_free_pages(mem_flags, ++ get_order(size)); ++ } ++ ++ return dma_alloc_coherent(hcd->self.sysdev, ++ size, dma, mem_flags); ++} ++ ++void hcd_buffer_free_pages(struct usb_hcd *hcd, ++ size_t size, void *addr, dma_addr_t dma) ++{ ++ if (!addr) ++ return; ++ ++ if (hcd->localmem_pool) { ++ gen_pool_free(hcd->localmem_pool, ++ (unsigned long)addr, size); ++ return; ++ } ++ ++ if (!hcd_uses_dma(hcd)) { ++ free_pages((unsigned long)addr, get_order(size)); ++ return; ++ } ++ ++ dma_free_coherent(hcd->self.sysdev, size, addr, dma); ++} +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -186,6 +186,7 @@ static int connected(struct usb_dev_stat + static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) + { + struct usb_dev_state *ps = usbm->ps; ++ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); + unsigned long flags; + + spin_lock_irqsave(&ps->lock, flags); +@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(str + list_del(&usbm->memlist); + spin_unlock_irqrestore(&ps->lock, flags); + +- usb_free_coherent(ps->dev, usbm->size, usbm->mem, +- usbm->dma_handle); ++ hcd_buffer_free_pages(hcd, usbm->size, ++ usbm->mem, usbm->dma_handle); + usbfs_decrease_memory_usage( + usbm->size + sizeof(struct usb_memory)); + kfree(usbm); +@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file + goto error_decrease_mem; + } + +- mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, +- &dma_handle); ++ mem = hcd_buffer_alloc_pages(hcd, ++ size, GFP_USER | __GFP_NOWARN, &dma_handle); + if (!mem) { + ret = -ENOMEM; + goto 
error_free_usbm; +--- a/include/linux/usb/hcd.h ++++ b/include/linux/usb/hcd.h +@@ -500,6 +500,11 @@ void *hcd_buffer_alloc(struct usb_bus *b + void hcd_buffer_free(struct usb_bus *bus, size_t size, + void *addr, dma_addr_t dma); + ++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, ++ size_t size, gfp_t mem_flags, dma_addr_t *dma); ++void hcd_buffer_free_pages(struct usb_hcd *hcd, ++ size_t size, void *addr, dma_addr_t dma); ++ + /* generic bus glue, needed for host controllers that don't use PCI */ + extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); + diff --git a/queue-6.1/usb-usbfs-use-consistent-mmap-functions.patch b/queue-6.1/usb-usbfs-use-consistent-mmap-functions.patch new file mode 100644 index 00000000000..d85d71457d3 --- /dev/null +++ b/queue-6.1/usb-usbfs-use-consistent-mmap-functions.patch @@ -0,0 +1,60 @@ +From d0b861653f8c16839c3035875b556afc4472f941 Mon Sep 17 00:00:00 2001 +From: Ruihan Li +Date: Mon, 15 May 2023 21:09:56 +0800 +Subject: usb: usbfs: Use consistent mmap functions + +From: Ruihan Li + +commit d0b861653f8c16839c3035875b556afc4472f941 upstream. + +When hcd->localmem_pool is non-null, localmem_pool is used to allocate +DMA memory. In this case, the dma address will be properly returned (in +dma_handle), and dma_mmap_coherent should be used to map this memory +into the user space. However, the current implementation uses +pfn_remap_range, which is supposed to map normal pages. + +Instead of repeating the logic in the memory allocation function, this +patch introduces a more robust solution. Here, the type of allocated +memory is checked by testing whether dma_handle is properly set. If +dma_handle is properly returned, it means some DMA pages are allocated +and dma_mmap_coherent should be used to map them. Otherwise, normal +pages are allocated and pfn_remap_range should be called. This ensures +that the correct mmap functions are used consistently, independently +with logic details that determine which type of memory gets allocated. + +Fixes: a0e710a7def4 ("USB: usbfs: fix mmap dma mismatch") +Cc: stable@vger.kernel.org +Signed-off-by: Ruihan Li +Link: https://lore.kernel.org/r/20230515130958.32471-3-lrh2000@pku.edu.cn +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/core/devio.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -235,7 +235,7 @@ static int usbdev_mmap(struct file *file + size_t size = vma->vm_end - vma->vm_start; + void *mem; + unsigned long flags; +- dma_addr_t dma_handle; ++ dma_addr_t dma_handle = DMA_MAPPING_ERROR; + int ret; + + ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); +@@ -265,7 +265,14 @@ static int usbdev_mmap(struct file *file + usbm->vma_use_count = 1; + INIT_LIST_HEAD(&usbm->memlist); + +- if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { ++ /* ++ * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates ++ * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check ++ * whether we are in such cases, and then use remap_pfn_range (or ++ * dma_mmap_coherent) to map normal (or DMA) pages into the user ++ * space, respectively. 
++ */ ++ if (dma_handle == DMA_MAPPING_ERROR) { + if (remap_pfn_range(vma, vma->vm_start, + virt_to_phys(usbm->mem) >> PAGE_SHIFT, + size, vma->vm_page_prot) < 0) { diff --git a/queue-6.1/virtio_net-use-control_buf-for-coalesce-params.patch b/queue-6.1/virtio_net-use-control_buf-for-coalesce-params.patch new file mode 100644 index 00000000000..941b94d37ee --- /dev/null +++ b/queue-6.1/virtio_net-use-control_buf-for-coalesce-params.patch @@ -0,0 +1,72 @@ +From accc1bf23068c1cdc4c2b015320ba856e210dd98 Mon Sep 17 00:00:00 2001 +From: Brett Creeley +Date: Mon, 5 Jun 2023 12:59:25 -0700 +Subject: virtio_net: use control_buf for coalesce params + +From: Brett Creeley + +commit accc1bf23068c1cdc4c2b015320ba856e210dd98 upstream. + +Commit 699b045a8e43 ("net: virtio_net: notifications coalescing +support") added coalescing command support for virtio_net. However, +the coalesce commands are using buffers on the stack, which is causing +the device to see DMA errors. There should also be a complaint from +check_for_stack() in debug_dma_map_xyz(). Fix this by adding and using +coalesce params from the control_buf struct, which aligns with other +commands. + +Cc: stable@vger.kernel.org +Fixes: 699b045a8e43 ("net: virtio_net: notifications coalescing support") +Reviewed-by: Shannon Nelson +Signed-off-by: Allen Hubbe +Signed-off-by: Brett Creeley +Acked-by: Jason Wang +Reviewed-by: Xuan Zhuo +Acked-by: Michael S. Tsirkin +Link: https://lore.kernel.org/r/20230605195925.51625-1-brett.creeley@amd.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/virtio_net.c | 16 ++++++++-------- + 1 file changed, 8 insertions(+), 8 deletions(-) + +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -200,6 +200,8 @@ struct control_buf { + __virtio16 vid; + __virtio64 offloads; + struct virtio_net_ctrl_rss rss; ++ struct virtio_net_ctrl_coal_tx coal_tx; ++ struct virtio_net_ctrl_coal_rx coal_rx; + }; + + struct virtnet_info { +@@ -2786,12 +2788,10 @@ static int virtnet_send_notf_coal_cmds(s + struct ethtool_coalesce *ec) + { + struct scatterlist sgs_tx, sgs_rx; +- struct virtio_net_ctrl_coal_tx coal_tx; +- struct virtio_net_ctrl_coal_rx coal_rx; + +- coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); +- coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); +- sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx)); ++ vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); ++ vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); ++ sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, + VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, +@@ -2802,9 +2802,9 @@ static int virtnet_send_notf_coal_cmds(s + vi->tx_usecs = ec->tx_coalesce_usecs; + vi->tx_max_packets = ec->tx_max_coalesced_frames; + +- coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); +- coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); +- sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx)); ++ vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); ++ vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); ++ sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, + VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, diff --git a/queue-6.1/wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch b/queue-6.1/wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch new file mode 100644 
index 00000000000..51d8bfb8e6d --- /dev/null +++ b/queue-6.1/wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch @@ -0,0 +1,114 @@ +From 7a4615b9a9da5225b22b36a20508555dd133ac24 Mon Sep 17 00:00:00 2001 +From: "Gustavo A. R. Silva" +Date: Fri, 2 Jun 2023 13:42:47 -0600 +Subject: wifi: iwlwifi: mvm: Fix -Warray-bounds bug in iwl_mvm_wait_d3_notif() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Gustavo A. R. Silva + +commit 7a4615b9a9da5225b22b36a20508555dd133ac24 upstream. + +kmemdup() at line 2735 is not duplicating enough memory for +notif->tid_tear_down and notif->station_id. As it only duplicates +612 bytes: up to offsetofend(struct iwl_wowlan_info_notif, +received_beacons), this is the range of [0, 612) bytes. + +2735 notif = kmemdup(notif_v1, +2736 offsetofend(struct iwl_wowlan_info_notif, +2737 received_beacons), +2738 GFP_ATOMIC); + +which evidently does not cover bytes 612 and 613 for members +tid_tear_down and station_id in struct iwl_wowlan_info_notif. +See below: + +$ pahole -C iwl_wowlan_info_notif drivers/net/wireless/intel/iwlwifi/mvm/d3.o +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[2]; /* 0 488 */ + /* --- cacheline 7 boundary (448 bytes) was 40 bytes ago --- */ + struct iwl_wowlan_igtk_status igtk[2]; /* 488 80 */ + /* --- cacheline 8 boundary (512 bytes) was 56 bytes ago --- */ + __le64 replay_ctr; /* 568 8 */ + /* --- cacheline 9 boundary (576 bytes) --- */ + __le16 pattern_number; /* 576 2 */ + __le16 reserved1; /* 578 2 */ + __le16 qos_seq_ctr[8]; /* 580 16 */ + __le32 wakeup_reasons; /* 596 4 */ + __le32 num_of_gtk_rekeys; /* 600 4 */ + __le32 transmitted_ndps; /* 604 4 */ + __le32 received_beacons; /* 608 4 */ + u8 tid_tear_down; /* 612 1 */ + u8 station_id; /* 613 1 */ + u8 reserved2[2]; /* 614 2 */ + + /* size: 616, cachelines: 10, members: 13 */ + /* last cacheline: 40 bytes */ +}; + +Therefore, when the following assignments take place, actually no memory +has been allocated for those objects: + +2743 notif->tid_tear_down = notif_v1->tid_tear_down; +2744 notif->station_id = notif_v1->station_id; + +Fix this by allocating space for the whole notif object and zero out the +remaining space in memory after member station_id. 
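+
+The zeroing uses the memset_after() helper from include/linux/string.h,
+which is roughly equivalent to:
+
+  /* memset_after(notif, 0, station_id) ~= */
+  memset((u8 *)notif + offsetofend(typeof(*notif), station_id), 0,
+         sizeof(*notif) - offsetofend(typeof(*notif), station_id));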
+ +This also fixes the following -Warray-bounds issues: + CC drivers/net/wireless/intel/iwlwifi/mvm/d3.o +drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function ‘iwl_mvm_wait_d3_notif’: +drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2743:30: warning: array subscript ‘struct iwl_wowlan_info_notif[0]’ is partly outside array bounds of ‘unsigned char[612]’ [-Warray-bounds=] + 2743 | notif->tid_tear_down = notif_v1->tid_tear_down; + | + from drivers/net/wireless/intel/iwlwifi/mvm/d3.c:7: +In function ‘kmemdup’, + inlined from ‘iwl_mvm_wait_d3_notif’ at drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2735:12: +include/linux/fortify-string.h:765:16: note: object of size 612 allocated by ‘__real_kmemdup’ + 765 | return __real_kmemdup(p, size, gfp); + | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ +drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function ‘iwl_mvm_wait_d3_notif’: +drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2744:30: warning: array subscript ‘struct iwl_wowlan_info_notif[0]’ is partly outside array bounds of ‘unsigned char[612]’ [-Warray-bounds=] + 2744 | notif->station_id = notif_v1->station_id; + | ^~ +In function ‘kmemdup’, + inlined from ‘iwl_mvm_wait_d3_notif’ at drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2735:12: +include/linux/fortify-string.h:765:16: note: object of size 612 allocated by ‘__real_kmemdup’ + 765 | return __real_kmemdup(p, size, gfp); + | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Link: https://github.com/KSPP/linux/issues/306 +Fixes: 905d50ddbc83 ("wifi: iwlwifi: mvm: support wowlan info notification version 2") +Cc: stable@vger.kernel.org +Signed-off-by: Gustavo A. R. Silva +Acked-by: Gregory Greenman +Link: https://lore.kernel.org/r/ZHpGN555FwAKGduH@work +Signed-off-by: Johannes Berg +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8 ++------ + 1 file changed, 2 insertions(+), 6 deletions(-) + +--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +@@ -2724,17 +2724,13 @@ static bool iwl_mvm_wait_d3_notif(struct + if (wowlan_info_ver < 2) { + struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; + +- notif = kmemdup(notif_v1, +- offsetofend(struct iwl_wowlan_info_notif, +- received_beacons), +- GFP_ATOMIC); +- ++ notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); + if (!notif) + return false; + + notif->tid_tear_down = notif_v1->tid_tear_down; + notif->station_id = notif_v1->station_id; +- ++ memset_after(notif, 0, station_id); + } else { + notif = (void *)pkt->data; + }