--- /dev/null
+From 9f7e3611f6c828fcb6001c39d8e7a523a4f31525 Mon Sep 17 00:00:00 2001
+From: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+Date: Thu, 25 May 2023 12:38:17 +0200
+Subject: accel/ivpu: Do not trigger extra VPU reset if the VPU is idle
+
+From: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+
+commit 9f7e3611f6c828fcb6001c39d8e7a523a4f31525 upstream.
+
+Turning off the PLL and entering D0i3 will reset the VPU so
+an explicit IP reset is redundant.
+But if the VPU is active, it may interfere with PLL disabling
+and to avoid that, we have to issue an additional IP reset
+to silence the VPU before turning off the PLL.
+
+Fixes: a8fed6d1e0b9 ("accel/ivpu: Fix power down sequence")
+Cc: stable@vger.kernel.org # 6.3.x
+Signed-off-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230525103818.877590-1-stanislaw.gruszka@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_hw_mtl.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
+index 382ec127be8e..156dae676967 100644
+--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
++++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
+@@ -754,9 +754,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+ {
+ int ret = 0;
+
+- if (ivpu_hw_mtl_reset(vdev)) {
++ if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset the VPU\n");
+- ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+@@ -764,8 +763,10 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+ ret = -EIO;
+ }
+
+- if (ivpu_hw_mtl_d0i3_enable(vdev))
+- ivpu_warn(vdev, "Failed to enable D0I3\n");
++ if (ivpu_hw_mtl_d0i3_enable(vdev)) {
++ ivpu_err(vdev, "Failed to enter D0I3\n");
++ ret = -EIO;
++ }
+
+ return ret;
+ }
+--
+2.41.0
+
--- /dev/null
+From a3efabee5878b8d7b1863debb78cb7129d07a346 Mon Sep 17 00:00:00 2001
+From: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+Date: Wed, 7 Jun 2023 11:45:02 +0200
+Subject: accel/ivpu: Fix sporadic VPU boot failure
+
+From: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+
+commit a3efabee5878b8d7b1863debb78cb7129d07a346 upstream.
+
+Wait for AON bit in HOST_SS_CPR_RST_CLR to return 0 before
+starting VPUIP power up sequence, otherwise the VPU device
+may sporadically fail to boot.
+
+An error in power up sequence is propagated to the runtime
+power management - the device will be in an error state
+until the VPU driver is reloaded.
+
+Fixes: 35b137630f08 ("accel/ivpu: Introduce a new DRM driver for Intel VPU")
+Cc: stable@vger.kernel.org # 6.3.x
+Signed-off-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
+Reviewed-by: Krystian Pradzynski <krystian.pradzynski@linux.intel.com>
+Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230607094502.388489-1-stanislaw.gruszka@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_hw_mtl.c | 13 ++++++++++++-
+ drivers/accel/ivpu/ivpu_hw_mtl_reg.h | 1 +
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
+index 156dae676967..fef35422c6f0 100644
+--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
++++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
+@@ -197,6 +197,11 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+ hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+ }
+
++static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
++{
++ return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
++}
++
+ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+ {
+ struct ivpu_hw_info *hw = vdev->hw;
+@@ -239,6 +244,12 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+ ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+ return ret;
+ }
++
++ ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
++ if (ret) {
++ ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
++ return ret;
++ }
+ }
+
+ return 0;
+@@ -256,7 +267,7 @@ static int ivpu_pll_disable(struct ivpu_device *vdev)
+
+ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
+ {
+- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
++ u32 val = 0;
+
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+index d83ccfd9a871..593b8ff07417 100644
+--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
++++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+@@ -91,6 +91,7 @@
+ #define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+ #define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
++#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
+ #define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+ #define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+ #define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+--
+2.41.0
+
--- /dev/null
+From fe2ccc6c29d53e14d3c8b3ddf8ad965a92e074ee Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Wed, 31 May 2023 10:57:58 +0200
+Subject: Bluetooth: fix debugfs registration
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit fe2ccc6c29d53e14d3c8b3ddf8ad965a92e074ee upstream.
+
+Since commit ec6cef9cd98d ("Bluetooth: Fix SMP channel registration for
+unconfigured controllers") the debugfs interface for unconfigured
+controllers will be created when the controller is configured.
+
+There is however currently nothing preventing a controller from being
+configured multiple times (e.g. setting the device address using btmgmt)
+which results in failed attempts to register the already registered
+debugfs entries:
+
+ debugfs: File 'features' in directory 'hci0' already present!
+ debugfs: File 'manufacturer' in directory 'hci0' already present!
+ debugfs: File 'hci_version' in directory 'hci0' already present!
+ ...
+ debugfs: File 'quirk_simultaneous_discovery' in directory 'hci0' already present!
+
+Add a controller flag to avoid trying to register the debugfs interface
+more than once.
+
+Fixes: ec6cef9cd98d ("Bluetooth: Fix SMP channel registration for unconfigured controllers")
+Cc: stable@vger.kernel.org # 4.0
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/bluetooth/hci.h | 1 +
+ net/bluetooth/hci_sync.c | 3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -350,6 +350,7 @@ enum {
+ enum {
+ HCI_SETUP,
+ HCI_CONFIG,
++ HCI_DEBUGFS_CREATED,
+ HCI_AUTO_OFF,
+ HCI_RFKILLED,
+ HCI_MGMT,
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4510,6 +4510,9 @@ static int hci_init_sync(struct hci_dev
+ !hci_dev_test_flag(hdev, HCI_CONFIG))
+ return 0;
+
++ if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
++ return 0;
++
+ hci_debugfs_create_common(hdev);
+
+ if (lmp_bredr_capable(hdev))
--- /dev/null
+From c5d2b6fa26b5b8386a9cc902cdece3a46bef2bd2 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 30 May 2023 13:48:44 -0700
+Subject: Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+commit c5d2b6fa26b5b8386a9cc902cdece3a46bef2bd2 upstream.
+
+Similar to commit 0f7d9b31ce7a ("netfilter: nf_tables: fix use-after-free
+in nft_set_catchall_destroy()"). We can not access k after kfree_rcu()
+call.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Min Li <lm0963hack@gmail.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/hci_core.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1416,10 +1416,10 @@ int hci_remove_link_key(struct hci_dev *
+
+ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
+ {
+- struct smp_ltk *k;
++ struct smp_ltk *k, *tmp;
+ int removed = 0;
+
+- list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
++ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
+ continue;
+
+@@ -1435,9 +1435,9 @@ int hci_remove_ltk(struct hci_dev *hdev,
+
+ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
+ {
+- struct smp_irk *k;
++ struct smp_irk *k, *tmp;
+
+- list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
++ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
+ continue;
+
--- /dev/null
+From 47c5d829a3e326b7395352a10fc8a6effe7afa15 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Wed, 31 May 2023 10:57:59 +0200
+Subject: Bluetooth: hci_qca: fix debugfs registration
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 47c5d829a3e326b7395352a10fc8a6effe7afa15 upstream.
+
+Since commit 3e4be65eb82c ("Bluetooth: hci_qca: Add poweroff support
+during hci down for wcn3990"), the setup callback which registers the
+debugfs interface can be called multiple times.
+
+This specifically leads to the following error when powering on the
+controller:
+
+ debugfs: Directory 'ibs' with parent 'hci0' already present!
+
+Add a driver flag to avoid trying to register the debugfs interface more
+than once.
+
+Fixes: 3e4be65eb82c ("Bluetooth: hci_qca: Add poweroff support during hci down for wcn3990")
+Cc: stable@vger.kernel.org # 4.20
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/hci_qca.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -78,7 +78,8 @@ enum qca_flags {
+ QCA_HW_ERROR_EVENT,
+ QCA_SSR_TRIGGERED,
+ QCA_BT_OFF,
+- QCA_ROM_FW
++ QCA_ROM_FW,
++ QCA_DEBUGFS_CREATED,
+ };
+
+ enum qca_capabilities {
+@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_
+ if (!hdev->debugfs)
+ return;
+
++ if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
++ return;
++
+ ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+ /* read only */
--- /dev/null
+From 9f16eb106aa5fce15904625661312623ec783ed3 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Fri, 26 May 2023 20:19:10 +0300
+Subject: can: j1939: avoid possible use-after-free when j1939_can_rx_register fails
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 9f16eb106aa5fce15904625661312623ec783ed3 upstream.
+
+Syzkaller reports the following failure:
+
+BUG: KASAN: use-after-free in kref_put include/linux/kref.h:64 [inline]
+BUG: KASAN: use-after-free in j1939_priv_put+0x25/0xa0 net/can/j1939/main.c:172
+Write of size 4 at addr ffff888141c15058 by task swapper/3/0
+
+CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.10.144-syzkaller #0
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+Call Trace:
+ <IRQ>
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x107/0x167 lib/dump_stack.c:118
+ print_address_description.constprop.0+0x1c/0x220 mm/kasan/report.c:385
+ __kasan_report mm/kasan/report.c:545 [inline]
+ kasan_report.cold+0x1f/0x37 mm/kasan/report.c:562
+ check_memory_region_inline mm/kasan/generic.c:186 [inline]
+ check_memory_region+0x145/0x190 mm/kasan/generic.c:192
+ instrument_atomic_read_write include/linux/instrumented.h:101 [inline]
+ atomic_fetch_sub_release include/asm-generic/atomic-instrumented.h:220 [inline]
+ __refcount_sub_and_test include/linux/refcount.h:272 [inline]
+ __refcount_dec_and_test include/linux/refcount.h:315 [inline]
+ refcount_dec_and_test include/linux/refcount.h:333 [inline]
+ kref_put include/linux/kref.h:64 [inline]
+ j1939_priv_put+0x25/0xa0 net/can/j1939/main.c:172
+ j1939_sk_sock_destruct+0x44/0x90 net/can/j1939/socket.c:374
+ __sk_destruct+0x4e/0x820 net/core/sock.c:1784
+ rcu_do_batch kernel/rcu/tree.c:2485 [inline]
+ rcu_core+0xb35/0x1a30 kernel/rcu/tree.c:2726
+ __do_softirq+0x289/0x9a3 kernel/softirq.c:298
+ asm_call_irq_on_stack+0x12/0x20
+ </IRQ>
+ __run_on_irqstack arch/x86/include/asm/irq_stack.h:26 [inline]
+ run_on_irqstack_cond arch/x86/include/asm/irq_stack.h:77 [inline]
+ do_softirq_own_stack+0xaa/0xe0 arch/x86/kernel/irq_64.c:77
+ invoke_softirq kernel/softirq.c:393 [inline]
+ __irq_exit_rcu kernel/softirq.c:423 [inline]
+ irq_exit_rcu+0x136/0x200 kernel/softirq.c:435
+ sysvec_apic_timer_interrupt+0x4d/0x100 arch/x86/kernel/apic/apic.c:1095
+ asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:635
+
+Allocated by task 1141:
+ kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48
+ kasan_set_track mm/kasan/common.c:56 [inline]
+ __kasan_kmalloc.constprop.0+0xc9/0xd0 mm/kasan/common.c:461
+ kmalloc include/linux/slab.h:552 [inline]
+ kzalloc include/linux/slab.h:664 [inline]
+ j1939_priv_create net/can/j1939/main.c:131 [inline]
+ j1939_netdev_start+0x111/0x860 net/can/j1939/main.c:268
+ j1939_sk_bind+0x8ea/0xd30 net/can/j1939/socket.c:485
+ __sys_bind+0x1f2/0x260 net/socket.c:1645
+ __do_sys_bind net/socket.c:1656 [inline]
+ __se_sys_bind net/socket.c:1654 [inline]
+ __x64_sys_bind+0x6f/0xb0 net/socket.c:1654
+ do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+Freed by task 1141:
+ kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48
+ kasan_set_track+0x1c/0x30 mm/kasan/common.c:56
+ kasan_set_free_info+0x1b/0x30 mm/kasan/generic.c:355
+ __kasan_slab_free+0x112/0x170 mm/kasan/common.c:422
+ slab_free_hook mm/slub.c:1542 [inline]
+ slab_free_freelist_hook+0xad/0x190 mm/slub.c:1576
+ slab_free mm/slub.c:3149 [inline]
+ kfree+0xd9/0x3b0 mm/slub.c:4125
+ j1939_netdev_start+0x5ee/0x860 net/can/j1939/main.c:300
+ j1939_sk_bind+0x8ea/0xd30 net/can/j1939/socket.c:485
+ __sys_bind+0x1f2/0x260 net/socket.c:1645
+ __do_sys_bind net/socket.c:1656 [inline]
+ __se_sys_bind net/socket.c:1654 [inline]
+ __x64_sys_bind+0x6f/0xb0 net/socket.c:1654
+ do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x61/0xc6
+
+It can be caused by this scenario:
+
+CPU0 CPU1
+j1939_sk_bind(socket0, ndev0, ...)
+ j1939_netdev_start()
+ j1939_sk_bind(socket1, ndev0, ...)
+ j1939_netdev_start()
+ mutex_lock(&j1939_netdev_lock)
+ j1939_priv_set(ndev0, priv)
+ mutex_unlock(&j1939_netdev_lock)
+ if (priv_new)
+ kref_get(&priv_new->rx_kref)
+ return priv_new;
+ /* inside j1939_sk_bind() */
+ jsk->priv = priv
+ j1939_can_rx_register(priv) // fails
+ j1939_priv_set(ndev, NULL)
+ kfree(priv)
+ j1939_sk_sock_destruct()
+ j1939_priv_put() // <- uaf
+
+To avoid this, call j1939_can_rx_register() under j1939_netdev_lock so
+that a concurrent thread cannot process j1939_priv before
+j1939_can_rx_register() returns.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20230526171910.227615-3-pchelkin@ispras.ru
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -290,16 +290,18 @@ struct j1939_priv *j1939_netdev_start(st
+ return priv_new;
+ }
+ j1939_priv_set(ndev, priv);
+- mutex_unlock(&j1939_netdev_lock);
+
+ ret = j1939_can_rx_register(priv);
+ if (ret < 0)
+ goto out_priv_put;
+
++ mutex_unlock(&j1939_netdev_lock);
+ return priv;
+
+ out_priv_put:
+ j1939_priv_set(ndev, NULL);
++ mutex_unlock(&j1939_netdev_lock);
++
+ dev_put(ndev);
+ kfree(priv);
+
--- /dev/null
+From cd9c790de2088b0d797dc4d244b4f174f9962554 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Fri, 26 May 2023 20:19:09 +0300
+Subject: can: j1939: change j1939_netdev_lock type to mutex
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit cd9c790de2088b0d797dc4d244b4f174f9962554 upstream.
+
+It turns out access to j1939_can_rx_register() needs to be serialized,
+otherwise j1939_priv can be corrupted when parallel threads call
+j1939_netdev_start() and j1939_can_rx_register() fails. This issue is
+thoroughly covered in other commit which serializes access to
+j1939_can_rx_register().
+
+Change j1939_netdev_lock type to mutex so that we do not need to remove
+GFP_KERNEL from can_rx_register().
+
+j1939_netdev_lock seems to be used in normal contexts where mutex usage
+is not prohibited.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Suggested-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20230526171910.227615-2-pchelkin@ispras.ru
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/main.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buf
+ #define J1939_CAN_ID CAN_EFF_FLAG
+ #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+-static DEFINE_SPINLOCK(j1939_netdev_lock);
++static DEFINE_MUTEX(j1939_netdev_lock);
+
+ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
+ {
+@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kr
+ j1939_can_rx_unregister(priv);
+ j1939_ecu_unmap_all(priv);
+ j1939_priv_set(priv->ndev, NULL);
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+ }
+
+ /* get pointer to priv without increasing ref counter */
+@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get
+ {
+ struct j1939_priv *priv;
+
+- spin_lock(&j1939_netdev_lock);
++ mutex_lock(&j1939_netdev_lock);
+ priv = j1939_priv_get_by_ndev_locked(ndev);
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+
+ return priv;
+ }
+@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(st
+ struct j1939_priv *priv, *priv_new;
+ int ret;
+
+- spin_lock(&j1939_netdev_lock);
++ mutex_lock(&j1939_netdev_lock);
+ priv = j1939_priv_get_by_ndev_locked(ndev);
+ if (priv) {
+ kref_get(&priv->rx_kref);
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+ return priv;
+ }
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+
+ priv = j1939_priv_create(ndev);
+ if (!priv)
+@@ -277,20 +277,20 @@ struct j1939_priv *j1939_netdev_start(st
+ spin_lock_init(&priv->j1939_socks_lock);
+ INIT_LIST_HEAD(&priv->j1939_socks);
+
+- spin_lock(&j1939_netdev_lock);
++ mutex_lock(&j1939_netdev_lock);
+ priv_new = j1939_priv_get_by_ndev_locked(ndev);
+ if (priv_new) {
+ /* Someone was faster than us, use their priv and roll
+ * back our's.
+ */
+ kref_get(&priv_new->rx_kref);
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+ dev_put(ndev);
+ kfree(priv);
+ return priv_new;
+ }
+ j1939_priv_set(ndev, priv);
+- spin_unlock(&j1939_netdev_lock);
++ mutex_unlock(&j1939_netdev_lock);
+
+ ret = j1939_can_rx_register(priv);
+ if (ret < 0)
+@@ -308,7 +308,7 @@ struct j1939_priv *j1939_netdev_start(st
+
+ void j1939_netdev_stop(struct j1939_priv *priv)
+ {
+- kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
++ kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+ j1939_priv_put(priv);
+ }
+
--- /dev/null
+From 2a84aea80e925ecba6349090559754f8e8eb68ef Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Fri, 26 May 2023 10:19:46 +0200
+Subject: can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in J1939 Socket
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 2a84aea80e925ecba6349090559754f8e8eb68ef upstream.
+
+This patch addresses an issue within the j1939_sk_send_loop_abort()
+function in the j1939/socket.c file, specifically in the context of
+Transport Protocol (TP) sessions.
+
+Without this patch, when a TP session is initiated and a Clear To Send
+(CTS) frame is received from the remote side requesting one data packet,
+the kernel dispatches the first Data Transport (DT) frame and then waits
+for the next CTS. If the remote side doesn't respond with another CTS,
+the kernel aborts due to a timeout. This leads to the user-space
+receiving an EPOLLERR on the socket, and the socket becomes active.
+
+However, when trying to read the error queue from the socket with
+sock.recvmsg(, , socket.MSG_ERRQUEUE), it returns -EAGAIN,
+given that the socket is non-blocking. This situation results in an
+infinite loop: the user-space repeatedly calls epoll(), epoll() returns
+the socket file descriptor with EPOLLERR, but the socket then blocks on
+the recv() of ERRQUEUE.
+
+This patch introduces an additional check for the J1939_SOCK_ERRQUEUE
+flag within the j1939_sk_send_loop_abort() function. If the flag is set,
+it indicates that the application has subscribed to receive error queue
+messages. In such cases, the kernel can communicate the current transfer
+state via the error queue. This allows for the function to return early,
+preventing the unnecessary setting of the socket into an error state,
+and breaking the infinite loop. It is crucial to note that a socket
+error is only needed if the application isn't using the error queue, as,
+without it, the application wouldn't be aware of transfer issues.
+
+Fixes: 9d71dd0c7009 ("can: add support of SAE J1939 protocol")
+Reported-by: David Jander <david@protonic.nl>
+Tested-by: David Jander <david@protonic.nl>
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20230526081946.715190-1-o.rempel@pengutronix.de
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/j1939/socket.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -1088,6 +1088,11 @@ void j1939_sk_errqueue(struct j1939_sess
+
+ void j1939_sk_send_loop_abort(struct sock *sk, int err)
+ {
++ struct j1939_sock *jsk = j1939_sk(sk);
++
++ if (jsk->state & J1939_SOCK_ERRQUEUE)
++ return;
++
+ sk->sk_err = err;
+
+ sk_error_report(sk);
--- /dev/null
+From 409e873ea3c1fd3079909718bbeb06ac1ec7f38b Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Thu, 1 Jun 2023 08:59:31 +0800
+Subject: ceph: fix use-after-free bug for inodes when flushing capsnaps
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 409e873ea3c1fd3079909718bbeb06ac1ec7f38b upstream.
+
+There is a race between capsnaps flush and removing the inode from
+'mdsc->snap_flush_list' list:
+
+ == Thread A == == Thread B ==
+ceph_queue_cap_snap()
+ -> allocate 'capsnapA'
+ ->ihold('&ci->vfs_inode')
+ ->add 'capsnapA' to 'ci->i_cap_snaps'
+ ->add 'ci' to 'mdsc->snap_flush_list'
+ ...
+ == Thread C ==
+ceph_flush_snaps()
+ ->__ceph_flush_snaps()
+ ->__send_flush_snap()
+ handle_cap_flushsnap_ack()
+ ->iput('&ci->vfs_inode')
+ this also will release 'ci'
+ ...
+ == Thread D ==
+ ceph_handle_snap()
+ ->flush_snaps()
+ ->iterate 'mdsc->snap_flush_list'
+ ->get the stale 'ci'
+ ->remove 'ci' from ->ihold(&ci->vfs_inode) this
+ 'mdsc->snap_flush_list' will WARNING
+
+To fix this we will increase the inode's i_count ref when adding 'ci'
+to the 'mdsc->snap_flush_list' list.
+
+[ idryomov: need_put int -> bool ]
+
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2209299
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Milind Changire <mchangir@redhat.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 6 ++++++
+ fs/ceph/snap.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1627,6 +1627,7 @@ void ceph_flush_snaps(struct ceph_inode_
+ struct inode *inode = &ci->netfs.inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_session *session = NULL;
++ bool need_put = false;
+ int mds;
+
+ dout("ceph_flush_snaps %p\n", inode);
+@@ -1671,8 +1672,13 @@ out:
+ ceph_put_mds_session(session);
+ /* we flushed them all; remove this inode from the queue */
+ spin_lock(&mdsc->snap_flush_lock);
++ if (!list_empty(&ci->i_snap_flush_item))
++ need_put = true;
+ list_del_init(&ci->i_snap_flush_item);
+ spin_unlock(&mdsc->snap_flush_lock);
++
++ if (need_put)
++ iput(inode);
+ }
+
+ /*
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -693,8 +693,10 @@ int __ceph_finish_cap_snap(struct ceph_i
+ capsnap->size);
+
+ spin_lock(&mdsc->snap_flush_lock);
+- if (list_empty(&ci->i_snap_flush_item))
++ if (list_empty(&ci->i_snap_flush_item)) {
++ ihold(inode);
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
++ }
+ spin_unlock(&mdsc->snap_flush_lock);
+ return 1; /* caller may want to ceph_flush_snaps */
+ }
--- /dev/null
+From 30c3d3b70aba2464ee8c91025e91428f92464077 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Tue, 30 May 2023 11:57:59 -0500
+Subject: drm/amd: Disallow s0ix without BIOS support again
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 30c3d3b70aba2464ee8c91025e91428f92464077 upstream.
+
+commit cf488dcd0ab7 ("drm/amd: Allow s0ix without BIOS support") showed
+improvements to power consumption over suspend when s0ix wasn't enabled in
+BIOS and the system didn't support S3.
+
+This patch however was misguided because the reason the system didn't
+support S3 was because SMT was disabled in OEM BIOS setup.
+This prevented the BIOS from allowing S3.
+
+Also allowing GPUs to use the s2idle path actually causes problems if
+they're invoked on systems that may not support s2idle in the platform
+firmware. `systemd` has a tendency to try to use `s2idle` if `deep` fails
+for any reason, which could lead to unexpected flows.
+
+The original commit also fixed a problem during resume from suspend to idle
+without hardware support, but this is no longer necessary with commit
+ca4751866397 ("drm/amd: Don't allow s0ix on APUs older than Raven")
+
+Revert commit cf488dcd0ab7 ("drm/amd: Allow s0ix without BIOS support")
+to make it match the expected behavior again.
+
+Cc: Rafael Ávila de Espíndola <rafael@espindo.la>
+Link: https://github.com/torvalds/linux/blob/v6.1/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c#L1060
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/2599
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct a
+ * S0ix even though the system is suspending to idle, so return false
+ * in that case.
+ */
+- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
++ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
++ return false;
++ }
+
+ #if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
+-#endif /* CONFIG_AMD_PMC */
++ return false;
++#else
+ return true;
++#endif /* CONFIG_AMD_PMC */
+ }
+
+ #endif /* CONFIG_SUSPEND */
--- /dev/null
+From 59de751e3845d699e02dc4da47322b92d83a41e2 Mon Sep 17 00:00:00 2001
+From: Samson Tam <samson.tam@amd.com>
+Date: Tue, 9 May 2023 16:40:19 -0400
+Subject: drm/amd/display: add ODM case when looking for first split pipe
+
+From: Samson Tam <samson.tam@amd.com>
+
+commit 59de751e3845d699e02dc4da47322b92d83a41e2 upstream.
+
+[Why]
+When going from ODM 2:1 single display case to max displays, second
+odm pipe needs to be repurposed for one of the new single displays.
+However, acquire_first_split_pipe() only handles MPC case and not
+ODM case
+
+[How]
+Add ODM conditions in acquire_first_split_pipe()
+Add commit_minimal_transition_state() in commit_streams() to handle
+odm 2:1 exit first, and then process new streams
+Handle ODM condition in commit_minimal_transition_state()
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Acked-by: Stylon Wang <stylon.wang@amd.com>
+Signed-off-by: Samson Tam <samson.tam@amd.com>
+Reviewed-by: Alvin Lee <Alvin.Lee2@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 36 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 20 ++++++++++++
+ 2 files changed, 55 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1962,6 +1962,9 @@ static enum dc_status dc_commit_state_no
+ return result;
+ }
+
++static bool commit_minimal_transition_state(struct dc *dc,
++ struct dc_state *transition_base_context);
++
+ /**
+ * dc_commit_streams - Commit current stream state
+ *
+@@ -1983,6 +1986,8 @@ enum dc_status dc_commit_streams(struct
+ struct dc_state *context;
+ enum dc_status res = DC_OK;
+ struct dc_validation_set set[MAX_STREAMS] = {0};
++ struct pipe_ctx *pipe;
++ bool handle_exit_odm2to1 = false;
+
+ if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
+ return res;
+@@ -2007,6 +2012,22 @@ enum dc_status dc_commit_streams(struct
+ }
+ }
+
++ /* Check for case where we are going from odm 2:1 to max
++ * pipe scenario. For these cases, we will call
++ * commit_minimal_transition_state() to exit out of odm 2:1
++ * first before processing new streams
++ */
++ if (stream_count == dc->res_pool->pipe_count) {
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe->next_odm_pipe)
++ handle_exit_odm2to1 = true;
++ }
++ }
++
++ if (handle_exit_odm2to1)
++ res = commit_minimal_transition_state(dc, dc->current_state);
++
+ context = dc_create_state(dc);
+ if (!context)
+ goto context_alloc_fail;
+@@ -3915,6 +3936,7 @@ static bool commit_minimal_transition_st
+ unsigned int i, j;
+ unsigned int pipe_in_use = 0;
+ bool subvp_in_use = false;
++ bool odm_in_use = false;
+
+ if (!transition_context)
+ return false;
+@@ -3943,6 +3965,18 @@ static bool commit_minimal_transition_st
+ }
+ }
+
++ /* If ODM is enabled and we are adding or removing planes from any ODM
++ * pipe, we must use the minimal transition.
++ */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ if (pipe->stream && pipe->next_odm_pipe) {
++ odm_in_use = true;
++ break;
++ }
++ }
++
+ /* When the OS add a new surface if we have been used all of pipes with odm combine
+ * and mpc split feature, it need use commit_minimal_transition_state to transition safely.
+ * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
+@@ -3951,7 +3985,7 @@ static bool commit_minimal_transition_st
+ * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
+ * enter/exit MPO when DCN still have enough resources.
+ */
+- if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
++ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
+ dc_release_state(transition_context);
+ return true;
+ }
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1446,6 +1446,26 @@ static int acquire_first_split_pipe(
+
+ split_pipe->stream = stream;
+ return i;
++ } else if (split_pipe->prev_odm_pipe &&
++ split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
++ split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
++ if (split_pipe->next_odm_pipe)
++ split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
++
++ if (split_pipe->prev_odm_pipe->plane_state)
++ resource_build_scaling_params(split_pipe->prev_odm_pipe);
++
++ memset(split_pipe, 0, sizeof(*split_pipe));
++ split_pipe->stream_res.tg = pool->timing_generators[i];
++ split_pipe->plane_res.hubp = pool->hubps[i];
++ split_pipe->plane_res.ipp = pool->ipps[i];
++ split_pipe->plane_res.dpp = pool->dpps[i];
++ split_pipe->stream_res.opp = pool->opps[i];
++ split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
++ split_pipe->pipe_idx = i;
++
++ split_pipe->stream = stream;
++ return i;
+ }
+ }
+ return -1;
--- /dev/null
+From e1a600208286c197c2696e51fc313e49889315bd Mon Sep 17 00:00:00 2001
+From: Alvin Lee <alvin.lee2@amd.com>
+Date: Fri, 19 May 2023 11:38:15 -0400
+Subject: drm/amd/display: Reduce sdp bw after urgent to 90%
+
+From: Alvin Lee <alvin.lee2@amd.com>
+
+commit e1a600208286c197c2696e51fc313e49889315bd upstream.
+
+[Description]
+Reduce expected SDP bandwidth due to poor QoS and
+arbitration issues on high bandwidth configs
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Acked-by: Stylon Wang <stylon.wang@amd.com>
+Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
+Reviewed-by: Nevenko Stupar <Nevenko.Stupar@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+- .pct_ideal_sdp_bw_after_urgent = 100.0,
++ .pct_ideal_sdp_bw_after_urgent = 90.0,
+ .pct_ideal_fabric_bw_after_urgent = 67.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
--- /dev/null
+From 1d13c49cf4e246b218d71873f1bb1bbd376aa10e Mon Sep 17 00:00:00 2001
+From: Lijo Lazar <lijo.lazar@amd.com>
+Date: Fri, 31 Mar 2023 16:30:01 +0530
+Subject: drm/amd/pm: Fix power context allocation in SMU13
+
+From: Lijo Lazar <lijo.lazar@amd.com>
+
+commit 1d13c49cf4e246b218d71873f1bb1bbd376aa10e upstream.
+
+Use the right data structure for allocation.
+
+Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -566,11 +566,11 @@ int smu_v13_0_init_power(struct smu_cont
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+- smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
++ smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+- smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
++ smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
+
+ return 0;
+ }
--- /dev/null
+From 44d0fb387b53e56c8a050bac5c7d460e21eb226f Mon Sep 17 00:00:00 2001
+From: Ruihan Li <lrh2000@pku.edu.cn>
+Date: Mon, 15 May 2023 21:09:58 +0800
+Subject: mm: page_table_check: Ensure user pages are not slab pages
+
+From: Ruihan Li <lrh2000@pku.edu.cn>
+
+commit 44d0fb387b53e56c8a050bac5c7d460e21eb226f upstream.
+
+The current uses of PageAnon in page table check functions can lead to
+type confusion bugs between struct page and slab [1], if slab pages are
+accidentally mapped into the user space. This is because slab reuses the
+bits in struct page to store its internal states, which renders PageAnon
+ineffective on slab pages.
+
+Since slab pages are not expected to be mapped into the user space, this
+patch adds BUG_ON(PageSlab(page)) checks to make sure that slab pages
+are not inadvertently mapped. Otherwise, there must be some bugs in the
+kernel.
+
+Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1]
+Fixes: df4e817b7108 ("mm: page table check")
+Cc: <stable@vger.kernel.org> # 5.17
+Signed-off-by: Ruihan Li <lrh2000@pku.edu.cn>
+Acked-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Link: https://lore.kernel.org/r/20230515130958.32471-5-lrh2000@pku.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/page-flags.h | 6 ++++++
+ mm/page_table_check.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -630,6 +630,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemma
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
++ *
++ * For slab pages, since slab reuses the bits in struct page to store its
++ * internal states, the page->mapping does not exist as such, nor do these
++ * flags below. So in order to avoid testing non-existent bits, please
++ * make sure that PageSlab(page) actually evaluates to false before calling
++ * the following functions (e.g., PageAnon). See mm/slab.h.
+ */
+ #define PAGE_MAPPING_ANON 0x1
+ #define PAGE_MAPPING_MOVABLE 0x2
+--- a/mm/page_table_check.c
++++ b/mm/page_table_check.c
+@@ -71,6 +71,8 @@ static void page_table_check_clear(struc
+
+ page = pfn_to_page(pfn);
+ page_ext = page_ext_get(page);
++
++ BUG_ON(PageSlab(page));
+ anon = PageAnon(page);
+
+ for (i = 0; i < pgcnt; i++) {
+@@ -107,6 +109,8 @@ static void page_table_check_set(struct
+
+ page = pfn_to_page(pfn);
+ page_ext = page_ext_get(page);
++
++ BUG_ON(PageSlab(page));
+ anon = PageAnon(page);
+
+ for (i = 0; i < pgcnt; i++) {
+@@ -133,6 +137,8 @@ void __page_table_check_zero(struct page
+ struct page_ext *page_ext;
+ unsigned long i;
+
++ BUG_ON(PageSlab(page));
++
+ page_ext = page_ext_get(page);
+ BUG_ON(!page_ext);
+ for (i = 0; i < (1ul << order); i++) {
--- /dev/null
+From 81a31a860bb61d54eb688af2568d9332ed9b8942 Mon Sep 17 00:00:00 2001
+From: Ruihan Li <lrh2000@pku.edu.cn>
+Date: Mon, 15 May 2023 21:09:57 +0800
+Subject: mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM
+
+From: Ruihan Li <lrh2000@pku.edu.cn>
+
+commit 81a31a860bb61d54eb688af2568d9332ed9b8942 upstream.
+
+Without EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary
+physical memory regions into the userspace via /dev/mem. At the same
+time, pages may change their properties (e.g., from anonymous pages to
+named pages) while they are still being mapped in the userspace, leading
+to "corruption" detected by the page table check.
+
+To avoid these false positives, this patch makes PAGE_TABLE_CHECK
+depend on EXCLUSIVE_SYSTEM_RAM. This dependency is understandable
+because PAGE_TABLE_CHECK is a hardening technique but /dev/mem without
+STRICT_DEVMEM (i.e., !EXCLUSIVE_SYSTEM_RAM) is itself a security
+problem.
+
+Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be
+mapped via /dev/mem. However, these pages are always considered as named
+pages, so they won't break the logic used in the page table check.
+
+Cc: <stable@vger.kernel.org> # 5.17
+Signed-off-by: Ruihan Li <lrh2000@pku.edu.cn>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Link: https://lore.kernel.org/r/20230515130958.32471-4-lrh2000@pku.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/mm/page_table_check.rst | 19 +++++++++++++++++++
+ mm/Kconfig.debug | 1 +
+ 2 files changed, 20 insertions(+)
+
+--- a/Documentation/mm/page_table_check.rst
++++ b/Documentation/mm/page_table_check.rst
+@@ -52,3 +52,22 @@ Build kernel with:
+
+ Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
+ table support without extra kernel parameter.
++
++Implementation notes
++====================
++
++We specifically decided not to use VMA information in order to avoid relying on
++MM states (except for limited "struct page" info). The page table check is a
++separate from Linux-MM state machine that verifies that the user accessible
++pages are not falsely shared.
++
++PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
++EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
++regions into the userspace via /dev/mem. At the same time, pages may change
++their properties (e.g., from anonymous pages to named pages) while they are
++still being mapped in the userspace, leading to "corruption" detected by the
++page table check.
++
++Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via
++/dev/mem. However, these pages are always considered as named pages, so they
++won't break the logic used in the page table check.
+--- a/mm/Kconfig.debug
++++ b/mm/Kconfig.debug
+@@ -98,6 +98,7 @@ config PAGE_OWNER
+ config PAGE_TABLE_CHECK
+ bool "Check for invalid mappings in user page tables"
+ depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK
++ depends on EXCLUSIVE_SYSTEM_RAM
+ select PAGE_EXTENSION
+ help
+ Check that anonymous page is not being mapped twice with read write
--- /dev/null
+From 24430f8bf51655c5ab7ddc2fafe939dd3cd0dd47 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Sun, 4 Jun 2023 20:25:19 -0700
+Subject: mptcp: add address into userspace pm list
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 24430f8bf51655c5ab7ddc2fafe939dd3cd0dd47 upstream.
+
+Add the address into userspace_pm_local_addr_list when the subflow is
+created. Make sure it can be found in mptcp_nl_cmd_remove(). And delete
+it in the new helper mptcp_userspace_pm_delete_local_addr().
+
+By doing this, the "REMOVE" command also works with subflows that have
+been created via the "SUB_CREATE" command instead of restricting to
+the addresses that have been announced via the "ANNOUNCE" command.
+
+Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE")
+Link: https://github.com/multipath-tcp/mptcp_net-next/issues/379
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_userspace.c | 41 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -79,6 +79,30 @@ append_err:
+ return ret;
+ }
+
++/* If the subflow is closed from the other peer (not via a
++ * subflow destroy command then), we want to keep the entry
++ * not to assign the same ID to another address and to be
++ * able to send RM_ADDR after the removal of the subflow.
++ */
++static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
++ struct mptcp_pm_addr_entry *addr)
++{
++ struct mptcp_pm_addr_entry *entry, *tmp;
++
++ list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
++ /* TODO: a refcount is needed because the entry can
++ * be used multiple times (e.g. fullmesh mode).
++ */
++ list_del_rcu(&entry->list);
++ kfree(entry);
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
+ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ unsigned int id,
+ u8 *flags, int *ifindex)
+@@ -251,6 +275,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buf
+ struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
++ struct mptcp_pm_addr_entry local = { 0 };
+ struct mptcp_addr_info addr_r;
+ struct mptcp_addr_info addr_l;
+ struct mptcp_sock *msk;
+@@ -302,12 +327,24 @@ int mptcp_nl_cmd_sf_create(struct sk_buf
+ goto create_err;
+ }
+
++ local.addr = addr_l;
++ err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
++ if (err < 0) {
++ GENL_SET_ERR_MSG(info, "did not match address and id");
++ goto create_err;
++ }
++
+ lock_sock(sk);
+
+ err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+
+ release_sock(sk);
+
++ spin_lock_bh(&msk->pm.lock);
++ if (err)
++ mptcp_userspace_pm_delete_local_addr(msk, &local);
++ spin_unlock_bh(&msk->pm.lock);
++
+ create_err:
+ sock_put((struct sock *)msk);
+ return err;
+@@ -420,7 +457,11 @@ int mptcp_nl_cmd_sf_destroy(struct sk_bu
+ ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
+ if (ssk) {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++ struct mptcp_pm_addr_entry entry = { .addr = addr_l };
+
++ spin_lock_bh(&msk->pm.lock);
++ mptcp_userspace_pm_delete_local_addr(msk, &entry);
++ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+ mptcp_close_ssk(sk, ssk, subflow);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
--- /dev/null
+From 8b1c94da1e481090f24127b2c420b0c0b0421ce3 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Sun, 4 Jun 2023 20:25:17 -0700
+Subject: mptcp: only send RM_ADDR in nl_cmd_remove
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 8b1c94da1e481090f24127b2c420b0c0b0421ce3 upstream.
+
+The specifications from [1] about the "REMOVE" command say:
+
+ Announce that an address has been lost to the peer
+
+It was then only supposed to send a RM_ADDR and not try to delete
+associated subflows.
+
+A new helper mptcp_pm_remove_addrs() is then introduced to do just
+that, compared to mptcp_pm_remove_addrs_and_subflows() also removing
+subflows.
+
+To delete a subflow, the userspace daemon can use the "SUB_DESTROY"
+command, see mptcp_nl_cmd_sf_destroy().
+
+Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE")
+Link: https://github.com/multipath-tcp/mptcp/blob/mptcp_v0.96/include/uapi/linux/mptcp.h [1]
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 18 ++++++++++++++++++
+ net/mptcp/pm_userspace.c | 2 +-
+ net/mptcp/protocol.h | 1 +
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1558,6 +1558,24 @@ static int mptcp_nl_cmd_del_addr(struct
+ return ret;
+ }
+
++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
++{
++ struct mptcp_rm_list alist = { .nr = 0 };
++ struct mptcp_pm_addr_entry *entry;
++
++ list_for_each_entry(entry, rm_list, list) {
++ remove_anno_list_by_saddr(msk, &entry->addr);
++ if (alist.nr < MPTCP_RM_IDS_MAX)
++ alist.ids[alist.nr++] = entry->addr.id;
++ }
++
++ if (alist.nr) {
++ spin_lock_bh(&msk->pm.lock);
++ mptcp_pm_remove_addr(msk, &alist);
++ spin_unlock_bh(&msk->pm.lock);
++ }
++}
++
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
+ {
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -232,7 +232,7 @@ int mptcp_nl_cmd_remove(struct sk_buff *
+
+ list_move(&match->list, &free_list);
+
+- mptcp_pm_remove_addrs_and_subflows(msk, &free_list);
++ mptcp_pm_remove_addrs(msk, &free_list);
+
+ release_sock((struct sock *)msk);
+
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -835,6 +835,7 @@ int mptcp_pm_announce_addr(struct mptcp_
+ bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
++void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list);
+
--- /dev/null
+From 77e4b94a3de692a09b79945ecac5b8e6b77f10c1 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Sun, 4 Jun 2023 20:25:21 -0700
+Subject: mptcp: update userspace pm infos
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 77e4b94a3de692a09b79945ecac5b8e6b77f10c1 upstream.
+
+Increase pm subflows counter on both server side and client side when
+userspace pm creates a new subflow, and decrease the counter when it
+closes a subflow.
+
+Increase add_addr_signaled counter in mptcp_nl_cmd_announce() when the
+address is announced by userspace PM.
+
+This modification is similar to how the in-kernel PM is updating the
+counter: when additional subflows are created/removed.
+
+Fixes: 9ab4807c84a4 ("mptcp: netlink: Add MPTCP_PM_CMD_ANNOUNCE")
+Fixes: 702c2f646d42 ("mptcp: netlink: allow userspace-driven subflow establishment")
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/329
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c | 23 +++++++++++++++++++----
+ net/mptcp/pm_userspace.c | 5 +++++
+ 2 files changed, 24 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -87,8 +87,15 @@ bool mptcp_pm_allow_new_subflow(struct m
+ unsigned int subflows_max;
+ int ret = 0;
+
+- if (mptcp_pm_is_userspace(msk))
+- return mptcp_userspace_pm_active(msk);
++ if (mptcp_pm_is_userspace(msk)) {
++ if (mptcp_userspace_pm_active(msk)) {
++ spin_lock_bh(&pm->lock);
++ pm->subflows++;
++ spin_unlock_bh(&pm->lock);
++ return true;
++ }
++ return false;
++ }
+
+ subflows_max = mptcp_pm_get_subflows_max(msk);
+
+@@ -181,8 +188,16 @@ void mptcp_pm_subflow_check_next(struct
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool update_subflows;
+
+- update_subflows = (subflow->request_join || subflow->mp_join) &&
+- mptcp_pm_is_kernel(msk);
++ update_subflows = subflow->request_join || subflow->mp_join;
++ if (mptcp_pm_is_userspace(msk)) {
++ if (update_subflows) {
++ spin_lock_bh(&pm->lock);
++ pm->subflows--;
++ spin_unlock_bh(&pm->lock);
++ }
++ return;
++ }
++
+ if (!READ_ONCE(pm->work_pending) && !update_subflows)
+ return;
+
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -69,6 +69,7 @@ int mptcp_userspace_pm_append_new_local_
+ MPTCP_PM_MAX_ADDR_ID + 1,
+ 1);
+ list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
++ msk->pm.local_addr_used++;
+ ret = e->addr.id;
+ } else if (match) {
+ ret = entry->addr.id;
+@@ -96,6 +97,7 @@ static int mptcp_userspace_pm_delete_loc
+ */
+ list_del_rcu(&entry->list);
+ kfree(entry);
++ msk->pm.local_addr_used--;
+ return 0;
+ }
+ }
+@@ -195,6 +197,7 @@ int mptcp_nl_cmd_announce(struct sk_buff
+ spin_lock_bh(&msk->pm.lock);
+
+ if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
++ msk->pm.add_addr_signaled++;
+ mptcp_pm_announce_addr(msk, &addr_val.addr, false);
+ mptcp_pm_nl_addr_send_ack(msk);
+ }
+@@ -343,6 +346,8 @@ int mptcp_nl_cmd_sf_create(struct sk_buf
+ spin_lock_bh(&msk->pm.lock);
+ if (err)
+ mptcp_userspace_pm_delete_local_addr(msk, &local);
++ else
++ msk->pm.subflows++;
+ spin_unlock_bh(&msk->pm.lock);
+
+ create_err:
--- /dev/null
+From 5b10ff013e8a57f8845615ac2cc37edf7f6eef05 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Martin=20Hundeb=C3=B8ll?= <martin@geanix.com>
+Date: Fri, 12 May 2023 08:49:25 +0200
+Subject: pinctrl: meson-axg: add missing GPIOA_18 gpio group
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Hundebøll <martin@geanix.com>
+
+commit 5b10ff013e8a57f8845615ac2cc37edf7f6eef05 upstream.
+
+Without this, the gpio cannot be explicitly mux'ed to its gpio function.
+
+Fixes: 83c566806a68a ("pinctrl: meson-axg: Add new pinctrl driver for Meson AXG SoC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Martin Hundebøll <martin@geanix.com>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Reviewed-by: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+Link: https://lore.kernel.org/r/20230512064925.133516-1-martin@geanix.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/meson/pinctrl-meson-axg.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
+@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_
+ GPIO_GROUP(GPIOA_15),
+ GPIO_GROUP(GPIOA_16),
+ GPIO_GROUP(GPIOA_17),
++ GPIO_GROUP(GPIOA_18),
+ GPIO_GROUP(GPIOA_19),
+ GPIO_GROUP(GPIOA_20),
+
--- /dev/null
+From 870611e4877eff1e8413c3fb92a585e45d5291f6 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 5 Jun 2023 16:33:35 +0200
+Subject: rbd: get snapshot context after exclusive lock is ensured to be held
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 870611e4877eff1e8413c3fb92a585e45d5291f6 upstream.
+
+Move capturing the snapshot context into the image request state
+machine, after exclusive lock is ensured to be held for the duration of
+dealing with the image request. This is needed to ensure correctness
+of fast-diff states (OBJECT_EXISTS vs OBJECT_EXISTS_CLEAN) and object
+deltas computed based off of them. Otherwise the object map that is
+forked for the snapshot isn't guaranteed to accurately reflect the
+contents of the snapshot when the snapshot is taken under I/O. This
+breaks differential backup and snapshot-based mirroring use cases with
+fast-diff enabled: since some object deltas may be incomplete, the
+destination image may get corrupted.
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/61472
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/rbd.c | 30 +++++++++++++++++++++++-------
+ 1 file changed, 23 insertions(+), 7 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1336,6 +1336,8 @@ static bool rbd_obj_is_tail(struct rbd_o
+ */
+ static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
+ {
++ rbd_assert(obj_req->img_request->snapc);
++
+ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
+ dout("%s %p objno %llu discard\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+@@ -1456,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj
+ static struct ceph_osd_request *
+ rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
+ {
++ rbd_assert(obj_req->img_request->snapc);
+ return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
+ num_ops);
+ }
+@@ -1592,15 +1595,18 @@ static void rbd_img_request_init(struct
+ mutex_init(&img_request->state_mutex);
+ }
+
++/*
++ * Only snap_id is captured here, for reads. For writes, snapshot
++ * context is captured in rbd_img_object_requests() after exclusive
++ * lock is ensured to be held.
++ */
+ static void rbd_img_capture_header(struct rbd_img_request *img_req)
+ {
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
+
+ lockdep_assert_held(&rbd_dev->header_rwsem);
+
+- if (rbd_img_is_write(img_req))
+- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+- else
++ if (!rbd_img_is_write(img_req))
+ img_req->snap_id = rbd_dev->spec->snap_id;
+
+ if (rbd_dev_parent_get(rbd_dev))
+@@ -3482,9 +3488,19 @@ static int rbd_img_exclusive_lock(struct
+
+ static void rbd_img_object_requests(struct rbd_img_request *img_req)
+ {
++ struct rbd_device *rbd_dev = img_req->rbd_dev;
+ struct rbd_obj_request *obj_req;
+
+ rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
++ rbd_assert(!need_exclusive_lock(img_req) ||
++ __rbd_is_lock_owner(rbd_dev));
++
++ if (rbd_img_is_write(img_req)) {
++ rbd_assert(!img_req->snapc);
++ down_read(&rbd_dev->header_rwsem);
++ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
++ up_read(&rbd_dev->header_rwsem);
++ }
+
+ for_each_obj_request(img_req, obj_req) {
+ int result = 0;
+@@ -3502,7 +3518,6 @@ static void rbd_img_object_requests(stru
+
+ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
+ {
+- struct rbd_device *rbd_dev = img_req->rbd_dev;
+ int ret;
+
+ again:
+@@ -3523,9 +3538,6 @@ again:
+ if (*result)
+ return true;
+
+- rbd_assert(!need_exclusive_lock(img_req) ||
+- __rbd_is_lock_owner(rbd_dev));
+-
+ rbd_img_object_requests(img_req);
+ if (!img_req->pending.num_pending) {
+ *result = img_req->pending.result;
+@@ -3987,6 +3999,10 @@ static int rbd_post_acquire_action(struc
+ {
+ int ret;
+
++ ret = rbd_dev_refresh(rbd_dev);
++ if (ret)
++ return ret;
++
+ if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
+ ret = rbd_object_map_open(rbd_dev);
+ if (ret)
--- /dev/null
+From 09fe05c57b5aaf23e2c35036c98ea9f282b19a77 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 5 Jun 2023 16:33:35 +0200
+Subject: rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 09fe05c57b5aaf23e2c35036c98ea9f282b19a77 upstream.
+
+Move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting into the object request
+state machine to allow for the snapshot context to be captured in the
+image request state machine rather than in rbd_queue_workfn().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/rbd.c | 32 +++++++++++++++++++++-----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1334,14 +1334,28 @@ static bool rbd_obj_is_tail(struct rbd_o
+ /*
+ * Must be called after rbd_obj_calc_img_extents().
+ */
+-static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
++static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
+ {
+- if (!obj_req->num_img_extents ||
+- (rbd_obj_is_entire(obj_req) &&
+- !obj_req->img_request->snapc->num_snaps))
+- return false;
++ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
++ dout("%s %p objno %llu discard\n", __func__, obj_req,
++ obj_req->ex.oe_objno);
++ return;
++ }
+
+- return true;
++ if (!obj_req->num_img_extents) {
++ dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
++ obj_req->ex.oe_objno);
++ return;
++ }
++
++ if (rbd_obj_is_entire(obj_req) &&
++ !obj_req->img_request->snapc->num_snaps) {
++ dout("%s %p objno %llu entire\n", __func__, obj_req,
++ obj_req->ex.oe_objno);
++ return;
++ }
++
++ obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+ }
+
+ static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
+@@ -2233,9 +2247,6 @@ static int rbd_obj_init_write(struct rbd
+ if (ret)
+ return ret;
+
+- if (rbd_obj_copyup_enabled(obj_req))
+- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+-
+ obj_req->write_state = RBD_OBJ_WRITE_START;
+ return 0;
+ }
+@@ -2341,8 +2352,6 @@ static int rbd_obj_init_zeroout(struct r
+ if (ret)
+ return ret;
+
+- if (rbd_obj_copyup_enabled(obj_req))
+- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
+ if (!obj_req->num_img_extents) {
+ obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
+ if (rbd_obj_is_entire(obj_req))
+@@ -3286,6 +3295,7 @@ again:
+ case RBD_OBJ_WRITE_START:
+ rbd_assert(!*result);
+
++ rbd_obj_set_copyup_enabled(obj_req);
+ if (rbd_obj_write_is_noop(obj_req))
+ return true;
+
--- /dev/null
+From ccc45cb4e7271c74dbb27776ae8f73d84557f5c6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20H=C3=B6ppner?= <hoeppner@linux.ibm.com>
+Date: Fri, 9 Jun 2023 17:37:50 +0200
+Subject: s390/dasd: Use correct lock while counting channel queue length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jan Höppner <hoeppner@linux.ibm.com>
+
+commit ccc45cb4e7271c74dbb27776ae8f73d84557f5c6 upstream.
+
+The lock around counting the channel queue length in the BIODASDINFO
+ioctl was incorrectly changed to the dasd_block->queue_lock with commit
+583d6535cb9d ("dasd: remove dead code"). This can lead to endless list
+iterations and a subsequent crash.
+
+The queue_lock is supposed to be used only for queue lists belonging to
+dasd_block. For dasd_device related queue lists the ccwdev lock must be
+used.
+
+Fix the mentioned issues by correctly using the ccwdev lock instead of
+the queue lock.
+
+Fixes: 583d6535cb9d ("dasd: remove dead code")
+Cc: stable@vger.kernel.org # v5.0+
+Signed-off-by: Jan Höppner <hoeppner@linux.ibm.com>
+Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Link: https://lore.kernel.org/r/20230609153750.1258763-2-sth@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/block/dasd_ioctl.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/block/dasd_ioctl.c
++++ b/drivers/s390/block/dasd_ioctl.c
+@@ -552,10 +552,10 @@ static int __dasd_ioctl_information(stru
+
+ memcpy(dasd_info->type, base->discipline->name, 4);
+
+- spin_lock_irqsave(&block->queue_lock, flags);
++ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ list_for_each(l, &base->ccw_queue)
+ dasd_info->chanq_len++;
+- spin_unlock_irqrestore(&block->queue_lock, flags);
++ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ return 0;
+ }
+
--- /dev/null
+From 48d73f609dcceeb563b0d960e59bf0362581e39c Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Sun, 4 Jun 2023 20:25:18 -0700
+Subject: selftests: mptcp: update userspace pm addr tests
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 48d73f609dcceeb563b0d960e59bf0362581e39c upstream.
+
+This patch is linked to the previous commit ("mptcp: only send RM_ADDR in
+nl_cmd_remove").
+
+To align with what is done by the in-kernel PM, update userspace pm addr
+selftests, by sending a remove_subflows command together after the
+remove_addrs command.
+
+Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE")
+Fixes: 97040cf9806e ("selftests: mptcp: userspace pm address tests")
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -856,7 +856,15 @@ do_transfer()
+ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
+ sleep 1
++ sp=$(grep "type:10" "$evts_ns1" |
++ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++ da=$(grep "type:10" "$evts_ns1" |
++ sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
++ dp=$(grep "type:10" "$evts_ns1" |
++ sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
++ ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \
++ lport $sp rip $da rport $dp token $tk
+ fi
+
+ counter=$((counter + 1))
--- /dev/null
+From 6c160b636c91e71e50c39134f78257cc35305ff0 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Sun, 4 Jun 2023 20:25:20 -0700
+Subject: selftests: mptcp: update userspace pm subflow tests
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 6c160b636c91e71e50c39134f78257cc35305ff0 upstream.
+
+To align with what is done by the in-kernel PM, update userspace pm
+subflow selftests, by sending a remove_addrs command together
+before the remove_subflows command. This will get a RM_ADDR in
+chk_rm_nr().
+
+Fixes: d9a4594edabf ("mptcp: netlink: Add MPTCP_PM_CMD_REMOVE")
+Fixes: 5e986ec46874 ("selftests: mptcp: userspace pm subflow tests")
+Link: https://github.com/multipath-tcp/mptcp_net-next/issues/379
+Cc: stable@vger.kernel.org
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Signed-off-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -930,6 +930,7 @@ do_transfer()
+ sleep 1
+ sp=$(grep "type:10" "$evts_ns2" |
+ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++ ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id
+ ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
+ rip $da rport $dp token $tk
+ fi
+@@ -3104,7 +3105,7 @@ userspace_tests()
+ pm_nl_set_limits $ns1 0 1
+ run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+ chk_join_nr 1 1 1
+- chk_rm_nr 0 1
++ chk_rm_nr 1 1
+ kill_events_pids
+ fi
+ }
drm-amd-pm-conditionally-disable-pcie-lane-switching-for-some-sienna_cichlid-skus.patch
drm-amdgpu-fix-xclk-freq-on-chip_stoney.patch
drm-amdgpu-change-reserved-vram-info-print.patch
+drm-amd-disallow-s0ix-without-bios-support-again.patch
+drm-amd-pm-fix-power-context-allocation-in-smu13.patch
+drm-amd-display-reduce-sdp-bw-after-urgent-to-90.patch
+drm-amd-display-add-odm-case-when-looking-for-first-split-pipe.patch
+wifi-iwlwifi-mvm-fix-warray-bounds-bug-in-iwl_mvm_wait_d3_notif.patch
+can-j1939-j1939_sk_send_loop_abort-improved-error-queue-handling-in-j1939-socket.patch
+can-j1939-change-j1939_netdev_lock-type-to-mutex.patch
+can-j1939-avoid-possible-use-after-free-when-j1939_can_rx_register-fails.patch
+mptcp-only-send-rm_addr-in-nl_cmd_remove.patch
+mptcp-add-address-into-userspace-pm-list.patch
+mptcp-update-userspace-pm-infos.patch
+selftests-mptcp-update-userspace-pm-addr-tests.patch
+selftests-mptcp-update-userspace-pm-subflow-tests.patch
+ceph-fix-use-after-free-bug-for-inodes-when-flushing-capsnaps.patch
+accel-ivpu-do-not-trigger-extra-vpu-reset-if-the-vpu-is-idle.patch
+accel-ivpu-fix-sporadic-vpu-boot-failure.patch
+s390-dasd-use-correct-lock-while-counting-channel-queue-length.patch
+bluetooth-fix-use-after-free-in-hci_remove_ltk-hci_remove_irk.patch
+bluetooth-fix-debugfs-registration.patch
+bluetooth-hci_qca-fix-debugfs-registration.patch
+tee-amdtee-add-return_origin-to-struct-tee_cmd_load_ta.patch
+rbd-move-rbd_obj_flag_copyup_enabled-flag-setting.patch
+rbd-get-snapshot-context-after-exclusive-lock-is-ensured-to-be-held.patch
+virtio_net-use-control_buf-for-coalesce-params.patch
+soc-qcom-icc-bwmon-fix-incorrect-error-code-passed-to-dev_err_probe.patch
+pinctrl-meson-axg-add-missing-gpioa_18-gpio-group.patch
+usb-usbfs-enforce-page-requirements-for-mmap.patch
+usb-usbfs-use-consistent-mmap-functions.patch
+mm-page_table_check-make-it-dependent-on-exclusive_system_ram.patch
+mm-page_table_check-ensure-user-pages-are-not-slab-pages.patch
--- /dev/null
+From 3530167c6fe8001de6c026a3058eaca4c8a5329f Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Sat, 13 May 2023 13:17:47 +0200
+Subject: soc: qcom: icc-bwmon: fix incorrect error code passed to dev_err_probe()
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit 3530167c6fe8001de6c026a3058eaca4c8a5329f upstream.
+
+Pass to dev_err_probe() PTR_ERR from actual dev_pm_opp_find_bw_floor()
+call which failed, instead of previous ret which at this point is 0.
+Failure of dev_pm_opp_find_bw_floor() would result in prematurely ending
+the probe with success.
+
+Fixes smatch warnings:
+
+ drivers/soc/qcom/icc-bwmon.c:776 bwmon_probe() warn: passing zero to 'dev_err_probe'
+ drivers/soc/qcom/icc-bwmon.c:781 bwmon_probe() warn: passing zero to 'dev_err_probe'
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <error27@gmail.com>
+Link: https://lore.kernel.org/r/202305131657.76XeHDjF-lkp@intel.com/
+Cc: <stable@vger.kernel.org>
+Fixes: b9c2ae6cac40 ("soc: qcom: icc-bwmon: Add bandwidth monitoring driver")
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Link: https://lore.kernel.org/r/20230513111747.132532-1-krzysztof.kozlowski@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/qcom/icc-bwmon.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/soc/qcom/icc-bwmon.c
++++ b/drivers/soc/qcom/icc-bwmon.c
+@@ -603,12 +603,12 @@ static int bwmon_probe(struct platform_d
+ bwmon->max_bw_kbps = UINT_MAX;
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+- return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n");
++ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+- return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
++ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+
+ bwmon->dev = dev;
+
--- /dev/null
+From 436eeae0411acdfc54521ddea80ee76d4ae8a7ea Mon Sep 17 00:00:00 2001
+From: Rijo Thomas <Rijo-john.Thomas@amd.com>
+Date: Tue, 9 May 2023 13:02:40 +0530
+Subject: tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta'
+
+From: Rijo Thomas <Rijo-john.Thomas@amd.com>
+
+commit 436eeae0411acdfc54521ddea80ee76d4ae8a7ea upstream.
+
+After TEE has completed processing of TEE_CMD_ID_LOAD_TA, set proper
+value in 'return_origin' argument passed by open_session() call. To do
+so, add 'return_origin' field to the structure tee_cmd_load_ta. The
+Trusted OS shall update return_origin as part of TEE processing.
+
+This change to 'struct tee_cmd_load_ta' interface requires a similar update
+in AMD-TEE Trusted OS's TEE_CMD_ID_LOAD_TA interface.
+
+This patch has been verified on Phoenix Birman setup. On older APUs,
+return_origin value will be 0.
+
+Cc: stable@vger.kernel.org
+Fixes: 757cc3e9ff1d ("tee: add AMD-TEE driver")
+Tested-by: Sourabh Das <sourabh.das@amd.com>
+Signed-off-by: Rijo Thomas <Rijo-john.Thomas@amd.com>
+Acked-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tee/amdtee/amdtee_if.h | 10 ++++++----
+ drivers/tee/amdtee/call.c | 28 ++++++++++++++++------------
+ 2 files changed, 22 insertions(+), 16 deletions(-)
+
+--- a/drivers/tee/amdtee/amdtee_if.h
++++ b/drivers/tee/amdtee/amdtee_if.h
+@@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem {
+
+ /**
+ * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
+- * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+- * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+- * @size: [in] size of TA binary in bytes
+- * @ta_handle: [out] return handle of the loaded TA
++ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
++ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
++ * @size: [in] size of TA binary in bytes
++ * @ta_handle: [out] return handle of the loaded TA
++ * @return_origin: [out] origin of return code after TEE processing
+ */
+ struct tee_cmd_load_ta {
+ u32 low_addr;
+ u32 hi_addr;
+ u32 size;
+ u32 ta_handle;
++ u32 return_origin;
+ };
+
+ /**
+--- a/drivers/tee/amdtee/call.c
++++ b/drivers/tee/amdtee/call.c
+@@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size,
+ if (ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+- } else if (arg->ret == TEEC_SUCCESS) {
+- ret = get_ta_refcount(load_cmd.ta_handle);
+- if (!ret) {
+- arg->ret_origin = TEEC_ORIGIN_COMMS;
+- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
++ } else {
++ arg->ret_origin = load_cmd.return_origin;
+
+- /* Unload the TA on error */
+- unload_cmd.ta_handle = load_cmd.ta_handle;
+- psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+- (void *)&unload_cmd,
+- sizeof(unload_cmd), &ret);
+- } else {
+- set_session_id(load_cmd.ta_handle, 0, &arg->session);
++ if (arg->ret == TEEC_SUCCESS) {
++ ret = get_ta_refcount(load_cmd.ta_handle);
++ if (!ret) {
++ arg->ret_origin = TEEC_ORIGIN_COMMS;
++ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
++
++ /* Unload the TA on error */
++ unload_cmd.ta_handle = load_cmd.ta_handle;
++ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
++ (void *)&unload_cmd,
++ sizeof(unload_cmd), &ret);
++ } else {
++ set_session_id(load_cmd.ta_handle, 0, &arg->session);
++ }
+ }
+ }
+ mutex_unlock(&ta_refcount_mutex);
--- /dev/null
+From 0143d148d1e882fb1538dc9974c94d63961719b9 Mon Sep 17 00:00:00 2001
+From: Ruihan Li <lrh2000@pku.edu.cn>
+Date: Mon, 15 May 2023 21:09:55 +0800
+Subject: usb: usbfs: Enforce page requirements for mmap
+
+From: Ruihan Li <lrh2000@pku.edu.cn>
+
+commit 0143d148d1e882fb1538dc9974c94d63961719b9 upstream.
+
+The current implementation of usbdev_mmap uses usb_alloc_coherent to
+allocate memory pages that will later be mapped into the user space.
+Meanwhile, usb_alloc_coherent employs three different methods to
+allocate memory, as outlined below:
+ * If hcd->localmem_pool is non-null, it uses gen_pool_dma_alloc to
+ allocate memory;
+ * If DMA is not available, it uses kmalloc to allocate memory;
+ * Otherwise, it uses dma_alloc_coherent.
+
+However, it should be noted that gen_pool_dma_alloc does not guarantee
+that the resulting memory will be page-aligned. Furthermore, trying to
+map slab pages (i.e., memory allocated by kmalloc) into the user space
+is not resonable and can lead to problems, such as a type confusion bug
+when PAGE_TABLE_CHECK=y [1].
+
+To address these issues, this patch introduces hcd_alloc_coherent_pages,
+which addresses the above two problems. Specifically,
+hcd_alloc_coherent_pages uses gen_pool_dma_alloc_align instead of
+gen_pool_dma_alloc to ensure that the memory is page-aligned. To replace
+kmalloc, hcd_alloc_coherent_pages directly allocates pages by calling
+__get_free_pages.
+
+Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.comm
+Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1]
+Fixes: f7d34b445abc ("USB: Add support for usbfs zerocopy.")
+Fixes: ff2437befd8f ("usb: host: Fix excessive alignment restriction for local memory allocations")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ruihan Li <lrh2000@pku.edu.cn>
+Acked-by: Alan Stern <stern@rowland.harvard.edu>
+Link: https://lore.kernel.org/r/20230515130958.32471-2-lrh2000@pku.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/buffer.c | 41 +++++++++++++++++++++++++++++++++++++++++
+ drivers/usb/core/devio.c | 9 +++++----
+ include/linux/usb/hcd.h | 5 +++++
+ 3 files changed, 51 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -172,3 +172,44 @@ void hcd_buffer_free(
+ }
+ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
+ }
++
++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
++ size_t size, gfp_t mem_flags, dma_addr_t *dma)
++{
++ if (size == 0)
++ return NULL;
++
++ if (hcd->localmem_pool)
++ return gen_pool_dma_alloc_align(hcd->localmem_pool,
++ size, dma, PAGE_SIZE);
++
++ /* some USB hosts just use PIO */
++ if (!hcd_uses_dma(hcd)) {
++ *dma = DMA_MAPPING_ERROR;
++ return (void *)__get_free_pages(mem_flags,
++ get_order(size));
++ }
++
++ return dma_alloc_coherent(hcd->self.sysdev,
++ size, dma, mem_flags);
++}
++
++void hcd_buffer_free_pages(struct usb_hcd *hcd,
++ size_t size, void *addr, dma_addr_t dma)
++{
++ if (!addr)
++ return;
++
++ if (hcd->localmem_pool) {
++ gen_pool_free(hcd->localmem_pool,
++ (unsigned long)addr, size);
++ return;
++ }
++
++ if (!hcd_uses_dma(hcd)) {
++ free_pages((unsigned long)addr, get_order(size));
++ return;
++ }
++
++ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
++}
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -186,6 +186,7 @@ static int connected(struct usb_dev_stat
+ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
+ {
+ struct usb_dev_state *ps = usbm->ps;
++ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->lock, flags);
+@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(str
+ list_del(&usbm->memlist);
+ spin_unlock_irqrestore(&ps->lock, flags);
+
+- usb_free_coherent(ps->dev, usbm->size, usbm->mem,
+- usbm->dma_handle);
++ hcd_buffer_free_pages(hcd, usbm->size,
++ usbm->mem, usbm->dma_handle);
+ usbfs_decrease_memory_usage(
+ usbm->size + sizeof(struct usb_memory));
+ kfree(usbm);
+@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file
+ goto error_decrease_mem;
+ }
+
+- mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+- &dma_handle);
++ mem = hcd_buffer_alloc_pages(hcd,
++ size, GFP_USER | __GFP_NOWARN, &dma_handle);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto error_free_usbm;
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -503,6 +503,11 @@ void *hcd_buffer_alloc(struct usb_bus *b
+ void hcd_buffer_free(struct usb_bus *bus, size_t size,
+ void *addr, dma_addr_t dma);
+
++void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
++ size_t size, gfp_t mem_flags, dma_addr_t *dma);
++void hcd_buffer_free_pages(struct usb_hcd *hcd,
++ size_t size, void *addr, dma_addr_t dma);
++
+ /* generic bus glue, needed for host controllers that don't use PCI */
+ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
+
--- /dev/null
+From d0b861653f8c16839c3035875b556afc4472f941 Mon Sep 17 00:00:00 2001
+From: Ruihan Li <lrh2000@pku.edu.cn>
+Date: Mon, 15 May 2023 21:09:56 +0800
+Subject: usb: usbfs: Use consistent mmap functions
+
+From: Ruihan Li <lrh2000@pku.edu.cn>
+
+commit d0b861653f8c16839c3035875b556afc4472f941 upstream.
+
+When hcd->localmem_pool is non-null, localmem_pool is used to allocate
+DMA memory. In this case, the dma address will be properly returned (in
+dma_handle), and dma_mmap_coherent should be used to map this memory
+into the user space. However, the current implementation uses
+pfn_remap_range, which is supposed to map normal pages.
+
+Instead of repeating the logic in the memory allocation function, this
+patch introduces a more robust solution. Here, the type of allocated
+memory is checked by testing whether dma_handle is properly set. If
+dma_handle is properly returned, it means some DMA pages are allocated
+and dma_mmap_coherent should be used to map them. Otherwise, normal
+pages are allocated and pfn_remap_range should be called. This ensures
+that the correct mmap functions are used consistently, independently
+with logic details that determine which type of memory gets allocated.
+
+Fixes: a0e710a7def4 ("USB: usbfs: fix mmap dma mismatch")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ruihan Li <lrh2000@pku.edu.cn>
+Link: https://lore.kernel.org/r/20230515130958.32471-3-lrh2000@pku.edu.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/devio.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -235,7 +235,7 @@ static int usbdev_mmap(struct file *file
+ size_t size = vma->vm_end - vma->vm_start;
+ void *mem;
+ unsigned long flags;
+- dma_addr_t dma_handle;
++ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
+ int ret;
+
+ ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
+@@ -265,7 +265,14 @@ static int usbdev_mmap(struct file *file
+ usbm->vma_use_count = 1;
+ INIT_LIST_HEAD(&usbm->memlist);
+
+- if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
++ /*
++ * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
++ * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
++ * whether we are in such cases, and then use remap_pfn_range (or
++ * dma_mmap_coherent) to map normal (or DMA) pages into the user
++ * space, respectively.
++ */
++ if (dma_handle == DMA_MAPPING_ERROR) {
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+ size, vma->vm_page_prot) < 0) {
--- /dev/null
+From accc1bf23068c1cdc4c2b015320ba856e210dd98 Mon Sep 17 00:00:00 2001
+From: Brett Creeley <brett.creeley@amd.com>
+Date: Mon, 5 Jun 2023 12:59:25 -0700
+Subject: virtio_net: use control_buf for coalesce params
+
+From: Brett Creeley <brett.creeley@amd.com>
+
+commit accc1bf23068c1cdc4c2b015320ba856e210dd98 upstream.
+
+Commit 699b045a8e43 ("net: virtio_net: notifications coalescing
+support") added coalescing command support for virtio_net. However,
+the coalesce commands are using buffers on the stack, which is causing
+the device to see DMA errors. There should also be a complaint from
+check_for_stack() in debug_dma_map_xyz(). Fix this by adding and using
+coalesce params from the control_buf struct, which aligns with other
+commands.
+
+Cc: stable@vger.kernel.org
+Fixes: 699b045a8e43 ("net: virtio_net: notifications coalescing support")
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
+Signed-off-by: Brett Creeley <brett.creeley@amd.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lore.kernel.org/r/20230605195925.51625-1-brett.creeley@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -204,6 +204,8 @@ struct control_buf {
+ __virtio16 vid;
+ __virtio64 offloads;
+ struct virtio_net_ctrl_rss rss;
++ struct virtio_net_ctrl_coal_tx coal_tx;
++ struct virtio_net_ctrl_coal_rx coal_rx;
+ };
+
+ struct virtnet_info {
+@@ -2933,12 +2935,10 @@ static int virtnet_send_notf_coal_cmds(s
+ struct ethtool_coalesce *ec)
+ {
+ struct scatterlist sgs_tx, sgs_rx;
+- struct virtio_net_ctrl_coal_tx coal_tx;
+- struct virtio_net_ctrl_coal_rx coal_rx;
+
+- coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+- coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+- sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
++ vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
++ vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
++ sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+@@ -2949,9 +2949,9 @@ static int virtnet_send_notf_coal_cmds(s
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+- coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+- coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+- sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
++ vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
++ vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
++ sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
--- /dev/null
+From 7a4615b9a9da5225b22b36a20508555dd133ac24 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Date: Fri, 2 Jun 2023 13:42:47 -0600
+Subject: wifi: iwlwifi: mvm: Fix -Warray-bounds bug in iwl_mvm_wait_d3_notif()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Gustavo A. R. Silva <gustavoars@kernel.org>
+
+commit 7a4615b9a9da5225b22b36a20508555dd133ac24 upstream.
+
+kmemdup() at line 2735 is not duplicating enough memory for
+notif->tid_tear_down and notif->station_id. As it only duplicates
+612 bytes: up to offsetofend(struct iwl_wowlan_info_notif,
+received_beacons), this is the range of [0, 612) bytes.
+
+2735 notif = kmemdup(notif_v1,
+2736 offsetofend(struct iwl_wowlan_info_notif,
+2737 received_beacons),
+2738 GFP_ATOMIC);
+
+which evidently does not cover bytes 612 and 613 for members
+tid_tear_down and station_id in struct iwl_wowlan_info_notif.
+See below:
+
+$ pahole -C iwl_wowlan_info_notif drivers/net/wireless/intel/iwlwifi/mvm/d3.o
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[2]; /* 0 488 */
+ /* --- cacheline 7 boundary (448 bytes) was 40 bytes ago --- */
+ struct iwl_wowlan_igtk_status igtk[2]; /* 488 80 */
+ /* --- cacheline 8 boundary (512 bytes) was 56 bytes ago --- */
+ __le64 replay_ctr; /* 568 8 */
+ /* --- cacheline 9 boundary (576 bytes) --- */
+ __le16 pattern_number; /* 576 2 */
+ __le16 reserved1; /* 578 2 */
+ __le16 qos_seq_ctr[8]; /* 580 16 */
+ __le32 wakeup_reasons; /* 596 4 */
+ __le32 num_of_gtk_rekeys; /* 600 4 */
+ __le32 transmitted_ndps; /* 604 4 */
+ __le32 received_beacons; /* 608 4 */
+ u8 tid_tear_down; /* 612 1 */
+ u8 station_id; /* 613 1 */
+ u8 reserved2[2]; /* 614 2 */
+
+ /* size: 616, cachelines: 10, members: 13 */
+ /* last cacheline: 40 bytes */
+};
+
+Therefore, when the following assignments take place, actually no memory
+has been allocated for those objects:
+
+2743 notif->tid_tear_down = notif_v1->tid_tear_down;
+2744 notif->station_id = notif_v1->station_id;
+
+Fix this by allocating space for the whole notif object and zero out the
+remaining space in memory after member station_id.
+
+This also fixes the following -Warray-bounds issues:
+ CC drivers/net/wireless/intel/iwlwifi/mvm/d3.o
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function ‘iwl_mvm_wait_d3_notif’:
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2743:30: warning: array subscript ‘struct iwl_wowlan_info_notif[0]’ is partly outside array bounds of ‘unsigned char[612]’ [-Warray-bounds=]
+ 2743 | notif->tid_tear_down = notif_v1->tid_tear_down;
+ |
+ from drivers/net/wireless/intel/iwlwifi/mvm/d3.c:7:
+In function ‘kmemdup’,
+ inlined from ‘iwl_mvm_wait_d3_notif’ at drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2735:12:
+include/linux/fortify-string.h:765:16: note: object of size 612 allocated by ‘__real_kmemdup’
+ 765 | return __real_kmemdup(p, size, gfp);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function ‘iwl_mvm_wait_d3_notif’:
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2744:30: warning: array subscript ‘struct iwl_wowlan_info_notif[0]’ is partly outside array bounds of ‘unsigned char[612]’ [-Warray-bounds=]
+ 2744 | notif->station_id = notif_v1->station_id;
+ | ^~
+In function ‘kmemdup’,
+ inlined from ‘iwl_mvm_wait_d3_notif’ at drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2735:12:
+include/linux/fortify-string.h:765:16: note: object of size 612 allocated by ‘__real_kmemdup’
+ 765 | return __real_kmemdup(p, size, gfp);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Link: https://github.com/KSPP/linux/issues/306
+Fixes: 905d50ddbc83 ("wifi: iwlwifi: mvm: support wowlan info notification version 2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Acked-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/ZHpGN555FwAKGduH@work
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2729,17 +2729,13 @@ static bool iwl_mvm_wait_d3_notif(struct
+ if (wowlan_info_ver < 2) {
+ struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
+
+- notif = kmemdup(notif_v1,
+- offsetofend(struct iwl_wowlan_info_notif,
+- received_beacons),
+- GFP_ATOMIC);
+-
++ notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
+ if (!notif)
+ return false;
+
+ notif->tid_tear_down = notif_v1->tid_tear_down;
+ notif->station_id = notif_v1->station_id;
+-
++ memset_after(notif, 0, station_id);
+ } else {
+ notif = (void *)pkt->data;
+ }