From: Sasha Levin Date: Sun, 11 Jun 2023 02:02:37 +0000 (-0400) Subject: Fixes for 6.3 X-Git-Tag: v4.14.318~70 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9a393f601a32721aa5984b61971dd989d2f8ce4f;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.3 Signed-off-by: Sasha Levin --- diff --git a/queue-6.3/accel-ivpu-do-not-use-mutex_lock_interruptible.patch b/queue-6.3/accel-ivpu-do-not-use-mutex_lock_interruptible.patch new file mode 100644 index 00000000000..5bb4e441564 --- /dev/null +++ b/queue-6.3/accel-ivpu-do-not-use-mutex_lock_interruptible.patch @@ -0,0 +1,94 @@ +From 28336c66888827cd49a880d7447d3200ed72b936 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 12:38:18 +0200 +Subject: accel/ivpu: Do not use mutex_lock_interruptible + +From: Stanislaw Gruszka + +[ Upstream commit b563e47957af4ff71736c5cc4120a59b055ab583 ] + +If we get signal when waiting for the mmu->lock we do not invalidate +current MMU configuration that might result in undefined behavior. + +Additionally there is little or no benefit on break waiting for +ipc->lock. In current code base, we keep this lock for short periods. + +Fixes: 263b2ba5fc93 ("accel/ivpu: Add Intel VPU MMU support") +Reviewed-by: Krystian Pradzynski +Reviewed-by: Jeffrey Hugo +Signed-off-by: Stanislaw Gruszka +Link: https://patchwork.freedesktop.org/patch/msgid/20230525103818.877590-2-stanislaw.gruszka@linux.intel.com +Signed-off-by: Sasha Levin +--- + drivers/accel/ivpu/ivpu_ipc.c | 4 +--- + drivers/accel/ivpu/ivpu_mmu.c | 22 ++++++---------------- + 2 files changed, 7 insertions(+), 19 deletions(-) + +diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c +index 3adcfa80fc0e5..fa0af59e39ab6 100644 +--- a/drivers/accel/ivpu/ivpu_ipc.c ++++ b/drivers/accel/ivpu/ivpu_ipc.c +@@ -183,9 +183,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v + struct ivpu_ipc_info *ipc = vdev->ipc; + int ret; + +- ret = mutex_lock_interruptible(&ipc->lock); +- if (ret) +- return ret; ++ mutex_lock(&ipc->lock); + + if (!ipc->on) { + ret = -EAGAIN; +diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c +index 694e978aba663..b8b259b3aa635 100644 +--- a/drivers/accel/ivpu/ivpu_mmu.c ++++ b/drivers/accel/ivpu/ivpu_mmu.c +@@ -587,16 +587,11 @@ static int ivpu_mmu_strtab_init(struct ivpu_device *vdev) + int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid) + { + struct ivpu_mmu_info *mmu = vdev->mmu; +- int ret; +- +- ret = mutex_lock_interruptible(&mmu->lock); +- if (ret) +- return ret; ++ int ret = 0; + +- if (!mmu->on) { +- ret = 0; ++ mutex_lock(&mmu->lock); ++ if (!mmu->on) + goto unlock; +- } + + ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid); + if (ret) +@@ -614,7 +609,7 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) + struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; + u64 *entry; + u64 cd[4]; +- int ret; ++ int ret = 0; + + if (ssid > IVPU_MMU_CDTAB_ENT_COUNT) + return -EINVAL; +@@ -655,14 +650,9 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma) + ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + cd_dma ? 
"write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]); + +- ret = mutex_lock_interruptible(&mmu->lock); +- if (ret) +- return ret; +- +- if (!mmu->on) { +- ret = 0; ++ mutex_lock(&mmu->lock); ++ if (!mmu->on) + goto unlock; +- } + + ret = ivpu_mmu_cmdq_write_cfgi_all(vdev); + if (ret) +-- +2.39.2 + diff --git a/queue-6.3/accel-ivpu-ivpu_ipc-needs-generic_allocator.patch b/queue-6.3/accel-ivpu-ivpu_ipc-needs-generic_allocator.patch new file mode 100644 index 00000000000..73aa5d272f9 --- /dev/null +++ b/queue-6.3/accel-ivpu-ivpu_ipc-needs-generic_allocator.patch @@ -0,0 +1,58 @@ +From 1d843239558453983a294718c654e8b22b133ef0 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 21:45:19 -0700 +Subject: accel/ivpu: ivpu_ipc needs GENERIC_ALLOCATOR + +From: Randy Dunlap + +[ Upstream commit 50d30040eb856ff6b0382b4d9dc332dc15597729 ] + +Drivers that use the gen_pool*() family of functions should +select GENERIC_ALLOCATOR to prevent build errors like these: + +ld: drivers/accel/ivpu/ivpu_ipc.o: in function `gen_pool_free': +include/linux/genalloc.h:172: undefined reference to `gen_pool_free_owner' +ld: drivers/accel/ivpu/ivpu_ipc.o: in function `gen_pool_alloc_algo': +include/linux/genalloc.h:138: undefined reference to `gen_pool_alloc_algo_owner' +ld: drivers/accel/ivpu/ivpu_ipc.o: in function `gen_pool_free': +include/linux/genalloc.h:172: undefined reference to `gen_pool_free_owner' +ld: drivers/accel/ivpu/ivpu_ipc.o: in function `ivpu_ipc_init': +drivers/accel/ivpu/ivpu_ipc.c:441: undefined reference to `devm_gen_pool_create' +ld: drivers/accel/ivpu/ivpu_ipc.o: in function `gen_pool_add_virt': +include/linux/genalloc.h:104: undefined reference to `gen_pool_add_owner' + +Fixes: 5d7422cfb498 ("accel/ivpu: Add IPC driver and JSM messages") +Signed-off-by: Randy Dunlap +Reported-by: kernel test robot +Link: https://lore.kernel.org/all/202305221206.1TaugDKP-lkp@intel.com/ +Cc: Oded Gabbay +Cc: dri-devel@lists.freedesktop.org +Cc: Jacek Lawrynowicz +Cc: Stanislaw Gruszka +Cc: Andrzej Kacprowski +Cc: Krystian Pradzynski +Cc: Jeffrey Hugo +Cc: Daniel Vetter +Reviewed-by: Jeffrey Hugo +Signed-off-by: Stanislaw Gruszka +Link: https://patchwork.freedesktop.org/patch/msgid/20230526044519.13441-1-rdunlap@infradead.org +Signed-off-by: Sasha Levin +--- + drivers/accel/ivpu/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig +index 9bdf168bf1d0e..1a4c4ed9d1136 100644 +--- a/drivers/accel/ivpu/Kconfig ++++ b/drivers/accel/ivpu/Kconfig +@@ -7,6 +7,7 @@ config DRM_ACCEL_IVPU + depends on PCI && PCI_MSI + select FW_LOADER + select SHMEM ++ select GENERIC_ALLOCATOR + help + Choose this option if you have a system that has an 14th generation Intel CPU + or newer. 
VPU stands for Versatile Processing Unit and it's a CPU-integrated +-- +2.39.2 + diff --git a/queue-6.3/accel-ivpu-reserve-all-non-command-bo-s-using-dma_re.patch b/queue-6.3/accel-ivpu-reserve-all-non-command-bo-s-using-dma_re.patch new file mode 100644 index 00000000000..8662937fa10 --- /dev/null +++ b/queue-6.3/accel-ivpu-reserve-all-non-command-bo-s-using-dma_re.patch @@ -0,0 +1,74 @@ +From f4ff0de31537503809c850cabdbdaaeffa0e9ad5 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 13 Apr 2023 08:38:10 +0200 +Subject: accel/ivpu: Reserve all non-command bo's using + DMA_RESV_USAGE_BOOKKEEP + +From: Stanislaw Gruszka + +[ Upstream commit 411360257c1f4fccaa20143098b6d3fcc9d4e4dc ] + +Use DMA_RESV_USAGE_BOOKKEEP reservation for buffer objects, except for +command buffers for which we use DMA_RESV_USAGE_WRITE (since VPU can +write to command buffer context save area). + +Fixes: 0ec8671837a6 ("accel/ivpu: Fix S3 system suspend when not idle") +Reviewed-by: Jeffrey Hugo +Signed-off-by: Stanislaw Gruszka +Link: https://patchwork.freedesktop.org/patch/msgid/20230413063810.3167511-1-stanislaw.gruszka@linux.intel.com +Signed-off-by: Sasha Levin +--- + drivers/accel/ivpu/ivpu_job.c | 21 ++++++++++++++------- + 1 file changed, 14 insertions(+), 7 deletions(-) + +diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c +index 3c6f1e16cf2ff..d45be0615b476 100644 +--- a/drivers/accel/ivpu/ivpu_job.c ++++ b/drivers/accel/ivpu/ivpu_job.c +@@ -431,6 +431,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct ww_acquire_ctx acquire_ctx; ++ enum dma_resv_usage usage; + struct ivpu_bo *bo; + int ret; + u32 i; +@@ -461,22 +462,28 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 + + job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + +- ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx); ++ ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, ++ &acquire_ctx); + if (ret) { + ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); + return ret; + } + +- ret = dma_resv_reserve_fences(bo->base.resv, 1); +- if (ret) { +- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); +- goto unlock_reservations; ++ for (i = 0; i < buf_count; i++) { ++ ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1); ++ if (ret) { ++ ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); ++ goto unlock_reservations; ++ } + } + +- dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE); ++ for (i = 0; i < buf_count; i++) { ++ usage = (i == CMD_BUF_IDX) ? 
DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP; ++ dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage); ++ } + + unlock_reservations: +- drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx); ++ drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); + + wmb(); /* Flush write combining buffers */ + +-- +2.39.2 + diff --git a/queue-6.3/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch b/queue-6.3/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch new file mode 100644 index 00000000000..5b1f1acb328 --- /dev/null +++ b/queue-6.3/afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch @@ -0,0 +1,61 @@ +From 5cf4f4016d21fb69ad099ae53229f2372714e16b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 09:47:13 +0100 +Subject: afs: Fix setting of mtime when creating a file/dir/symlink + +From: David Howells + +[ Upstream commit a27648c742104a833a01c54becc24429898d85bf ] + +kafs incorrectly passes a zero mtime (ie. 1st Jan 1970) to the server when +creating a file, dir or symlink because the mtime recorded in the +afs_operation struct gets passed to the server by the marshalling routines, +but the afs_mkdir(), afs_create() and afs_symlink() functions don't set it. + +This gets masked if a file or directory is subsequently modified. + +Fix this by filling in op->mtime before calling the create op. + +Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept") +Signed-off-by: David Howells +Reviewed-by: Jeffrey Altman +Reviewed-by: Marc Dionne +cc: linux-afs@lists.infradead.org +cc: linux-fsdevel@vger.kernel.org +Signed-off-by: Linus Torvalds +Signed-off-by: Sasha Levin +--- + fs/afs/dir.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/fs/afs/dir.c b/fs/afs/dir.c +index a97499fd747b6..93e8b06ef76a6 100644 +--- a/fs/afs/dir.c ++++ b/fs/afs/dir.c +@@ -1358,6 +1358,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir, + op->dentry = dentry; + op->create.mode = S_IFDIR | mode; + op->create.reason = afs_edit_dir_for_mkdir; ++ op->mtime = current_time(dir); + op->ops = &afs_mkdir_operation; + return afs_do_sync_operation(op); + } +@@ -1661,6 +1662,7 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir, + op->dentry = dentry; + op->create.mode = S_IFREG | mode; + op->create.reason = afs_edit_dir_for_create; ++ op->mtime = current_time(dir); + op->ops = &afs_create_operation; + return afs_do_sync_operation(op); + +@@ -1796,6 +1798,7 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, + op->ops = &afs_symlink_operation; + op->create.reason = afs_edit_dir_for_symlink; + op->create.symlink = content; ++ op->mtime = current_time(dir); + return afs_do_sync_operation(op); + + error: +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-fix-l2cap_disconnect_req-deadlock.patch b/queue-6.3/bluetooth-fix-l2cap_disconnect_req-deadlock.patch new file mode 100644 index 00000000000..db01f8eb9cb --- /dev/null +++ b/queue-6.3/bluetooth-fix-l2cap_disconnect_req-deadlock.patch @@ -0,0 +1,61 @@ +From e2a1132b7809cb40a47557fd31108e6b308635b2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 May 2023 03:44:56 +0000 +Subject: Bluetooth: Fix l2cap_disconnect_req deadlock + +From: Ying Hsu + +[ Upstream commit 02c5ea5246a44d6ffde0fddebfc1d56188052976 ] + +L2CAP assumes that the locks conn->chan_lock and chan->lock are +acquired in the order conn->chan_lock, chan->lock to avoid +potential deadlock. 
+For example, l2sock_shutdown acquires these locks in the order: + mutex_lock(&conn->chan_lock) + l2cap_chan_lock(chan) + +However, l2cap_disconnect_req acquires chan->lock in +l2cap_get_chan_by_scid first and then acquires conn->chan_lock +before calling l2cap_chan_del. This means that these locks are +acquired in unexpected order, which leads to potential deadlock: + l2cap_chan_lock(c) + mutex_lock(&conn->chan_lock) + +This patch releases chan->lock before acquiring the conn_chan_lock +to avoid the potential deadlock. + +Fixes: a2a9339e1c9d ("Bluetooth: L2CAP: Fix use-after-free in l2cap_disconnect_{req,rsp}") +Signed-off-by: Ying Hsu +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/l2cap_core.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 24d075282996c..e54e2aeb2a891 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -4664,7 +4664,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, + + chan->ops->set_shutdown(chan); + ++ l2cap_chan_unlock(chan); + mutex_lock(&conn->chan_lock); ++ l2cap_chan_lock(chan); + l2cap_chan_del(chan, ECONNRESET); + mutex_unlock(&conn->chan_lock); + +@@ -4703,7 +4705,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, + return 0; + } + ++ l2cap_chan_unlock(chan); + mutex_lock(&conn->chan_lock); ++ l2cap_chan_lock(chan); + l2cap_chan_del(chan, 0); + mutex_unlock(&conn->chan_lock); + +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-hci_conn-add-support-for-linking-multiple-.patch b/queue-6.3/bluetooth-hci_conn-add-support-for-linking-multiple-.patch new file mode 100644 index 00000000000..eca588fa799 --- /dev/null +++ b/queue-6.3/bluetooth-hci_conn-add-support-for-linking-multiple-.patch @@ -0,0 +1,569 @@ +From 3a3f0081508815e7112e45190efcf3f16f8bae5c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 Apr 2023 16:02:22 -0700 +Subject: Bluetooth: hci_conn: Add support for linking multiple hcon + +From: Luiz Augusto von Dentz + +[ Upstream commit 06149746e7203d5ffe2d6faf9799ee36203aa8b8 ] + +Since it is required for some configurations to have multiple CIS with +the same peer which is now covered by iso-tester in the following test +cases: + + ISO AC 6(i) - Success + ISO AC 7(i) - Success + ISO AC 8(i) - Success + ISO AC 9(i) - Success + ISO AC 11(i) - Success + +Signed-off-by: Luiz Augusto von Dentz +Stable-dep-of: 71e9588435c3 ("Bluetooth: ISO: use correct CIS order in Set CIG Parameters event") +Signed-off-by: Sasha Levin +--- + include/net/bluetooth/hci_core.h | 14 ++- + net/bluetooth/hci_conn.c | 155 ++++++++++++++++++++++--------- + net/bluetooth/hci_event.c | 92 ++++++++---------- + net/bluetooth/iso.c | 8 +- + 4 files changed, 172 insertions(+), 97 deletions(-) + +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index b973ecb222f65..9361e75b9299b 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -766,7 +766,10 @@ struct hci_conn { + void *iso_data; + struct amp_mgr *amp_mgr; + +- struct hci_conn *link; ++ struct list_head link_list; ++ struct hci_conn *parent; ++ struct hci_link *link; ++ + struct bt_codec codec; + + void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); +@@ -776,6 +779,11 @@ struct hci_conn { + void (*cleanup)(struct hci_conn *conn); + }; + ++struct hci_link { ++ struct list_head list; ++ struct hci_conn *conn; ++}; ++ + struct hci_chan { + struct list_head list; + __u16 handle; +@@ 
-1379,12 +1387,14 @@ static inline void hci_conn_put(struct hci_conn *conn) + put_device(&conn->dev); + } + +-static inline void hci_conn_hold(struct hci_conn *conn) ++static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) + { + BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); + + atomic_inc(&conn->refcnt); + cancel_delayed_work(&conn->disc_work); ++ ++ return conn; + } + + static inline void hci_conn_drop(struct hci_conn *conn) +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index e02afdc557e7b..81aebbbe0b1eb 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -330,8 +330,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle) + static bool find_next_esco_param(struct hci_conn *conn, + const struct sco_param *esco_param, int size) + { ++ if (!conn->parent) ++ return false; ++ + for (; conn->attempt <= size; conn->attempt++) { +- if (lmp_esco_2m_capable(conn->link) || ++ if (lmp_esco_2m_capable(conn->parent) || + (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) + break; + BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", +@@ -461,7 +464,7 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) + break; + + case BT_CODEC_CVSD: +- if (lmp_esco_capable(conn->link)) { ++ if (conn->parent && lmp_esco_capable(conn->parent)) { + if (!find_next_esco_param(conn, esco_param_cvsd, + ARRAY_SIZE(esco_param_cvsd))) + return -EINVAL; +@@ -531,7 +534,7 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) + param = &esco_param_msbc[conn->attempt - 1]; + break; + case SCO_AIRMODE_CVSD: +- if (lmp_esco_capable(conn->link)) { ++ if (conn->parent && lmp_esco_capable(conn->parent)) { + if (!find_next_esco_param(conn, esco_param_cvsd, + ARRAY_SIZE(esco_param_cvsd))) + return false; +@@ -637,21 +640,22 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, + /* Device _must_ be locked */ + void hci_sco_setup(struct hci_conn *conn, __u8 status) + { +- struct hci_conn *sco = conn->link; ++ struct hci_link *link; + +- if (!sco) ++ link = list_first_entry_or_null(&conn->link_list, struct hci_link, list); ++ if (!link || !link->conn) + return; + + BT_DBG("hcon %p", conn); + + if (!status) { + if (lmp_esco_capable(conn->hdev)) +- hci_setup_sync(sco, conn->handle); ++ hci_setup_sync(link->conn, conn->handle); + else +- hci_add_sco(sco, conn->handle); ++ hci_add_sco(link->conn, conn->handle); + } else { +- hci_connect_cfm(sco, status); +- hci_conn_del(sco); ++ hci_connect_cfm(link->conn, status); ++ hci_conn_del(link->conn); + } + } + +@@ -1047,6 +1051,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, + skb_queue_head_init(&conn->data_q); + + INIT_LIST_HEAD(&conn->chan_list); ++ INIT_LIST_HEAD(&conn->link_list); + + INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); + INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); +@@ -1074,15 +1079,39 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, + return conn; + } + +-static bool hci_conn_unlink(struct hci_conn *conn) ++static void hci_conn_unlink(struct hci_conn *conn) + { ++ struct hci_dev *hdev = conn->hdev; ++ ++ bt_dev_dbg(hdev, "hcon %p", conn); ++ ++ if (!conn->parent) { ++ struct hci_link *link, *t; ++ ++ list_for_each_entry_safe(link, t, &conn->link_list, list) ++ hci_conn_unlink(link->conn); ++ ++ return; ++ } ++ + if (!conn->link) +- return false; ++ return; ++ ++ hci_conn_put(conn->parent); ++ conn->parent = NULL; + +- conn->link->link 
= NULL; ++ list_del_rcu(&conn->link->list); ++ synchronize_rcu(); ++ ++ kfree(conn->link); + conn->link = NULL; + +- return true; ++ /* Due to race, SCO connection might be not established ++ * yet at this point. Delete it now, otherwise it is ++ * possible for it to be stuck and can't be deleted. ++ */ ++ if (conn->handle == HCI_CONN_HANDLE_UNSET) ++ hci_conn_del(conn); + } + + int hci_conn_del(struct hci_conn *conn) +@@ -1096,18 +1125,7 @@ int hci_conn_del(struct hci_conn *conn) + cancel_delayed_work_sync(&conn->idle_work); + + if (conn->type == ACL_LINK) { +- struct hci_conn *link = conn->link; +- +- if (link) { +- hci_conn_unlink(conn); +- /* Due to race, SCO connection might be not established +- * yet at this point. Delete it now, otherwise it is +- * possible for it to be stuck and can't be deleted. +- */ +- if (link->handle == HCI_CONN_HANDLE_UNSET) +- hci_conn_del(link); +- } +- ++ hci_conn_unlink(conn); + /* Unacked frames */ + hdev->acl_cnt += conn->sent; + } else if (conn->type == LE_LINK) { +@@ -1118,7 +1136,7 @@ int hci_conn_del(struct hci_conn *conn) + else + hdev->acl_cnt += conn->sent; + } else { +- struct hci_conn *acl = conn->link; ++ struct hci_conn *acl = conn->parent; + + if (acl) { + hci_conn_unlink(conn); +@@ -1605,11 +1623,40 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + return acl; + } + ++static struct hci_link *hci_conn_link(struct hci_conn *parent, ++ struct hci_conn *conn) ++{ ++ struct hci_dev *hdev = parent->hdev; ++ struct hci_link *link; ++ ++ bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn); ++ ++ if (conn->link) ++ return conn->link; ++ ++ if (conn->parent) ++ return NULL; ++ ++ link = kzalloc(sizeof(*link), GFP_KERNEL); ++ if (!link) ++ return NULL; ++ ++ link->conn = hci_conn_hold(conn); ++ conn->link = link; ++ conn->parent = hci_conn_get(parent); ++ ++ /* Use list_add_tail_rcu append to the list */ ++ list_add_tail_rcu(&link->list, &parent->link_list); ++ ++ return link; ++} ++ + struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u16 setting, struct bt_codec *codec) + { + struct hci_conn *acl; + struct hci_conn *sco; ++ struct hci_link *link; + + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, + CONN_REASON_SCO_CONNECT); +@@ -1625,10 +1672,12 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + } + } + +- acl->link = sco; +- sco->link = acl; +- +- hci_conn_hold(sco); ++ link = hci_conn_link(acl, sco); ++ if (!link) { ++ hci_conn_drop(acl); ++ hci_conn_drop(sco); ++ return NULL; ++ } + + sco->setting = setting; + sco->codec = *codec; +@@ -1895,7 +1944,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + u8 cig; + + memset(&cmd, 0, sizeof(cmd)); +- cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); ++ cmd.cis[0].acl_handle = cpu_to_le16(conn->parent->handle); + cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + cig = conn->iso_qos.ucast.cig; +@@ -1908,11 +1957,12 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; + + if (conn == data || conn->type != ISO_LINK || +- conn->state == BT_CONNECTED || conn->iso_qos.ucast.cig != cig) ++ conn->state == BT_CONNECTED || ++ conn->iso_qos.ucast.cig != cig) + continue; + + /* Check if all CIS(s) belonging to a CIG are ready */ +- if (!conn->link || conn->link->state != BT_CONNECTED || ++ if (!conn->parent || conn->parent->state != BT_CONNECTED || + conn->state != BT_CONNECT) { + 
cmd.cp.num_cis = 0; + break; +@@ -1929,7 +1979,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). + */ +- cis->acl_handle = cpu_to_le16(conn->link->handle); ++ cis->acl_handle = cpu_to_le16(conn->parent->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + } +@@ -1948,15 +1998,33 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + int hci_le_create_cis(struct hci_conn *conn) + { + struct hci_conn *cis; ++ struct hci_link *link, *t; + struct hci_dev *hdev = conn->hdev; + int err; + ++ bt_dev_dbg(hdev, "hcon %p", conn); ++ + switch (conn->type) { + case LE_LINK: +- if (!conn->link || conn->state != BT_CONNECTED) ++ if (conn->state != BT_CONNECTED || list_empty(&conn->link_list)) + return -EINVAL; +- cis = conn->link; +- break; ++ ++ cis = NULL; ++ ++ /* hci_conn_link uses list_add_tail_rcu so the list is in ++ * the same order as the connections are requested. ++ */ ++ list_for_each_entry_safe(link, t, &conn->link_list, list) { ++ if (link->conn->state == BT_BOUND) { ++ err = hci_le_create_cis(link->conn); ++ if (err) ++ return err; ++ ++ cis = link->conn; ++ } ++ } ++ ++ return cis ? 0 : -EINVAL; + case ISO_LINK: + cis = conn; + break; +@@ -2177,6 +2245,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + { + struct hci_conn *le; + struct hci_conn *cis; ++ struct hci_link *link; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + le = hci_connect_le(hdev, dst, dst_type, false, +@@ -2202,16 +2271,18 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + return cis; + } + +- le->link = cis; +- cis->link = le; +- +- hci_conn_hold(cis); ++ link = hci_conn_link(le, cis); ++ if (!link) { ++ hci_conn_drop(le); ++ hci_conn_drop(cis); ++ return NULL; ++ } + + /* If LE is already connected and CIS handle is already set proceed to + * Create CIS immediately. 
+ */ + if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET) +- hci_le_create_cis(le); ++ hci_le_create_cis(cis); + + return cis; + } +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 0e0a93cc12186..d00ef6e3fc451 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -2345,7 +2345,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) + static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) + { + struct hci_cp_add_sco *cp; +- struct hci_conn *acl, *sco; ++ struct hci_conn *acl; ++ struct hci_link *link; + __u16 handle; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); +@@ -2365,12 +2366,13 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { +- sco = acl->link; +- if (sco) { +- sco->state = BT_CLOSED; ++ link = list_first_entry_or_null(&acl->link_list, ++ struct hci_link, list); ++ if (link && link->conn) { ++ link->conn->state = BT_CLOSED; + +- hci_connect_cfm(sco, status); +- hci_conn_del(sco); ++ hci_connect_cfm(link->conn, status); ++ hci_conn_del(link->conn); + } + } + +@@ -2637,74 +2639,61 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) + hci_dev_unlock(hdev); + } + +-static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) ++static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, ++ __u8 status) + { +- struct hci_cp_setup_sync_conn *cp; +- struct hci_conn *acl, *sco; +- __u16 handle; +- +- bt_dev_dbg(hdev, "status 0x%2.2x", status); +- +- if (!status) +- return; +- +- cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); +- if (!cp) +- return; ++ struct hci_conn *acl; ++ struct hci_link *link; + +- handle = __le16_to_cpu(cp->handle); +- +- bt_dev_dbg(hdev, "handle 0x%4.4x", handle); ++ bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { +- sco = acl->link; +- if (sco) { +- sco->state = BT_CLOSED; ++ link = list_first_entry_or_null(&acl->link_list, ++ struct hci_link, list); ++ if (link && link->conn) { ++ link->conn->state = BT_CLOSED; + +- hci_connect_cfm(sco, status); +- hci_conn_del(sco); ++ hci_connect_cfm(link->conn, status); ++ hci_conn_del(link->conn); + } + } + + hci_dev_unlock(hdev); + } + +-static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) ++static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) + { +- struct hci_cp_enhanced_setup_sync_conn *cp; +- struct hci_conn *acl, *sco; +- __u16 handle; ++ struct hci_cp_setup_sync_conn *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + +- cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); ++ cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); + if (!cp) + return; + +- handle = __le16_to_cpu(cp->handle); ++ hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); ++} + +- bt_dev_dbg(hdev, "handle 0x%4.4x", handle); ++static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) ++{ ++ struct hci_cp_enhanced_setup_sync_conn *cp; + +- hci_dev_lock(hdev); ++ bt_dev_dbg(hdev, "status 0x%2.2x", status); + +- acl = hci_conn_hash_lookup_handle(hdev, handle); +- if (acl) { +- sco = acl->link; +- if (sco) { +- sco->state = BT_CLOSED; ++ if (!status) ++ return; + +- hci_connect_cfm(sco, status); +- hci_conn_del(sco); +- } +- } ++ cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); ++ if 
(!cp) ++ return; + +- hci_dev_unlock(hdev); ++ hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); + } + + static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) +@@ -3834,19 +3823,20 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { +- if (conn->type != ISO_LINK || conn->iso_qos.ucast.cig != rp->cig_id || ++ if (conn->type != ISO_LINK || ++ conn->iso_qos.ucast.cig != rp->cig_id || + conn->state == BT_CONNECTED) + continue; + + conn->handle = __le16_to_cpu(rp->handle[i++]); + +- bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, +- conn->handle, conn->link); ++ bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn, ++ conn->handle, conn->parent); + + /* Create CIS if LE is already connected */ +- if (conn->link && conn->link->state == BT_CONNECTED) { ++ if (conn->parent && conn->parent->state == BT_CONNECTED) { + rcu_read_unlock(); +- hci_le_create_cis(conn->link); ++ hci_le_create_cis(conn); + rcu_read_lock(); + } + +@@ -5031,7 +5021,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, + if (conn->out) { + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); +- if (hci_setup_sync(conn, conn->link->handle)) ++ if (hci_setup_sync(conn, conn->parent->handle)) + goto unlock; + } + fallthrough; +diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c +index 74117df03a3fa..34d55a85d8f6f 100644 +--- a/net/bluetooth/iso.c ++++ b/net/bluetooth/iso.c +@@ -1657,8 +1657,12 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) + + /* Check if LE link has failed */ + if (status) { +- if (hcon->link) +- iso_conn_del(hcon->link, bt_to_errno(status)); ++ struct hci_link *link, *t; ++ ++ list_for_each_entry_safe(link, t, &hcon->link_list, ++ list) ++ iso_conn_del(link->conn, bt_to_errno(status)); ++ + return; + } + +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-hci_conn-fix-not-matching-by-cis-id.patch b/queue-6.3/bluetooth-hci_conn-fix-not-matching-by-cis-id.patch new file mode 100644 index 00000000000..f0e3ffc213c --- /dev/null +++ b/queue-6.3/bluetooth-hci_conn-fix-not-matching-by-cis-id.patch @@ -0,0 +1,67 @@ +From f16ef46cdcdaa1df33b5abd144b5aecf1b50a521 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 Apr 2023 16:14:25 -0700 +Subject: Bluetooth: hci_conn: Fix not matching by CIS ID + +From: Luiz Augusto von Dentz + +[ Upstream commit c14516faede33c2c31da45cf950d55dbff42962e ] + +This fixes only matching CIS by address which prevents creating new hcon +if upper layer is requesting a specific CIS ID. 
+ +Signed-off-by: Luiz Augusto von Dentz +Stable-dep-of: 71e9588435c3 ("Bluetooth: ISO: use correct CIS order in Set CIG Parameters event") +Signed-off-by: Sasha Levin +--- + include/net/bluetooth/hci_core.h | 12 +++++++++++- + net/bluetooth/hci_conn.c | 3 ++- + 2 files changed, 13 insertions(+), 2 deletions(-) + +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 9361e75b9299b..a08e8dc772e54 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -1176,7 +1176,9 @@ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, + + static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, + bdaddr_t *ba, +- __u8 ba_type) ++ __u8 ba_type, ++ __u8 cig, ++ __u8 id) + { + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; +@@ -1187,6 +1189,14 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, + if (c->type != ISO_LINK) + continue; + ++ /* Match CIG ID if set */ ++ if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig) ++ continue; ++ ++ /* Match CIS ID if set */ ++ if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) ++ continue; ++ + if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); + return c; +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 81aebbbe0b1eb..163d52b929994 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1846,7 +1846,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + { + struct hci_conn *cis; + +- cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type); ++ cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, ++ qos->ucast.cis); + if (!cis) { + cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + if (!cis) +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch b/queue-6.3/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch new file mode 100644 index 00000000000..a23ccdc0333 --- /dev/null +++ b/queue-6.3/bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch @@ -0,0 +1,100 @@ +From 1036271d82e6b3d698e5b7f85384b497fcbd2653 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 May 2023 17:11:58 -0700 +Subject: Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER + +From: Zhengping Jiang + +[ Upstream commit 1857c19941c87eb36ad47f22a406be5dfe5eff9f ] + +When the HCI_UNREGISTER flag is set, no jobs should be scheduled. Fix +potential race when HCI_UNREGISTER is set after the flag is tested in +hci_cmd_sync_queue. 
+ +Fixes: 0b94f2651f56 ("Bluetooth: hci_sync: Fix queuing commands when HCI_UNREGISTER is set") +Signed-off-by: Zhengping Jiang +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + include/net/bluetooth/hci_core.h | 1 + + net/bluetooth/hci_core.c | 2 ++ + net/bluetooth/hci_sync.c | 20 ++++++++++++++------ + 3 files changed, 17 insertions(+), 6 deletions(-) + +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 86db7f3a31ce5..b973ecb222f65 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -514,6 +514,7 @@ struct hci_dev { + struct work_struct cmd_sync_work; + struct list_head cmd_sync_work_list; + struct mutex cmd_sync_work_lock; ++ struct mutex unregister_lock; + struct work_struct cmd_sync_cancel_work; + struct work_struct reenable_adv_work; + +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index 334e308451f53..ac36e7ae70b21 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -2685,7 +2685,9 @@ void hci_unregister_dev(struct hci_dev *hdev) + { + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + ++ mutex_lock(&hdev->unregister_lock); + hci_dev_set_flag(hdev, HCI_UNREGISTER); ++ mutex_unlock(&hdev->unregister_lock); + + write_lock(&hci_dev_list_lock); + list_del(&hdev->list); +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index b65ee3a32e5d7..fd3b6d79a7f8b 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev) + INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); + INIT_LIST_HEAD(&hdev->cmd_sync_work_list); + mutex_init(&hdev->cmd_sync_work_lock); ++ mutex_init(&hdev->unregister_lock); + + INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); + INIT_WORK(&hdev->reenable_adv_work, reenable_adv); +@@ -688,14 +689,19 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) + { + struct hci_cmd_sync_work_entry *entry; ++ int err = 0; + +- if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) +- return -ENODEV; ++ mutex_lock(&hdev->unregister_lock); ++ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { ++ err = -ENODEV; ++ goto unlock; ++ } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); +- if (!entry) +- return -ENOMEM; +- ++ if (!entry) { ++ err = -ENOMEM; ++ goto unlock; ++ } + entry->func = func; + entry->data = data; + entry->destroy = destroy; +@@ -706,7 +712,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + + queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); + +- return 0; ++unlock: ++ mutex_unlock(&hdev->unregister_lock); ++ return err; + } + EXPORT_SYMBOL(hci_cmd_sync_queue); + +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-iso-consider-right-cis-when-removing-cig-a.patch b/queue-6.3/bluetooth-iso-consider-right-cis-when-removing-cig-a.patch new file mode 100644 index 00000000000..ab31cdc4a24 --- /dev/null +++ b/queue-6.3/bluetooth-iso-consider-right-cis-when-removing-cig-a.patch @@ -0,0 +1,48 @@ +From d8c9844cc4891f6a362bd49fe83f416f6721c6c7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 21 May 2023 15:48:28 +0000 +Subject: Bluetooth: ISO: consider right CIS when removing CIG at cleanup + +From: Pauli Virtanen + +[ Upstream commit 31c5f9164949347c9cb34f041a7e04fdc08b1b85 ] + +When looking for CIS blocking CIG removal, consider only the CIS with +the right CIG ID. Don't try to remove CIG with unset CIG ID. 
+ +Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections") +Signed-off-by: Pauli Virtanen +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/hci_conn.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 5672b49245721..3820d5d873e12 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -943,8 +943,8 @@ static void find_cis(struct hci_conn *conn, void *data) + { + struct iso_list_data *d = data; + +- /* Ignore broadcast */ +- if (!bacmp(&conn->dst, BDADDR_ANY)) ++ /* Ignore broadcast or if CIG don't match */ ++ if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) + return; + + d->count++; +@@ -959,6 +959,9 @@ static void cis_cleanup(struct hci_conn *conn) + struct hci_dev *hdev = conn->hdev; + struct iso_list_data d; + ++ if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) ++ return; ++ + memset(&d, 0, sizeof(d)); + d.cig = conn->iso_qos.ucast.cig; + +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch b/queue-6.3/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch new file mode 100644 index 00000000000..c4e80550055 --- /dev/null +++ b/queue-6.3/bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch @@ -0,0 +1,37 @@ +From 070a377731df327c0ae6d18d5fe398e1830bb303 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 09:34:44 +0300 +Subject: Bluetooth: ISO: don't try to remove CIG if there are bound CIS left + +From: Pauli Virtanen + +[ Upstream commit 6c242c64a09e78349fb0a5f0a6f8076a3d7c0bb4 ] + +Consider existing BOUND & CONNECT state CIS to block CIG removal. +Otherwise, under suitable timing conditions we may attempt to remove CIG +while Create CIS is pending, which fails. + +Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections") +Signed-off-by: Pauli Virtanen +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/hci_conn.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 96df87692f962..e02afdc557e7b 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -968,6 +968,8 @@ static void cis_cleanup(struct hci_conn *conn) + /* Check if ISO connection is a CIS and remove CIG if there are + * no other connections using it. + */ ++ hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); ++ hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); + if (d.count) + return; +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-iso-fix-cig-auto-allocation-to-select-conf.patch b/queue-6.3/bluetooth-iso-fix-cig-auto-allocation-to-select-conf.patch new file mode 100644 index 00000000000..2f134a05f6c --- /dev/null +++ b/queue-6.3/bluetooth-iso-fix-cig-auto-allocation-to-select-conf.patch @@ -0,0 +1,63 @@ +From 9305df8e04975ddade7ad888f5c1447a0049ce45 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 21 May 2023 15:48:29 +0000 +Subject: Bluetooth: ISO: Fix CIG auto-allocation to select configurable CIG + +From: Pauli Virtanen + +[ Upstream commit e6a7a46b8636efe95c75bed63a57fc05c13feba4 ] + +Make CIG auto-allocation to select the first CIG_ID that is still +configurable. Also use correct CIG_ID range (see Core v5.3 Vol 4 Part E +Sec 7.8.97 p.2553). 
+ +Previously, it would always select CIG_ID 0 regardless of anything, +because cis_list with data.cis == 0xff (BT_ISO_QOS_CIS_UNSET) would not +count any CIS. Since we are not adding CIS here, use find_cis instead. + +Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections") +Signed-off-by: Pauli Virtanen +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/hci_conn.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 3820d5d873e12..96df87692f962 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1715,24 +1715,23 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) + + memset(&data, 0, sizeof(data)); + +- /* Allocate a CIG if not set */ ++ /* Allocate first still reconfigurable CIG if not set */ + if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { +- for (data.cig = 0x00; data.cig < 0xff; data.cig++) { ++ for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { + data.count = 0; +- data.cis = 0xff; + +- hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, +- BT_BOUND, &data); ++ hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, ++ BT_CONNECT, &data); + if (data.count) + continue; + +- hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, ++ hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, + BT_CONNECTED, &data); + if (!data.count) + break; + } + +- if (data.cig == 0xff) ++ if (data.cig == 0xf0) + return false; + + /* Update CIG */ +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-iso-use-correct-cis-order-in-set-cig-param.patch b/queue-6.3/bluetooth-iso-use-correct-cis-order-in-set-cig-param.patch new file mode 100644 index 00000000000..caf91c732a2 --- /dev/null +++ b/queue-6.3/bluetooth-iso-use-correct-cis-order-in-set-cig-param.patch @@ -0,0 +1,126 @@ +From 9e531931d00093f604b085aa04b024edfdd761a1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 09:34:45 +0300 +Subject: Bluetooth: ISO: use correct CIS order in Set CIG Parameters event + +From: Pauli Virtanen + +[ Upstream commit 71e9588435c38112d6a8686d3d8e7cc1de8fe22c ] + +The order of CIS handle array in Set CIG Parameters response shall match +the order of the CIS_ID array in the command (Core v5.3 Vol 4 Part E Sec +7.8.97). We send CIS_IDs mainly in the order of increasing CIS_ID (but +with "last" CIS first if it has fixed CIG_ID). In handling of the +reply, we currently assume this is also the same as the order of +hci_conn in hdev->conn_hash, but that is not true. + +Match the correct hci_conn to the correct handle by matching them based +on the CIG+CIS combination. The CIG+CIS combination shall be unique for +ISO_LINK hci_conn at state >= BT_BOUND, which we maintain in +hci_le_set_cig_params. 
+ +Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections") +Signed-off-by: Pauli Virtanen +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + include/net/bluetooth/hci_core.h | 3 ++- + net/bluetooth/hci_event.c | 44 +++++++++++++++++++------------- + 2 files changed, 28 insertions(+), 19 deletions(-) + +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index a08e8dc772e54..341592d427520 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -1197,7 +1197,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, + if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) + continue; + +- if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { ++ /* Match destination address if set */ ++ if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { + rcu_read_unlock(); + return c; + } +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index d00ef6e3fc451..09ba6d8987ee1 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -3804,48 +3804,56 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + struct sk_buff *skb) + { + struct hci_rp_le_set_cig_params *rp = data; ++ struct hci_cp_le_set_cig_params *cp; + struct hci_conn *conn; +- int i = 0; ++ u8 status = rp->status; ++ int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + ++ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); ++ if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) { ++ bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); ++ status = HCI_ERROR_UNSPECIFIED; ++ } ++ + hci_dev_lock(hdev); + +- if (rp->status) { ++ if (status) { + while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { + conn->state = BT_CLOSED; +- hci_connect_cfm(conn, rp->status); ++ hci_connect_cfm(conn, status); + hci_conn_del(conn); + } + goto unlock; + } + +- rcu_read_lock(); ++ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 ++ * ++ * If the Status return parameter is zero, then the Controller shall ++ * set the Connection_Handle arrayed return parameter to the connection ++ * handle(s) corresponding to the CIS configurations specified in ++ * the CIS_IDs command parameter, in the same order. 
++ */ ++ for (i = 0; i < rp->num_handles; ++i) { ++ conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, ++ cp->cis[i].cis_id); ++ if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) ++ continue; + +- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { +- if (conn->type != ISO_LINK || +- conn->iso_qos.ucast.cig != rp->cig_id || +- conn->state == BT_CONNECTED) ++ if (conn->state != BT_BOUND && conn->state != BT_CONNECT) + continue; + +- conn->handle = __le16_to_cpu(rp->handle[i++]); ++ conn->handle = __le16_to_cpu(rp->handle[i]); + + bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn, + conn->handle, conn->parent); + + /* Create CIS if LE is already connected */ +- if (conn->parent && conn->parent->state == BT_CONNECTED) { +- rcu_read_unlock(); ++ if (conn->parent && conn->parent->state == BT_CONNECTED) + hci_le_create_cis(conn); +- rcu_read_lock(); +- } +- +- if (i == rp->num_handles) +- break; + } + +- rcu_read_unlock(); +- + unlock: + hci_dev_unlock(hdev); + +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch b/queue-6.3/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch new file mode 100644 index 00000000000..11de6bac529 --- /dev/null +++ b/queue-6.3/bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch @@ -0,0 +1,53 @@ +From 9308444dfb1bc80d898e248022440ab2f90ce530 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 3 Jun 2023 08:28:09 -0400 +Subject: Bluetooth: L2CAP: Add missing checks for invalid DCID + +From: Sungwoo Kim + +[ Upstream commit 75767213f3d9b97f63694d02260b6a49a2271876 ] + +When receiving a connect response we should make sure that the DCID is +within the valid range and that we don't already have another channel +allocated for the same DCID. +Missing checks may violate the specification (BLUETOOTH CORE SPECIFICATION +Version 5.4 | Vol 3, Part A, Page 1046). 
+ +Fixes: 40624183c202 ("Bluetooth: L2CAP: Add missing checks for invalid LE DCID") +Signed-off-by: Sungwoo Kim +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/l2cap_core.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index e54e2aeb2a891..5678218a19607 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -4307,6 +4307,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, + result = __le16_to_cpu(rsp->result); + status = __le16_to_cpu(rsp->status); + ++ if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || ++ dcid > L2CAP_CID_DYN_END)) ++ return -EPROTO; ++ + BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", + dcid, scid, result, status); + +@@ -4338,6 +4342,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, + + switch (result) { + case L2CAP_CR_SUCCESS: ++ if (__l2cap_get_chan_by_dcid(conn, dcid)) { ++ err = -EBADSLT; ++ break; ++ } ++ + l2cap_state_change(chan, BT_CONFIG); + chan->ident = 0; + chan->dcid = dcid; +-- +2.39.2 + diff --git a/queue-6.3/bluetooth-split-bt_iso_qos-into-dedicated-structures.patch b/queue-6.3/bluetooth-split-bt_iso_qos-into-dedicated-structures.patch new file mode 100644 index 00000000000..cead90b2283 --- /dev/null +++ b/queue-6.3/bluetooth-split-bt_iso_qos-into-dedicated-structures.patch @@ -0,0 +1,872 @@ +From 1b9a3cbd70eb3327f22626ca7a89ef772ba396dc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 31 Mar 2023 18:38:01 +0300 +Subject: Bluetooth: Split bt_iso_qos into dedicated structures + +From: Iulia Tanasescu + +[ Upstream commit 0fe8c8d071343fa9278980ce4b6f8e6ea24a2ed1 ] + +Split bt_iso_qos into dedicated unicast and broadcast +structures and add additional broadcast parameters. 
+ +Fixes: eca0ae4aea66 ("Bluetooth: Add initial implementation of BIS connections") +Signed-off-by: Iulia Tanasescu +Signed-off-by: Luiz Augusto von Dentz +Stable-dep-of: 31c5f9164949 ("Bluetooth: ISO: consider right CIS when removing CIG at cleanup") +Signed-off-by: Sasha Levin +--- + include/net/bluetooth/bluetooth.h | 43 +++++--- + include/net/bluetooth/hci_core.h | 9 +- + net/bluetooth/hci_conn.c | 162 ++++++++++++++++-------------- + net/bluetooth/hci_event.c | 33 +++--- + net/bluetooth/iso.c | 125 ++++++++++++++++++----- + 5 files changed, 237 insertions(+), 135 deletions(-) + +diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h +index bcc5a4cd2c17b..1b4230cd42a37 100644 +--- a/include/net/bluetooth/bluetooth.h ++++ b/include/net/bluetooth/bluetooth.h +@@ -1,6 +1,7 @@ + /* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated ++ Copyright 2023 NXP + + Written 2000,2001 by Maxim Krasnyansky + +@@ -171,23 +172,39 @@ struct bt_iso_io_qos { + __u8 rtn; + }; + +-struct bt_iso_qos { +- union { +- __u8 cig; +- __u8 big; +- }; +- union { +- __u8 cis; +- __u8 bis; +- }; +- union { +- __u8 sca; +- __u8 sync_interval; +- }; ++struct bt_iso_ucast_qos { ++ __u8 cig; ++ __u8 cis; ++ __u8 sca; ++ __u8 packing; ++ __u8 framing; ++ struct bt_iso_io_qos in; ++ struct bt_iso_io_qos out; ++}; ++ ++struct bt_iso_bcast_qos { ++ __u8 big; ++ __u8 bis; ++ __u8 sync_interval; + __u8 packing; + __u8 framing; + struct bt_iso_io_qos in; + struct bt_iso_io_qos out; ++ __u8 encryption; ++ __u8 bcode[16]; ++ __u8 options; ++ __u16 skip; ++ __u16 sync_timeout; ++ __u8 sync_cte_type; ++ __u8 mse; ++ __u16 timeout; ++}; ++ ++struct bt_iso_qos { ++ union { ++ struct bt_iso_ucast_qos ucast; ++ struct bt_iso_bcast_qos bcast; ++ }; + }; + + #define BT_ISO_PHY_1M 0x01 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index d5311ceb21c62..86db7f3a31ce5 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -1,6 +1,7 @@ + /* + BlueZ - Bluetooth protocol stack for Linux + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
++ Copyright 2023 NXP + + Written 2000,2001 by Maxim Krasnyansky + +@@ -1091,7 +1092,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, + if (bacmp(&c->dst, ba) || c->type != ISO_LINK) + continue; + +- if (c->iso_qos.big == big && c->iso_qos.bis == bis) { ++ if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { + rcu_read_unlock(); + return c; + } +@@ -1200,7 +1201,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, + if (c->type != ISO_LINK) + continue; + +- if (handle == c->iso_qos.cig) { ++ if (handle == c->iso_qos.ucast.cig) { + rcu_read_unlock(); + return c; + } +@@ -1223,7 +1224,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, + if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) + continue; + +- if (handle == c->iso_qos.big) { ++ if (handle == c->iso_qos.bcast.big) { + rcu_read_unlock(); + return c; + } +@@ -1332,7 +1333,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + __u8 data_len, __u8 *data); + int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, +- __u8 sid); ++ __u8 sid, struct bt_iso_qos *qos); + int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, + __u16 sync_handle, __u8 num_bis, __u8 bis[]); + int hci_conn_check_link_mode(struct hci_conn *conn); +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 8455ba141ee61..5672b49245721 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1,6 +1,7 @@ + /* + BlueZ - Bluetooth protocol stack for Linux + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. ++ Copyright 2023 NXP + + Written 2000,2001 by Maxim Krasnyansky + +@@ -795,8 +796,8 @@ static void bis_list(struct hci_conn *conn, void *data) + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + +- if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET || +- d->bis != conn->iso_qos.bis) ++ if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || ++ d->bis != conn->iso_qos.bcast.bis) + return; + + d->count++; +@@ -916,10 +917,10 @@ static void bis_cleanup(struct hci_conn *conn) + if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) + return; + +- hci_le_terminate_big(hdev, conn->iso_qos.big, +- conn->iso_qos.bis); ++ hci_le_terminate_big(hdev, conn->iso_qos.bcast.big, ++ conn->iso_qos.bcast.bis); + } else { +- hci_le_big_terminate(hdev, conn->iso_qos.big, ++ hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, + conn->sync_handle); + } + } +@@ -959,7 +960,7 @@ static void cis_cleanup(struct hci_conn *conn) + struct iso_list_data d; + + memset(&d, 0, sizeof(d)); +- d.cig = conn->iso_qos.cig; ++ d.cig = conn->iso_qos.ucast.cig; + + /* Check if ISO connection is a CIS and remove CIG if there are + * no other connections using it. 
+@@ -968,7 +969,7 @@ static void cis_cleanup(struct hci_conn *conn) + if (d.count) + return; + +- hci_le_remove_cig(hdev, conn->iso_qos.cig); ++ hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); + } + + struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, +@@ -1411,7 +1412,7 @@ static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) + struct iso_list_data data; + + /* Allocate a BIG if not set */ +- if (qos->big == BT_ISO_QOS_BIG_UNSET) { ++ if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { + for (data.big = 0x00; data.big < 0xef; data.big++) { + data.count = 0; + data.bis = 0xff; +@@ -1426,7 +1427,7 @@ static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) + return -EADDRNOTAVAIL; + + /* Update BIG */ +- qos->big = data.big; ++ qos->bcast.big = data.big; + } + + return 0; +@@ -1437,7 +1438,7 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) + struct iso_list_data data; + + /* Allocate BIS if not set */ +- if (qos->bis == BT_ISO_QOS_BIS_UNSET) { ++ if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { + /* Find an unused adv set to advertise BIS, skip instance 0x00 + * since it is reserved as general purpose set. + */ +@@ -1455,7 +1456,7 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) + return -EADDRNOTAVAIL; + + /* Update BIS */ +- qos->bis = data.bis; ++ qos->bcast.bis = data.bis; + } + + return 0; +@@ -1484,8 +1485,8 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, + if (err) + return ERR_PTR(err); + +- data.big = qos->big; +- data.bis = qos->bis; ++ data.big = qos->bcast.big; ++ data.bis = qos->bcast.bis; + data.count = 0; + + /* Check if there is already a matching BIG/BIS */ +@@ -1493,7 +1494,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, + if (data.count) + return ERR_PTR(-EADDRINUSE); + +- conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis); ++ conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis); + if (conn) + return ERR_PTR(-EADDRINUSE); + +@@ -1648,13 +1649,13 @@ static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos) + { + struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis]; + +- cis->cis_id = qos->cis; +- cis->c_sdu = cpu_to_le16(qos->out.sdu); +- cis->p_sdu = cpu_to_le16(qos->in.sdu); +- cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy; +- cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy; +- cis->c_rtn = qos->out.rtn; +- cis->p_rtn = qos->in.rtn; ++ cis->cis_id = qos->ucast.cis; ++ cis->c_sdu = cpu_to_le16(qos->ucast.out.sdu); ++ cis->p_sdu = cpu_to_le16(qos->ucast.in.sdu); ++ cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy; ++ cis->p_phy = qos->ucast.in.phy ? 
qos->ucast.in.phy : qos->ucast.out.phy; ++ cis->c_rtn = qos->ucast.out.rtn; ++ cis->p_rtn = qos->ucast.in.rtn; + + d->pdu.cp.num_cis++; + } +@@ -1667,8 +1668,8 @@ static void cis_list(struct hci_conn *conn, void *data) + if (!bacmp(&conn->dst, BDADDR_ANY)) + return; + +- if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET || +- d->cis != conn->iso_qos.cis) ++ if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET || ++ d->cis != conn->iso_qos.ucast.cis) + return; + + d->count++; +@@ -1687,17 +1688,18 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) + + memset(&cp, 0, sizeof(cp)); + +- cp.handle = qos->big; +- cp.adv_handle = qos->bis; ++ cp.handle = qos->bcast.big; ++ cp.adv_handle = qos->bcast.bis; + cp.num_bis = 0x01; +- hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval); +- cp.bis.sdu = cpu_to_le16(qos->out.sdu); +- cp.bis.latency = cpu_to_le16(qos->out.latency); +- cp.bis.rtn = qos->out.rtn; +- cp.bis.phy = qos->out.phy; +- cp.bis.packing = qos->packing; +- cp.bis.framing = qos->framing; +- cp.bis.encryption = 0x00; ++ hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); ++ cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu); ++ cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); ++ cp.bis.rtn = qos->bcast.out.rtn; ++ cp.bis.phy = qos->bcast.out.phy; ++ cp.bis.packing = qos->bcast.packing; ++ cp.bis.framing = qos->bcast.framing; ++ cp.bis.encryption = qos->bcast.encryption; ++ memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); + memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode)); + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); +@@ -1711,7 +1713,7 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) + memset(&data, 0, sizeof(data)); + + /* Allocate a CIG if not set */ +- if (qos->cig == BT_ISO_QOS_CIG_UNSET) { ++ if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { + for (data.cig = 0x00; data.cig < 0xff; data.cig++) { + data.count = 0; + data.cis = 0xff; +@@ -1731,22 +1733,22 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) + return false; + + /* Update CIG */ +- qos->cig = data.cig; ++ qos->ucast.cig = data.cig; + } + +- data.pdu.cp.cig_id = qos->cig; +- hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval); +- hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval); +- data.pdu.cp.sca = qos->sca; +- data.pdu.cp.packing = qos->packing; +- data.pdu.cp.framing = qos->framing; +- data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency); +- data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency); ++ data.pdu.cp.cig_id = qos->ucast.cig; ++ hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval); ++ hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval); ++ data.pdu.cp.sca = qos->ucast.sca; ++ data.pdu.cp.packing = qos->ucast.packing; ++ data.pdu.cp.framing = qos->ucast.framing; ++ data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency); ++ data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency); + +- if (qos->cis != BT_ISO_QOS_CIS_UNSET) { ++ if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) { + data.count = 0; +- data.cig = qos->cig; +- data.cis = qos->cis; ++ data.cig = qos->ucast.cig; ++ data.cis = qos->ucast.cis; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, + &data); +@@ -1757,7 +1759,7 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) + } + + /* Reprogram all CIS(s) with the same CIG */ +- for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11; ++ for 
(data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11; + data.cis++) { + data.count = 0; + +@@ -1767,14 +1769,14 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) + continue; + + /* Allocate a CIS if not set */ +- if (qos->cis == BT_ISO_QOS_CIS_UNSET) { ++ if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) { + /* Update CIS */ +- qos->cis = data.cis; ++ qos->ucast.cis = data.cis; + cis_add(&data, qos); + } + } + +- if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) ++ if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) + return false; + + if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS, +@@ -1809,32 +1811,32 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + return cis; + + /* Update LINK PHYs according to QoS preference */ +- cis->le_tx_phy = qos->out.phy; +- cis->le_rx_phy = qos->in.phy; ++ cis->le_tx_phy = qos->ucast.out.phy; ++ cis->le_rx_phy = qos->ucast.in.phy; + + /* If output interval is not set use the input interval as it cannot be + * 0x000000. + */ +- if (!qos->out.interval) +- qos->out.interval = qos->in.interval; ++ if (!qos->ucast.out.interval) ++ qos->ucast.out.interval = qos->ucast.in.interval; + + /* If input interval is not set use the output interval as it cannot be + * 0x000000. + */ +- if (!qos->in.interval) +- qos->in.interval = qos->out.interval; ++ if (!qos->ucast.in.interval) ++ qos->ucast.in.interval = qos->ucast.out.interval; + + /* If output latency is not set use the input latency as it cannot be + * 0x0000. + */ +- if (!qos->out.latency) +- qos->out.latency = qos->in.latency; ++ if (!qos->ucast.out.latency) ++ qos->ucast.out.latency = qos->ucast.in.latency; + + /* If input latency is not set use the output latency as it cannot be + * 0x0000. 
+ */ +- if (!qos->in.latency) +- qos->in.latency = qos->out.latency; ++ if (!qos->ucast.in.latency) ++ qos->ucast.in.latency = qos->ucast.out.latency; + + if (!hci_le_set_cig_params(cis, qos)) { + hci_conn_drop(cis); +@@ -1854,7 +1856,7 @@ bool hci_iso_setup_path(struct hci_conn *conn) + + memset(&cmd, 0, sizeof(cmd)); + +- if (conn->iso_qos.out.sdu) { ++ if (conn->iso_qos.ucast.out.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x00; /* Input (Host to Controller) */ + cmd.path = 0x00; /* HCI path if enabled */ +@@ -1865,7 +1867,7 @@ bool hci_iso_setup_path(struct hci_conn *conn) + return false; + } + +- if (conn->iso_qos.in.sdu) { ++ if (conn->iso_qos.ucast.in.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x01; /* Output (Controller to Host) */ + cmd.path = 0x00; /* HCI path if enabled */ +@@ -1892,7 +1894,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); + cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; +- cig = conn->iso_qos.cig; ++ cig = conn->iso_qos.ucast.cig; + + hci_dev_lock(hdev); + +@@ -1902,7 +1904,7 @@ static int hci_create_cis_sync(struct hci_dev *hdev, void *data) + struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; + + if (conn == data || conn->type != ISO_LINK || +- conn->state == BT_CONNECTED || conn->iso_qos.cig != cig) ++ conn->state == BT_CONNECTED || conn->iso_qos.ucast.cig != cig) + continue; + + /* Check if all CIS(s) belonging to a CIG are ready */ +@@ -2002,8 +2004,8 @@ static void hci_bind_bis(struct hci_conn *conn, + struct bt_iso_qos *qos) + { + /* Update LINK PHYs according to QoS preference */ +- conn->le_tx_phy = qos->out.phy; +- conn->le_tx_phy = qos->out.phy; ++ conn->le_tx_phy = qos->bcast.out.phy; ++ conn->le_tx_phy = qos->bcast.out.phy; + conn->iso_qos = *qos; + conn->state = BT_BOUND; + } +@@ -2016,16 +2018,16 @@ static int create_big_sync(struct hci_dev *hdev, void *data) + u32 flags = 0; + int err; + +- if (qos->out.phy == 0x02) ++ if (qos->bcast.out.phy == 0x02) + flags |= MGMT_ADV_FLAG_SEC_2M; + + /* Align intervals */ +- interval = qos->out.interval / 1250; ++ interval = qos->bcast.out.interval / 1250; + +- if (qos->bis) +- sync_interval = qos->sync_interval * 1600; ++ if (qos->bcast.bis) ++ sync_interval = qos->bcast.sync_interval * 1600; + +- err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len, ++ err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, + conn->le_per_adv_data, flags, interval, + interval, sync_interval); + if (err) +@@ -2062,7 +2064,7 @@ static int create_pa_sync(struct hci_dev *hdev, void *data) + } + + int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, +- __u8 sid) ++ __u8 sid, struct bt_iso_qos *qos) + { + struct hci_cp_le_pa_create_sync *cp; + +@@ -2075,9 +2077,13 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, + return -ENOMEM; + } + ++ cp->options = qos->bcast.options; + cp->sid = sid; + cp->addr_type = dst_type; + bacpy(&cp->addr, dst); ++ cp->skip = cpu_to_le16(qos->bcast.skip); ++ cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); ++ cp->sync_cte_type = qos->bcast.sync_cte_type; + + /* Queue start pa_create_sync and scan */ + return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); +@@ -2100,8 +2106,12 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, + return err; + + memset(&pdu, 0, sizeof(pdu)); +- pdu.cp.handle = qos->big; ++ pdu.cp.handle 
= qos->bcast.big; + pdu.cp.sync_handle = cpu_to_le16(sync_handle); ++ pdu.cp.encryption = qos->bcast.encryption; ++ memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode)); ++ pdu.cp.mse = qos->bcast.mse; ++ pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout); + pdu.cp.num_bis = num_bis; + memcpy(pdu.bis, bis, num_bis); + +@@ -2151,7 +2161,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + return ERR_PTR(err); + } + +- hci_iso_qos_setup(hdev, conn, &qos->out, ++ hci_iso_qos_setup(hdev, conn, &qos->bcast.out, + conn->le_tx_phy ? conn->le_tx_phy : + hdev->le_tx_def_phys); + +@@ -2177,9 +2187,9 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + if (IS_ERR(le)) + return le; + +- hci_iso_qos_setup(hdev, le, &qos->out, ++ hci_iso_qos_setup(hdev, le, &qos->ucast.out, + le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); +- hci_iso_qos_setup(hdev, le, &qos->in, ++ hci_iso_qos_setup(hdev, le, &qos->ucast.in, + le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); + + cis = hci_bind_cis(hdev, dst, dst_type, qos); +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 51f13518dba9b..0e0a93cc12186 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -1,6 +1,7 @@ + /* + BlueZ - Bluetooth protocol stack for Linux + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. ++ Copyright 2023 NXP + + Written 2000,2001 by Maxim Krasnyansky + +@@ -3833,7 +3834,7 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { +- if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || ++ if (conn->type != ISO_LINK || conn->iso_qos.ucast.cig != rp->cig_id || + conn->state == BT_CONNECTED) + continue; + +@@ -3890,7 +3891,7 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, + /* Input (Host to Controller) */ + case 0x00: + /* Only confirm connection if output only */ +- if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) ++ if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) + hci_connect_cfm(conn, rp->status); + break; + /* Output (Controller to Host) */ +@@ -6818,15 +6819,15 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, + memset(&interval, 0, sizeof(interval)); + + memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); +- conn->iso_qos.in.interval = le32_to_cpu(interval); ++ conn->iso_qos.ucast.in.interval = le32_to_cpu(interval); + memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); +- conn->iso_qos.out.interval = le32_to_cpu(interval); +- conn->iso_qos.in.latency = le16_to_cpu(ev->interval); +- conn->iso_qos.out.latency = le16_to_cpu(ev->interval); +- conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); +- conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); +- conn->iso_qos.in.phy = ev->c_phy; +- conn->iso_qos.out.phy = ev->p_phy; ++ conn->iso_qos.ucast.out.interval = le32_to_cpu(interval); ++ conn->iso_qos.ucast.in.latency = le16_to_cpu(ev->interval); ++ conn->iso_qos.ucast.out.latency = le16_to_cpu(ev->interval); ++ conn->iso_qos.ucast.in.sdu = le16_to_cpu(ev->c_mtu); ++ conn->iso_qos.ucast.out.sdu = le16_to_cpu(ev->p_mtu); ++ conn->iso_qos.ucast.in.phy = ev->c_phy; ++ conn->iso_qos.ucast.out.phy = ev->p_phy; + } + + if (!ev->status) { +@@ -6900,8 +6901,8 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, + cis->handle = cis_handle; + } + +- cis->iso_qos.cig = ev->cig_id; +- cis->iso_qos.cis = ev->cis_id; ++ 
cis->iso_qos.ucast.cig = ev->cig_id; ++ cis->iso_qos.ucast.cis = ev->cis_id; + + if (!(flags & HCI_PROTO_DEFER)) { + hci_le_accept_cis(hdev, ev->cis_handle); +@@ -6988,13 +6989,13 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, + bis->handle = handle; + } + +- bis->iso_qos.big = ev->handle; ++ bis->iso_qos.bcast.big = ev->handle; + memset(&interval, 0, sizeof(interval)); + memcpy(&interval, ev->latency, sizeof(ev->latency)); +- bis->iso_qos.in.interval = le32_to_cpu(interval); ++ bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); + /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ +- bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; +- bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); ++ bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; ++ bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); + + hci_iso_setup_path(bis); + } +diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c +index 8d136a7301630..74117df03a3fa 100644 +--- a/net/bluetooth/iso.c ++++ b/net/bluetooth/iso.c +@@ -3,6 +3,7 @@ + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation ++ * Copyright 2023 NXP + */ + + #include +@@ -59,11 +60,17 @@ struct iso_pinfo { + __u16 sync_handle; + __u32 flags; + struct bt_iso_qos qos; ++ bool qos_user_set; + __u8 base_len; + __u8 base[BASE_MAX_LENGTH]; + struct iso_conn *conn; + }; + ++static struct bt_iso_qos default_qos; ++ ++static bool check_ucast_qos(struct bt_iso_qos *qos); ++static bool check_bcast_qos(struct bt_iso_qos *qos); ++ + /* ---- ISO timers ---- */ + #define ISO_CONN_TIMEOUT (HZ * 40) + #define ISO_DISCONN_TIMEOUT (HZ * 2) +@@ -264,8 +271,15 @@ static int iso_connect_bis(struct sock *sk) + goto unlock; + } + ++ /* Fail if user set invalid QoS */ ++ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { ++ iso_pi(sk)->qos = default_qos; ++ err = -EINVAL; ++ goto unlock; ++ } ++ + /* Fail if out PHYs are marked as disabled */ +- if (!iso_pi(sk)->qos.out.phy) { ++ if (!iso_pi(sk)->qos.bcast.out.phy) { + err = -EINVAL; + goto unlock; + } +@@ -336,8 +350,15 @@ static int iso_connect_cis(struct sock *sk) + goto unlock; + } + ++ /* Fail if user set invalid QoS */ ++ if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { ++ iso_pi(sk)->qos = default_qos; ++ err = -EINVAL; ++ goto unlock; ++ } ++ + /* Fail if either PHYs are marked as disabled */ +- if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) { ++ if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) { + err = -EINVAL; + goto unlock; + } +@@ -417,7 +438,7 @@ static int iso_send_frame(struct sock *sk, struct sk_buff *skb) + + BT_DBG("sk %p len %d", sk, skb->len); + +- if (skb->len > qos->out.sdu) ++ if (skb->len > qos->ucast.out.sdu) + return -EMSGSIZE; + + len = skb->len; +@@ -680,13 +701,23 @@ static struct proto iso_proto = { + } + + static struct bt_iso_qos default_qos = { +- .cig = BT_ISO_QOS_CIG_UNSET, +- .cis = BT_ISO_QOS_CIS_UNSET, +- .sca = 0x00, +- .packing = 0x00, +- .framing = 0x00, +- .in = DEFAULT_IO_QOS, +- .out = DEFAULT_IO_QOS, ++ .bcast = { ++ .big = BT_ISO_QOS_BIG_UNSET, ++ .bis = BT_ISO_QOS_BIS_UNSET, ++ .sync_interval = 0x00, ++ .packing = 0x00, ++ .framing = 0x00, ++ .in = DEFAULT_IO_QOS, ++ .out = DEFAULT_IO_QOS, ++ .encryption = 0x00, ++ .bcode = {0x00}, ++ .options = 0x00, ++ .skip = 0x0000, ++ .sync_timeout = 0x4000, ++ .sync_cte_type = 0x00, ++ .mse = 0x00, ++ .timeout = 0x4000, ++ }, + }; + + static struct sock 
*iso_sock_alloc(struct net *net, struct socket *sock, +@@ -893,9 +924,15 @@ static int iso_listen_bis(struct sock *sk) + if (!hdev) + return -EHOSTUNREACH; + ++ /* Fail if user set invalid QoS */ ++ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { ++ iso_pi(sk)->qos = default_qos; ++ return -EINVAL; ++ } ++ + err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), +- iso_pi(sk)->bc_sid); ++ iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); + + hci_dev_put(hdev); + +@@ -1154,21 +1191,62 @@ static bool check_io_qos(struct bt_iso_io_qos *qos) + return true; + } + +-static bool check_qos(struct bt_iso_qos *qos) ++static bool check_ucast_qos(struct bt_iso_qos *qos) + { +- if (qos->sca > 0x07) ++ if (qos->ucast.sca > 0x07) + return false; + +- if (qos->packing > 0x01) ++ if (qos->ucast.packing > 0x01) + return false; + +- if (qos->framing > 0x01) ++ if (qos->ucast.framing > 0x01) + return false; + +- if (!check_io_qos(&qos->in)) ++ if (!check_io_qos(&qos->ucast.in)) + return false; + +- if (!check_io_qos(&qos->out)) ++ if (!check_io_qos(&qos->ucast.out)) ++ return false; ++ ++ return true; ++} ++ ++static bool check_bcast_qos(struct bt_iso_qos *qos) ++{ ++ if (qos->bcast.sync_interval > 0x07) ++ return false; ++ ++ if (qos->bcast.packing > 0x01) ++ return false; ++ ++ if (qos->bcast.framing > 0x01) ++ return false; ++ ++ if (!check_io_qos(&qos->bcast.in)) ++ return false; ++ ++ if (!check_io_qos(&qos->bcast.out)) ++ return false; ++ ++ if (qos->bcast.encryption > 0x01) ++ return false; ++ ++ if (qos->bcast.options > 0x07) ++ return false; ++ ++ if (qos->bcast.skip > 0x01f3) ++ return false; ++ ++ if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000) ++ return false; ++ ++ if (qos->bcast.sync_cte_type > 0x1f) ++ return false; ++ ++ if (qos->bcast.mse > 0x1f) ++ return false; ++ ++ if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) + return false; + + return true; +@@ -1179,7 +1257,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, + { + struct sock *sk = sock->sk; + int len, err = 0; +- struct bt_iso_qos qos; ++ struct bt_iso_qos qos = default_qos; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -1212,24 +1290,19 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, + } + + len = min_t(unsigned int, sizeof(qos), optlen); +- if (len != sizeof(qos)) { +- err = -EINVAL; +- break; +- } +- +- memset(&qos, 0, sizeof(qos)); + + if (copy_from_sockptr(&qos, optval, len)) { + err = -EFAULT; + break; + } + +- if (!check_qos(&qos)) { ++ if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) { + err = -EINVAL; + break; + } + + iso_pi(sk)->qos = qos; ++ iso_pi(sk)->qos_user_set = true; + + break; + +@@ -1419,7 +1492,7 @@ static bool iso_match_big(struct sock *sk, void *data) + { + struct hci_evt_le_big_sync_estabilished *ev = data; + +- return ev->handle == iso_pi(sk)->qos.big; ++ return ev->handle == iso_pi(sk)->qos.bcast.big; + } + + static void iso_conn_ready(struct iso_conn *conn) +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch b/queue-6.3/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch new file mode 100644 index 00000000000..536170e369a --- /dev/null +++ b/queue-6.3/bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch @@ -0,0 +1,43 @@ +From e2398b27f5665fe0004d1ab4fb23971e46341bef Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:05 -0700 +Subject: bnxt_en: Don't issue AP reset during ethtool's 
reset operation + +From: Sreekanth Reddy + +[ Upstream commit 1d997801c7cc6a7f542e46d5a6bf16f893ad3fe9 ] + +Only older NIC controller's firmware uses the PROC AP reset type. +Firmware on 5731X/5741X and newer chips does not support this reset +type. When bnxt_reset() issues a series of resets, this PROC AP +reset may actually fail on these newer chips because the firmware +is not ready to accept this unsupported command yet. Avoid this +unnecessary error by skipping this reset type on chips that don't +support it. + +Fixes: 7a13240e3718 ("bnxt_en: fix ethtool_reset_flags ABI violations") +Reviewed-by: Pavan Chebbi +Signed-off-by: Sreekanth Reddy +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +index 2dd8ee4a6f75b..8fd5071d8b099 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +@@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) + } + } + +- if (req & BNXT_FW_RESET_AP) { ++ if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_ap(dev)) { +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-fix-bnxt_hwrm_update_rss_hash_cfg.patch b/queue-6.3/bnxt_en-fix-bnxt_hwrm_update_rss_hash_cfg.patch new file mode 100644 index 00000000000..439ff38a18c --- /dev/null +++ b/queue-6.3/bnxt_en-fix-bnxt_hwrm_update_rss_hash_cfg.patch @@ -0,0 +1,38 @@ +From 4a1681dbfd51ac885de7079cda80bc7e67d8c7c8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:04 -0700 +Subject: bnxt_en: Fix bnxt_hwrm_update_rss_hash_cfg() + +From: Pavan Chebbi + +[ Upstream commit 095d5dc0c1d9f3284e3c575ccf4c0e8b04b548f8 ] + +We must specify the vnic id of the vnic in the input structure of this +firmware message. Otherwise we will get an error from the firmware. 
+ +Fixes: 98a4322b70e8 ("bnxt_en: update RSS config using difference algorithm") +Reviewed-by: Kalesh Anakkur Purayil +Reviewed-by: Somnath Kotur +Signed-off-by: Pavan Chebbi +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 651b79ce5d80c..26766c93b06ac 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -5376,6 +5376,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) + return; + ++ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + /* all contexts configured to same hash_type, zero always exists */ + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + resp = hwrm_req_hold(bp, req); +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch b/queue-6.3/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch new file mode 100644 index 00000000000..49914231568 --- /dev/null +++ b/queue-6.3/bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch @@ -0,0 +1,81 @@ +From 42a093af57fbdfa5d7b2ae6a69c6431ae740f01b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:09 -0700 +Subject: bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks + +From: Somnath Kotur + +[ Upstream commit 1eb4ef12591348c440ac9d6efcf7521e73cf2b10 ] + +As per the new udp tunnel framework, drivers which need to know the +details of a port entry (i.e. port type) when it gets deleted should +use the .set_port / .unset_port callbacks. + +Implementing the current .udp_tunnel_sync callback would mean that the +deleted tunnel port entry would be all zeros. This used to work on +older firmware because it would not check the input when deleting a +tunnel port. With newer firmware, the delete will now fail and +subsequent tunnel port allocation will fail as a result. 
+ +Fixes: 442a35a5a7aa ("bnxt: convert to new udp_tunnel_nic infra") +Reviewed-by: Kalesh Anakkur Purayil +Signed-off-by: Somnath Kotur +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index f14519aa6d4f6..9784e86d4d96a 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -13062,26 +13062,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) + + #endif /* CONFIG_RFS_ACCEL */ + +-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) ++static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, ++ unsigned int entry, struct udp_tunnel_info *ti) + { + struct bnxt *bp = netdev_priv(netdev); +- struct udp_tunnel_info ti; + unsigned int cmd; + +- udp_tunnel_nic_get_port(netdev, table, 0, &ti); +- if (ti.type == UDP_TUNNEL_TYPE_VXLAN) ++ if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; + +- if (ti.port) +- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); ++ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); ++} ++ ++static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, ++ unsigned int entry, struct udp_tunnel_info *ti) ++{ ++ struct bnxt *bp = netdev_priv(netdev); ++ unsigned int cmd; ++ ++ if (ti->type == UDP_TUNNEL_TYPE_VXLAN) ++ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; ++ else ++ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; + + return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); + } + + static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { +- .sync_table = bnxt_udp_tunnel_sync, ++ .set_port = bnxt_udp_tunnel_set_port, ++ .unset_port = bnxt_udp_tunnel_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch b/queue-6.3/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch new file mode 100644 index 00000000000..8280da1cad0 --- /dev/null +++ b/queue-6.3/bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch @@ -0,0 +1,67 @@ +From 693219dfcd6f3c9c06317e24dbf26b2e45cb3a50 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:08 -0700 +Subject: bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE + event + +From: Pavan Chebbi + +[ Upstream commit 319a7827df9784048abe072afe6b4fb4501d8de4 ] + +The firmware can send PHC_RTC_UPDATE async event on a PF that may not +have PTP registered. In such a case, there will be a null pointer +deference for bp->ptp_cfg when we try to handle the event. + +Fix it by not registering for this event with the firmware if !bp->ptp_cfg. +Also, check that bp->ptp_cfg is valid before proceeding when we receive +the event. 
+ +Fixes: 8bcf6f04d4a5 ("bnxt_en: Handle async event when the PHC is updated in RTC mode") +Signed-off-by: Pavan Chebbi +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++++ + drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 1 + + 2 files changed, 7 insertions(+) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 48753ebe79c37..f14519aa6d4f6 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -2392,6 +2392,9 @@ static int bnxt_async_event_process(struct bnxt *bp, + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 ns; + ++ if (!ptp) ++ goto async_event_process_exit; ++ + spin_lock_bh(&ptp->ptp_lock); + bnxt_ptp_update_current_time(bp); + ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << +@@ -4789,6 +4792,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + continue; ++ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && ++ !bp->ptp_cfg) ++ continue; + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + } + if (bmap && bmap_size) { +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +index a3a3978a4d1c2..af7b4466f9520 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +@@ -946,6 +946,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) + bnxt_ptp_timecounter_init(bp, true); + bnxt_ptp_adjfine_rtc(bp, 0); + } ++ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); + + ptp->ptp_info = bnxt_ptp_caps; + if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch b/queue-6.3/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch new file mode 100644 index 00000000000..05c72c35eff --- /dev/null +++ b/queue-6.3/bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch @@ -0,0 +1,51 @@ +From 89e9bb0be07bbc0d462e4afe8af9cae07d9ad293 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:06 -0700 +Subject: bnxt_en: Query default VLAN before VNIC setup on a VF + +From: Somnath Kotur + +[ Upstream commit 1a9e4f501bc6ff1b6ecb60df54fbf2b54db43bfe ] + +We need to call bnxt_hwrm_func_qcfg() on a VF to query the default +VLAN that may be setup by the PF. If a default VLAN is enabled, +the VF cannot support VLAN acceleration on the receive side and +the VNIC must be setup to strip out the default VLAN tag. If a +default VLAN is not enabled, the VF can support VLAN acceleration +on the receive side. The VNIC should be set up to strip or not +strip the VLAN based on the RX VLAN acceleration setting. + +Without this call to determine the default VLAN before calling +bnxt_setup_vnic(), the VNIC may not be set up correctly. For +example, bnxt_setup_vnic() may set up to strip the VLAN tag based +on stale default VLAN information. If RX VLAN acceleration is +not enabled, the VLAN tag will be incorrectly stripped and the +RX data path will not work correctly. 
+ +Fixes: cf6645f8ebc6 ("bnxt_en: Add function for VF driver to query default VLAN.") +Reviewed-by: Pavan Chebbi +Signed-off-by: Somnath Kotur +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 26766c93b06ac..d974cfdbc9378 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -8839,6 +8839,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) + goto err_out; + } + ++ if (BNXT_VF(bp)) ++ bnxt_hwrm_func_qcfg(bp); ++ + rc = bnxt_setup_vnic(bp, 0); + if (rc) + goto err_out; +-- +2.39.2 + diff --git a/queue-6.3/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch b/queue-6.3/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch new file mode 100644 index 00000000000..bc049e6c4c8 --- /dev/null +++ b/queue-6.3/bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch @@ -0,0 +1,60 @@ +From c54e560c7770cf93a8edfffeae330b5d705d336d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 00:54:07 -0700 +Subject: bnxt_en: Skip firmware fatal error recovery if chip is not accessible + +From: Vikas Gupta + +[ Upstream commit 83474a9b252ab23e6003865c2775024344cb9c09 ] + +Driver starts firmware fatal error recovery by detecting +heartbeat failure or fw reset count register changing. But +these checks are not reliable if the device is not accessible. +This can happen while DPC (Downstream Port containment) is in +progress. Skip firmware fatal recovery if pci_device_is_present() +returns false. + +Fixes: acfb50e4e773 ("bnxt_en: Add FW fatal devlink_health_reporter.") +Reviewed-by: Somnath Kotur +Reviewed-by: Pavan Chebbi +Signed-off-by: Vikas Gupta +Signed-off-by: Michael Chan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index d974cfdbc9378..48753ebe79c37 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -11628,6 +11628,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) + static void bnxt_fw_health_check(struct bnxt *bp) + { + struct bnxt_fw_health *fw_health = bp->fw_health; ++ struct pci_dev *pdev = bp->pdev; + u32 val; + + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) +@@ -11641,7 +11642,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) + } + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); +- if (val == fw_health->last_fw_heartbeat) { ++ if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { + fw_health->arrests++; + goto fw_reset; + } +@@ -11649,7 +11650,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) + fw_health->last_fw_heartbeat = val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); +- if (val != fw_health->last_fw_reset_cnt) { ++ if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { + fw_health->discoveries++; + goto fw_reset; + } +-- +2.39.2 + diff --git a/queue-6.3/bpf-add-extra-path-pointer-check-to-d_path-helper.patch b/queue-6.3/bpf-add-extra-path-pointer-check-to-d_path-helper.patch new file mode 100644 index 00000000000..b1f491f3770 --- /dev/null +++ 
b/queue-6.3/bpf-add-extra-path-pointer-check-to-d_path-helper.patch @@ -0,0 +1,98 @@ +From b1d989262ef922f15b25ff52f7a349aa8a94ef67 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 11:17:14 -0700 +Subject: bpf: Add extra path pointer check to d_path helper + +From: Jiri Olsa + +[ Upstream commit f46fab0e36e611a2389d3843f34658c849b6bd60 ] + +Anastasios reported crash on stable 5.15 kernel with following +BPF attached to lsm hook: + + SEC("lsm.s/bprm_creds_for_exec") + int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm) + { + struct path *path = &bprm->executable->f_path; + char p[128] = { 0 }; + + bpf_d_path(path, p, 128); + return 0; + } + +But bprm->executable can be NULL, so bpf_d_path call will crash: + + BUG: kernel NULL pointer dereference, address: 0000000000000018 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 0 P4D 0 + Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC NOPTI + ... + RIP: 0010:d_path+0x22/0x280 + ... + Call Trace: + + bpf_d_path+0x21/0x60 + bpf_prog_db9cf176e84498d9_bprm_creds_for_exec+0x94/0x99 + bpf_trampoline_6442506293_0+0x55/0x1000 + bpf_lsm_bprm_creds_for_exec+0x5/0x10 + security_bprm_creds_for_exec+0x29/0x40 + bprm_execve+0x1c1/0x900 + do_execveat_common.isra.0+0x1af/0x260 + __x64_sys_execve+0x32/0x40 + +It's problem for all stable trees with bpf_d_path helper, which was +added in 5.9. + +This issue is fixed in current bpf code, where we identify and mark +trusted pointers, so the above code would fail even to load. + +For the sake of the stable trees and to workaround potentially broken +verifier in the future, adding the code that reads the path object from +the passed pointer and verifies it's valid in kernel space. + +Fixes: 6e22ab9da793 ("bpf: Add d_path helper") +Reported-by: Anastasios Papagiannis +Suggested-by: Alexei Starovoitov +Signed-off-by: Jiri Olsa +Signed-off-by: Daniel Borkmann +Acked-by: Stanislav Fomichev +Acked-by: Yonghong Song +Link: https://lore.kernel.org/bpf/20230606181714.532998-1-jolsa@kernel.org +Signed-off-by: Sasha Levin +--- + kernel/trace/bpf_trace.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index e8da032bb6fc8..165441044bc55 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -900,13 +900,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = { + + BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) + { ++ struct path copy; + long len; + char *p; + + if (!sz) + return 0; + +- p = d_path(path, buf, sz); ++ /* ++ * The path pointer is verified as trusted and safe to use, ++ * but let's double check it's valid anyway to workaround ++ * potentially broken verifier. 
++ */ ++ len = copy_from_kernel_nofault(©, path, sizeof(*path)); ++ if (len < 0) ++ return len; ++ ++ p = d_path(©, buf, sz); + if (IS_ERR(p)) { + len = PTR_ERR(p); + } else { +-- +2.39.2 + diff --git a/queue-6.3/bpf-fix-elem_size-not-being-set-for-inner-maps.patch b/queue-6.3/bpf-fix-elem_size-not-being-set-for-inner-maps.patch new file mode 100644 index 00000000000..4f203cadf80 --- /dev/null +++ b/queue-6.3/bpf-fix-elem_size-not-being-set-for-inner-maps.patch @@ -0,0 +1,56 @@ +From df6e48e18fef13abb581df9fe394ae47cd6899a7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 19:02:02 +0000 +Subject: bpf: Fix elem_size not being set for inner maps + +From: Rhys Rustad-Elliott + +[ Upstream commit cba41bb78d70aad98d8e61e019fd48c561f7f396 ] + +Commit d937bc3449fa ("bpf: make uniform use of array->elem_size +everywhere in arraymap.c") changed array_map_gen_lookup to use +array->elem_size instead of round_up(map->value_size, 8) as the element +size when generating code to access a value in an array map. + +array->elem_size, however, is not set by bpf_map_meta_alloc when +initializing an BPF_MAP_TYPE_ARRAY_OF_MAPS or BPF_MAP_TYPE_HASH_OF_MAPS. +This results in array_map_gen_lookup incorrectly outputting code that +always accesses index 0 in the array (as the index will be calculated +via a multiplication with the element size, which is incorrectly set to +0). + +Set elem_size on the bpf_array object when allocating an array or hash +of maps to fix this. + +Fixes: d937bc3449fa ("bpf: make uniform use of array->elem_size everywhere in arraymap.c") +Signed-off-by: Rhys Rustad-Elliott +Link: https://lore.kernel.org/r/20230602190110.47068-2-me@rhysre.net +Signed-off-by: Martin KaFai Lau +Signed-off-by: Sasha Levin +--- + kernel/bpf/map_in_map.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c +index 38136ec4e095a..fbc3e944dc747 100644 +--- a/kernel/bpf/map_in_map.c ++++ b/kernel/bpf/map_in_map.c +@@ -81,9 +81,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) + /* Misc members not needed in bpf_map_meta_equal() check. */ + inner_map_meta->ops = inner_map->ops; + if (inner_map->ops == &array_map_ops) { ++ struct bpf_array *inner_array_meta = ++ container_of(inner_map_meta, struct bpf_array, map); ++ struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map); ++ ++ inner_array_meta->index_mask = inner_array->index_mask; ++ inner_array_meta->elem_size = inner_array->elem_size; + inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; +- container_of(inner_map_meta, struct bpf_array, map)->index_mask = +- container_of(inner_map, struct bpf_array, map)->index_mask; + } + + fdput(f); +-- +2.39.2 + diff --git a/queue-6.3/bpf-fix-uaf-in-task-local-storage.patch b/queue-6.3/bpf-fix-uaf-in-task-local-storage.patch new file mode 100644 index 00000000000..28fccee44f6 --- /dev/null +++ b/queue-6.3/bpf-fix-uaf-in-task-local-storage.patch @@ -0,0 +1,56 @@ +From d0134f53a9eb64c02044c28e89288b9734541128 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 02:26:12 +0200 +Subject: bpf: Fix UAF in task local storage + +From: KP Singh + +[ Upstream commit b0fd1852bcc21accca6260ef245356d5c141ff66 ] + +When task local storage was generalized for tracing programs, the +bpf_task_local_storage callback was moved from a BPF LSM hook +callback for security_task_free LSM hook to it's own callback. 
But a +failure case in bad_fork_cleanup_security was missed which, when +triggered, led to a dangling task owner pointer and a subsequent +use-after-free. Move the bpf_task_storage_free to the very end of +free_task to handle all failure cases. + +This issue was noticed when a BPF LSM program was attached to the +task_alloc hook on a kernel with KASAN enabled. The program used +bpf_task_storage_get to copy the task local storage from the current +task to the new task being created. + +Fixes: a10787e6d58c ("bpf: Enable task local storage for tracing programs") +Reported-by: Kuba Piecuch +Signed-off-by: KP Singh +Acked-by: Song Liu +Link: https://lore.kernel.org/r/20230602002612.1117381-1-kpsingh@kernel.org +Signed-off-by: Martin KaFai Lau +Signed-off-by: Sasha Levin +--- + kernel/fork.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/fork.c b/kernel/fork.c +index ea332319dffea..1ec1e9ea4bf83 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -559,6 +559,7 @@ void free_task(struct task_struct *tsk) + arch_release_task_struct(tsk); + if (tsk->flags & PF_KTHREAD) + free_kthread_struct(tsk); ++ bpf_task_storage_free(tsk); + free_task_struct(tsk); + } + EXPORT_SYMBOL(free_task); +@@ -845,7 +846,6 @@ void __put_task_struct(struct task_struct *tsk) + cgroup_free(tsk); + task_numa_free(tsk, true); + security_task_free(tsk); +- bpf_task_storage_free(tsk); + exit_creds(tsk); + delayacct_tsk_free(tsk); + put_signal_struct(tsk->signal); +-- +2.39.2 + diff --git a/queue-6.3/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch b/queue-6.3/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch new file mode 100644 index 00000000000..bcb5083f822 --- /dev/null +++ b/queue-6.3/bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch @@ -0,0 +1,92 @@ +From 56ff95bdae602263bc0e978e4f1e4b018da737ab Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 30 May 2023 19:51:49 +0000 +Subject: bpf, sockmap: Avoid potential NULL dereference in + sk_psock_verdict_data_ready() + +From: Eric Dumazet + +[ Upstream commit b320a45638296b63be8d9a901ca8bc43716b1ae1 ] + +syzbot found sk_psock(sk) could return NULL when called +from sk_psock_verdict_data_ready(). + +Just make sure to handle this case. 
+ +[1] +general protection fault, probably for non-canonical address 0xdffffc000000005c: 0000 [#1] PREEMPT SMP KASAN +KASAN: null-ptr-deref in range [0x00000000000002e0-0x00000000000002e7] +CPU: 0 PID: 15 Comm: ksoftirqd/0 Not tainted 6.4.0-rc3-syzkaller-00588-g4781e965e655 #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023 +RIP: 0010:sk_psock_verdict_data_ready+0x19f/0x3c0 net/core/skmsg.c:1213 +Code: 4c 89 e6 e8 63 70 5e f9 4d 85 e4 75 75 e8 19 74 5e f9 48 8d bb e0 02 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 07 02 00 00 48 89 ef ff 93 e0 02 00 00 e8 29 fd +RSP: 0018:ffffc90000147688 EFLAGS: 00010206 +RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000100 +RDX: 000000000000005c RSI: ffffffff8825ceb7 RDI: 00000000000002e0 +RBP: ffff888076518c40 R08: 0000000000000007 R09: 0000000000000000 +R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000 +R13: 0000000000000000 R14: 0000000000008000 R15: ffff888076518c40 +FS: 0000000000000000(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 00007f901375bab0 CR3: 000000004bf26000 CR4: 00000000003506f0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +Call Trace: + +tcp_data_ready+0x10a/0x520 net/ipv4/tcp_input.c:5006 +tcp_data_queue+0x25d3/0x4c50 net/ipv4/tcp_input.c:5080 +tcp_rcv_established+0x829/0x1f90 net/ipv4/tcp_input.c:6019 +tcp_v4_do_rcv+0x65a/0x9c0 net/ipv4/tcp_ipv4.c:1726 +tcp_v4_rcv+0x2cbf/0x3340 net/ipv4/tcp_ipv4.c:2148 +ip_protocol_deliver_rcu+0x9f/0x480 net/ipv4/ip_input.c:205 +ip_local_deliver_finish+0x2ec/0x520 net/ipv4/ip_input.c:233 +NF_HOOK include/linux/netfilter.h:303 [inline] +NF_HOOK include/linux/netfilter.h:297 [inline] +ip_local_deliver+0x1ae/0x200 net/ipv4/ip_input.c:254 +dst_input include/net/dst.h:468 [inline] +ip_rcv_finish+0x1cf/0x2f0 net/ipv4/ip_input.c:449 +NF_HOOK include/linux/netfilter.h:303 [inline] +NF_HOOK include/linux/netfilter.h:297 [inline] +ip_rcv+0xae/0xd0 net/ipv4/ip_input.c:569 +__netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5491 +__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5605 +process_backlog+0x101/0x670 net/core/dev.c:5933 +__napi_poll+0xb7/0x6f0 net/core/dev.c:6499 +napi_poll net/core/dev.c:6566 [inline] +net_rx_action+0x8a9/0xcb0 net/core/dev.c:6699 +__do_softirq+0x1d4/0x905 kernel/softirq.c:571 +run_ksoftirqd kernel/softirq.c:939 [inline] +run_ksoftirqd+0x31/0x60 kernel/softirq.c:931 +smpboot_thread_fn+0x659/0x9e0 kernel/smpboot.c:164 +kthread+0x344/0x440 kernel/kthread.c:379 +ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 + + +Fixes: 6df7f764cd3c ("bpf, sockmap: Wake up polling after data copy") +Reported-by: syzbot +Signed-off-by: Eric Dumazet +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Link: https://lore.kernel.org/bpf/20230530195149.68145-1-edumazet@google.com +Signed-off-by: Sasha Levin +--- + net/core/skmsg.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/core/skmsg.c b/net/core/skmsg.c +index a9060e1f0e437..a29508e1ff356 100644 +--- a/net/core/skmsg.c ++++ b/net/core/skmsg.c +@@ -1210,7 +1210,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk) + + rcu_read_lock(); + psock = sk_psock(sk); +- psock->saved_data_ready(sk); ++ if (psock) ++ psock->saved_data_ready(sk); + rcu_read_unlock(); + } + } +-- +2.39.2 + diff --git 
a/queue-6.3/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch b/queue-6.3/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch new file mode 100644 index 00000000000..9ac3e921d3e --- /dev/null +++ b/queue-6.3/drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch @@ -0,0 +1,78 @@ +From 4c1e60ab876e1d5f0f7ba5ed3caefa2d7937e1aa Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 29 May 2023 14:23:37 -0400 +Subject: drm/amdgpu: fix Null pointer dereference error in + amdgpu_device_recover_vram + +From: Horatio Zhang + +[ Upstream commit 2a1eb1a343208ce7d6839b73d62aece343e693ff ] + +Use the function of amdgpu_bo_vm_destroy to handle the resource release +of shadow bo. During the amdgpu_mes_self_test, shadow bo released, but +vmbo->shadow_list was not, which caused a null pointer reference error +in amdgpu_device_recover_vram when GPU reset. + +Fixes: 6c032c37ac3e ("drm/amdgpu: Fix vram recover doesn't work after whole GPU reset (v2)") +Signed-off-by: xinhui pan +Signed-off-by: Horatio Zhang +Acked-by: Feifei Xu +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 ++++------ + drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 1 - + 2 files changed, 4 insertions(+), 7 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index 6c7d672412b21..5e9a0c1bb3079 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) + static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) + { + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); +- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); ++ struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; + struct amdgpu_bo_vm *vmbo; + ++ bo = shadow_bo->parent; + vmbo = to_amdgpu_bo_vm(bo); + /* in case amdgpu_device_recover_vram got NULL of bo->parent */ + if (!list_empty(&vmbo->shadow_list)) { +@@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, + return r; + + *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); +- INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); +- /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list +- * is initialized. 
+- */ +- bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy; + return r; + } + +@@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) + + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&vmbo->shadow_list, &adev->shadow_list); ++ vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); ++ vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; + mutex_unlock(&adev->shadow_list_lock); + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +index 01e42bdd8e4e8..4642cff0e1a4f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, + return r; + } + +- (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); + amdgpu_bo_add_to_shadow_list(*vmbo); + + return 0; +-- +2.39.2 + diff --git a/queue-6.3/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch b/queue-6.3/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch new file mode 100644 index 00000000000..3c4fd65ea6a --- /dev/null +++ b/queue-6.3/drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch @@ -0,0 +1,88 @@ +From 86630d655822ec3481acae08f085549c0dbe7cff Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 29 Mar 2023 20:24:34 +0300 +Subject: drm/i915: Explain the magic numbers for AUX SYNC/precharge length +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Ville Syrjälä + +[ Upstream commit 26bfc3f36f2104c174dfc72415547d5c28ef3f1c ] + +Replace the hardcoded final numbers in the AUX SYNC/precharge +setup, and derive those from numbers from the (e)DP specs. + +The new functions can serve as the single point of truth for +the number of SYNC pulses we use. + +Cc: Jouni Högander +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20230329172434.18744-2-ville.syrjala@linux.intel.com +Reviewed-by: Jouni Högander +Stable-dep-of: 2d6f2f79e065 ("drm/i915: Use 18 fast wake AUX sync len") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/intel_dp_aux.c | 32 +++++++++++++++++++-- + 1 file changed, 29 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c +index 30c98810e28bb..2ffd68b07984b 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c +@@ -117,6 +117,32 @@ static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) + return index ? 
0 : 1; + } + ++static int intel_dp_aux_sync_len(void) ++{ ++ int precharge = 16; /* 10-16 */ ++ int preamble = 16; ++ ++ return precharge + preamble; ++} ++ ++static int intel_dp_aux_fw_sync_len(void) ++{ ++ int precharge = 16; /* 10-16 */ ++ int preamble = 8; ++ ++ return precharge + preamble; ++} ++ ++static int g4x_dp_aux_precharge_len(void) ++{ ++ int precharge_min = 10; ++ int preamble = 16; ++ ++ /* HW wants the length of the extra precharge in 2us units */ ++ return (intel_dp_aux_sync_len() - ++ precharge_min - preamble) / 2; ++} ++ + static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 aux_clock_divider) +@@ -139,7 +165,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + timeout | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | +- (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | ++ (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); + } + +@@ -163,8 +189,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, + DP_AUX_CH_CTL_TIME_OUT_MAX | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | +- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) | +- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); ++ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | ++ DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + ret |= DP_AUX_CH_CTL_TBT_IO; +-- +2.39.2 + diff --git a/queue-6.3/drm-i915-selftests-add-some-missing-error-propagatio.patch b/queue-6.3/drm-i915-selftests-add-some-missing-error-propagatio.patch new file mode 100644 index 00000000000..5a070f86c8c --- /dev/null +++ b/queue-6.3/drm-i915-selftests-add-some-missing-error-propagatio.patch @@ -0,0 +1,73 @@ +From 4af174d398b0de2b692f342d8b33e54ce6cf0640 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 14:11:35 +0100 +Subject: drm/i915/selftests: Add some missing error propagation + +From: Tvrtko Ursulin + +[ Upstream commit 79d0150d2d983a4f6efee676cea06027f586fcd0 ] + +Add some missing error propagation in live_parallel_switch. + +To avoid needlessly burdening the various backport processes, note I am +not marking it as a fix against any patches and not copying stable since +it is debug/selftests only code. 
+ +Signed-off-by: Tvrtko Ursulin +Reported-by: Dan Carpenter +Cc: Andi Shyti +Reviewed-by: Andi Shyti +Fixes: 50d16d44cce4 ("drm/i915/selftests: Exercise context switching in parallel") +Fixes: 6407cf533217 ("drm/i915/selftests: Stop using kthread_stop()") +Link: https://patchwork.freedesktop.org/patch/msgid/20230605131135.396854-1-tvrtko.ursulin@linux.intel.com +(cherry picked from commit 412fa1f097f48c8c1321806dd25e46618e0da147) +Signed-off-by: Joonas Lahtinen +Signed-off-by: Sasha Levin +--- + .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +index a81fa6a20f5aa..7b516b1a4915b 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg) + continue; + + ce = intel_context_create(data[m].ce[0]->engine); +- if (IS_ERR(ce)) ++ if (IS_ERR(ce)) { ++ err = PTR_ERR(ce); + goto out; ++ } + + err = intel_context_pin(ce); + if (err) { +@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg) + + worker = kthread_create_worker(0, "igt/parallel:%s", + data[n].ce[0]->engine->name); +- if (IS_ERR(worker)) ++ if (IS_ERR(worker)) { ++ err = PTR_ERR(worker); + goto out; ++ } + + data[n].worker = worker; + } +@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg) + } + } + +- if (igt_live_test_end(&t)) +- err = -EIO; ++ if (igt_live_test_end(&t)) { ++ err = err ?: -EIO; ++ break; ++ } + } + + out: +-- +2.39.2 + diff --git a/queue-6.3/drm-i915-use-18-fast-wake-aux-sync-len.patch b/queue-6.3/drm-i915-use-18-fast-wake-aux-sync-len.patch new file mode 100644 index 00000000000..0ff407c3ff8 --- /dev/null +++ b/queue-6.3/drm-i915-use-18-fast-wake-aux-sync-len.patch @@ -0,0 +1,47 @@ +From 522a6a5c0a26beeab1681656ee8e08eb89c828c1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 30 May 2023 13:16:49 +0300 +Subject: drm/i915: Use 18 fast wake AUX sync len +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jouni Högander + +[ Upstream commit 2d6f2f79e06571d41eb1223abebe9097511c9544 ] + +HW default for wake sync pulses is 18. 10 precharge and 8 preamble. There +is no reason to change this especially as it is causing problems with +certain eDP panels. 
+ +v3: Change "Fixes:" commit +v2: Remove "fast wake" repeat from subject + +Signed-off-by: Jouni Högander +Fixes: e1c71f8f9180 ("drm/i915: Fix fast wake AUX sync len") +Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8475 +Reviewed-by: Luca Coelho +Link: https://patchwork.freedesktop.org/patch/msgid/20230530101649.2549949-1-jouni.hogander@intel.com +(cherry picked from commit b29a20f7c4995a059ed764ce42389857426397c7) +Signed-off-by: Joonas Lahtinen +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/intel_dp_aux.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c +index 2ffd68b07984b..36d6ece8b4616 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c +@@ -127,7 +127,7 @@ static int intel_dp_aux_sync_len(void) + + static int intel_dp_aux_fw_sync_len(void) + { +- int precharge = 16; /* 10-16 */ ++ int precharge = 10; /* 10-16 */ + int preamble = 8; + + return precharge + preamble; +-- +2.39.2 + diff --git a/queue-6.3/drm-lima-fix-sched-context-destroy.patch b/queue-6.3/drm-lima-fix-sched-context-destroy.patch new file mode 100644 index 00000000000..e7f27f71498 --- /dev/null +++ b/queue-6.3/drm-lima-fix-sched-context-destroy.patch @@ -0,0 +1,44 @@ +From 0f7a653d525e3aecb54a8c0a865fae0aef824292 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 16:32:47 +0200 +Subject: drm/lima: fix sched context destroy + +From: Erico Nunes + +[ Upstream commit 6eea63c7090b20ee41032d3e478e617b219d69aa ] + +The drm sched entity must be flushed before finishing, to account for +jobs potentially still in flight at that time. +Lima did not do this flush until now, so switch the destroy call to the +drm_sched_entity_destroy() wrapper which will take care of that. + +This fixes a regression on lima which started since the rework in +commit 2fdb8a8f07c2 ("drm/scheduler: rework entity flush, kill and fini") +where some specific types of applications may hang indefinitely. 
+ +Fixes: 2fdb8a8f07c2 ("drm/scheduler: rework entity flush, kill and fini") +Reviewed-by: Vasily Khoruzhick +Signed-off-by: Erico Nunes +Signed-off-by: Qiang Yu +Link: https://patchwork.freedesktop.org/patch/msgid/20230606143247.433018-1-nunes.erico@gmail.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/lima/lima_sched.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c +index ff003403fbbc7..ffd91a5ee2990 100644 +--- a/drivers/gpu/drm/lima/lima_sched.c ++++ b/drivers/gpu/drm/lima/lima_sched.c +@@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe, + void lima_sched_context_fini(struct lima_sched_pipe *pipe, + struct lima_sched_context *context) + { +- drm_sched_entity_fini(&context->base); ++ drm_sched_entity_destroy(&context->base); + } + + struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task) +-- +2.39.2 + diff --git a/queue-6.3/drm-msm-a6xx-initialize-gmu-mutex-earlier.patch b/queue-6.3/drm-msm-a6xx-initialize-gmu-mutex-earlier.patch new file mode 100644 index 00000000000..aff3f8310d0 --- /dev/null +++ b/queue-6.3/drm-msm-a6xx-initialize-gmu-mutex-earlier.patch @@ -0,0 +1,114 @@ +From 8cbaa31645e5932852fcadcde276be9158cabcf1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 Apr 2023 19:59:08 +0300 +Subject: drm/msm/a6xx: initialize GMU mutex earlier + +From: Dmitry Baryshkov + +[ Upstream commit 12abd735f0300600bfc01b2a3832b966312df205 ] + +Move GMU mutex initialization earlier to make sure that it is always +initialized. a6xx_destroy can be called from ther failure path before +GMU initialization. + +This fixes the following backtrace: + +------------[ cut here ]------------ +DEBUG_LOCKS_WARN_ON(lock->magic != lock) +WARNING: CPU: 0 PID: 58 at kernel/locking/mutex.c:582 __mutex_lock+0x1ec/0x3d0 +Modules linked in: +CPU: 0 PID: 58 Comm: kworker/u16:1 Not tainted 6.3.0-rc5-00155-g187c06436519 #565 +Hardware name: Qualcomm Technologies, Inc. 
SM8350 HDK (DT) +Workqueue: events_unbound deferred_probe_work_func +pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +pc : __mutex_lock+0x1ec/0x3d0 +lr : __mutex_lock+0x1ec/0x3d0 +sp : ffff800008993620 +x29: ffff800008993620 x28: 0000000000000002 x27: ffff47b253c52800 +x26: 0000000001000606 x25: ffff47b240bb2810 x24: fffffffffffffff4 +x23: 0000000000000000 x22: ffffc38bba15ac14 x21: 0000000000000002 +x20: ffff800008993690 x19: ffff47b2430cc668 x18: fffffffffffe98f0 +x17: 6f74616c75676572 x16: 20796d6d75642067 x15: 0000000000000038 +x14: 0000000000000000 x13: ffffc38bbba050b8 x12: 0000000000000666 +x11: 0000000000000222 x10: ffffc38bbba603e8 x9 : ffffc38bbba050b8 +x8 : 00000000ffffefff x7 : ffffc38bbba5d0b8 x6 : 0000000000000222 +x5 : 000000000000bff4 x4 : 40000000fffff222 x3 : 0000000000000000 +x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff47b240cb1880 +Call trace: + __mutex_lock+0x1ec/0x3d0 + mutex_lock_nested+0x2c/0x38 + a6xx_destroy+0xa0/0x138 + a6xx_gpu_init+0x41c/0x618 + adreno_bind+0x188/0x290 + component_bind_all+0x118/0x248 + msm_drm_bind+0x1c0/0x670 + try_to_bring_up_aggregate_device+0x164/0x1d0 + __component_add+0xa8/0x16c + component_add+0x14/0x20 + dsi_dev_attach+0x20/0x2c + dsi_host_attach+0x9c/0x144 + devm_mipi_dsi_attach+0x34/0xac + lt9611uxc_attach_dsi.isra.0+0x84/0xfc + lt9611uxc_probe+0x5b8/0x67c + i2c_device_probe+0x1ac/0x358 + really_probe+0x148/0x2ac + __driver_probe_device+0x78/0xe0 + driver_probe_device+0x3c/0x160 + __device_attach_driver+0xb8/0x138 + bus_for_each_drv+0x84/0xe0 + __device_attach+0x9c/0x188 + device_initial_probe+0x14/0x20 + bus_probe_device+0xac/0xb0 + deferred_probe_work_func+0x8c/0xc8 + process_one_work+0x2bc/0x594 + worker_thread+0x228/0x438 + kthread+0x108/0x10c + ret_from_fork+0x10/0x20 +irq event stamp: 299345 +hardirqs last enabled at (299345): [] put_cpu_partial+0x1c8/0x22c +hardirqs last disabled at (299344): [] put_cpu_partial+0x1c0/0x22c +softirqs last enabled at (296752): [] _stext+0x434/0x4e8 +softirqs last disabled at (296741): [] ____do_softirq+0x10/0x1c +---[ end trace 0000000000000000 ]--- + +Fixes: 4cd15a3e8b36 ("drm/msm/a6xx: Make GPU destroy a bit safer") +Cc: Douglas Anderson +Signed-off-by: Dmitry Baryshkov +Reviewed-by: Douglas Anderson +Patchwork: https://patchwork.freedesktop.org/patch/531540/ +Signed-off-by: Rob Clark +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2 -- + drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 ++ + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +index 7f5bc73b20402..611311b65b168 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +@@ -1514,8 +1514,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) + if (!pdev) + return -ENODEV; + +- mutex_init(&gmu->lock); +- + gmu->dev = &pdev->dev; + + of_dma_configure(gmu->dev, node, true); +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 6faea5049f765..2942d2548ce69 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1998,6 +1998,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) + adreno_gpu = &a6xx_gpu->base; + gpu = &adreno_gpu->base; + ++ mutex_init(&a6xx_gpu->gmu.lock); ++ + adreno_gpu->registers = NULL; + + /* +-- +2.39.2 + diff --git a/queue-6.3/ice-make-writes-to-dev-gnssx-synchronous.patch 
b/queue-6.3/ice-make-writes-to-dev-gnssx-synchronous.patch new file mode 100644 index 00000000000..5294cff9569 --- /dev/null +++ b/queue-6.3/ice-make-writes-to-dev-gnssx-synchronous.patch @@ -0,0 +1,224 @@ +From b5737a06c3187f09bc129da0dcc82892975463c9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 10:12:53 -0700 +Subject: ice: make writes to /dev/gnssX synchronous + +From: Michal Schmidt + +[ Upstream commit bf15bb38ec7f4ff522da5c20e1673dbda7159938 ] + +The current ice driver's GNSS write implementation buffers writes and +works through them asynchronously in a kthread. That's bad because: + - The GNSS write_raw operation is supposed to be synchronous[1][2]. + - There is no upper bound on the number of pending writes. + Userspace can submit writes much faster than the driver can process, + consuming unlimited amounts of kernel memory. + +A patch that's currently on review[3] ("[v3,net] ice: Write all GNSS +buffers instead of first one") would add one more problem: + - The possibility of waiting for a very long time to flush the write + work when doing rmmod, softlockups. + +To fix these issues, simplify the implementation: Drop the buffering, +the write_work, and make the writes synchronous. + +I tested this with gpsd and ubxtool. + +[1] https://events19.linuxfoundation.org/wp-content/uploads/2017/12/The-GNSS-Subsystem-Johan-Hovold-Hovold-Consulting-AB.pdf + "User interface" slide. +[2] A comment in drivers/gnss/core.c:gnss_write(): + /* Ignoring O_NONBLOCK, write_raw() is synchronous. */ +[3] https://patchwork.ozlabs.org/project/intel-wired-lan/patch/20230217120541.16745-1-karol.kolacinski@intel.com/ + +Fixes: d6b98c8d242a ("ice: add write functionality for GNSS TTY") +Signed-off-by: Michal Schmidt +Reviewed-by: Simon Horman +Tested-by: Sunitha Mekala (A Contingent worker at Intel) +Signed-off-by: Tony Nguyen +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/ice/ice_common.c | 2 +- + drivers/net/ethernet/intel/ice/ice_common.h | 2 +- + drivers/net/ethernet/intel/ice/ice_gnss.c | 64 ++------------------- + drivers/net/ethernet/intel/ice/ice_gnss.h | 10 ---- + 4 files changed, 6 insertions(+), 72 deletions(-) + +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c +index c2fda4fa4188c..b534d7726d3e8 100644 +--- a/drivers/net/ethernet/intel/ice/ice_common.c ++++ b/drivers/net/ethernet/intel/ice/ice_common.c +@@ -5169,7 +5169,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, + */ + int + ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, +- u16 bus_addr, __le16 addr, u8 params, u8 *data, ++ u16 bus_addr, __le16 addr, u8 params, const u8 *data, + struct ice_sq_cd *cd) + { + struct ice_aq_desc desc = { 0 }; +diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h +index 8ba5f935a092b..81961a7d65985 100644 +--- a/drivers/net/ethernet/intel/ice/ice_common.h ++++ b/drivers/net/ethernet/intel/ice/ice_common.h +@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, + struct ice_sq_cd *cd); + int + ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, +- u16 bus_addr, __le16 addr, u8 params, u8 *data, ++ u16 bus_addr, __le16 addr, u8 params, const u8 *data, + struct ice_sq_cd *cd); + bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); + #endif /* _ICE_COMMON_H_ */ +diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c +index 8dec748bb53a4..12086aafb42fb 100644 +--- a/drivers/net/ethernet/intel/ice/ice_gnss.c ++++ b/drivers/net/ethernet/intel/ice/ice_gnss.c +@@ -16,8 +16,8 @@ + * * number of bytes written - success + * * negative - error code + */ +-static unsigned int +-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) ++static int ++ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size) + { + struct ice_aqc_link_topo_addr link_topo; + struct ice_hw *hw = &pf->hw; +@@ -72,39 +72,7 @@ ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) + dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n", + offset, size, err); + +- return offset; +-} +- +-/** +- * ice_gnss_write_pending - Write all pending data to internal GNSS +- * @work: GNSS write work structure +- */ +-static void ice_gnss_write_pending(struct kthread_work *work) +-{ +- struct gnss_serial *gnss = container_of(work, struct gnss_serial, +- write_work); +- struct ice_pf *pf = gnss->back; +- +- if (!pf) +- return; +- +- if (!test_bit(ICE_FLAG_GNSS, pf->flags)) +- return; +- +- if (!list_empty(&gnss->queue)) { +- struct gnss_write_buf *write_buf = NULL; +- unsigned int bytes; +- +- write_buf = list_first_entry(&gnss->queue, +- struct gnss_write_buf, queue); +- +- bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size); +- dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes); +- +- list_del(&write_buf->queue); +- kfree(write_buf->buf); +- kfree(write_buf); +- } ++ return err; + } + + /** +@@ -224,8 +192,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) + pf->gnss_serial = gnss; + + kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); +- INIT_LIST_HEAD(&gnss->queue); +- kthread_init_work(&gnss->write_work, 
ice_gnss_write_pending); + kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + if (IS_ERR(kworker)) { + kfree(gnss); +@@ -285,7 +251,6 @@ static void ice_gnss_close(struct gnss_device *gdev) + if (!gnss) + return; + +- kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); + } + +@@ -304,10 +269,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, + size_t count) + { + struct ice_pf *pf = gnss_get_drvdata(gdev); +- struct gnss_write_buf *write_buf; + struct gnss_serial *gnss; +- unsigned char *cmd_buf; +- int err = count; + + /* We cannot write a single byte using our I2C implementation. */ + if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) +@@ -323,24 +285,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, + if (!gnss) + return -ENODEV; + +- cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); +- if (!cmd_buf) +- return -ENOMEM; +- +- memcpy(cmd_buf, buf, count); +- write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); +- if (!write_buf) { +- kfree(cmd_buf); +- return -ENOMEM; +- } +- +- write_buf->buf = cmd_buf; +- write_buf->size = count; +- INIT_LIST_HEAD(&write_buf->queue); +- list_add_tail(&write_buf->queue, &gnss->queue); +- kthread_queue_work(gnss->kworker, &gnss->write_work); +- +- return err; ++ return ice_gnss_do_write(pf, buf, count); + } + + static const struct gnss_operations ice_gnss_ops = { +@@ -436,7 +381,6 @@ void ice_gnss_exit(struct ice_pf *pf) + if (pf->gnss_serial) { + struct gnss_serial *gnss = pf->gnss_serial; + +- kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); + kthread_destroy_worker(gnss->kworker); + gnss->kworker = NULL; +diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h +index 4d49e5b0b4b81..d95ca3928b2ea 100644 +--- a/drivers/net/ethernet/intel/ice/ice_gnss.h ++++ b/drivers/net/ethernet/intel/ice/ice_gnss.h +@@ -23,26 +23,16 @@ + #define ICE_MAX_UBX_READ_TRIES 255 + #define ICE_MAX_UBX_ACK_READ_TRIES 4095 + +-struct gnss_write_buf { +- struct list_head queue; +- unsigned int size; +- unsigned char *buf; +-}; +- + /** + * struct gnss_serial - data used to initialize GNSS TTY port + * @back: back pointer to PF + * @kworker: kwork thread for handling periodic work + * @read_work: read_work function for handling GNSS reads +- * @write_work: write_work function for handling GNSS writes +- * @queue: write buffers queue + */ + struct gnss_serial { + struct ice_pf *back; + struct kthread_worker *kworker; + struct kthread_delayed_work read_work; +- struct kthread_work write_work; +- struct list_head queue; + }; + + #if IS_ENABLED(CONFIG_GNSS) +-- +2.39.2 + diff --git a/queue-6.3/ipv6-rpl-fix-route-of-death.patch b/queue-6.3/ipv6-rpl-fix-route-of-death.patch new file mode 100644 index 00000000000..7c31b36a8ed --- /dev/null +++ b/queue-6.3/ipv6-rpl-fix-route-of-death.patch @@ -0,0 +1,195 @@ +From d81b07d3ed40d5d707eb7eb2d6969f1bbf2aa93b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 11:06:17 -0700 +Subject: ipv6: rpl: Fix Route of Death. + +From: Kuniyuki Iwashima + +[ Upstream commit a2f4c143d76b1a47c91ef9bc46907116b111da0b ] + +A remote DoS vulnerability of RPL Source Routing is assigned CVE-2023-2156. 
+ +The Source Routing Header (SRH) has the following format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next Header | Hdr Ext Len | Routing Type | Segments Left | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | CmprI | CmprE | Pad | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + . . + . Addresses[1..n] . + . . + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +The originator of an SRH places the first hop's IPv6 address in the IPv6 +header's IPv6 Destination Address and the second hop's IPv6 address as +the first address in Addresses[1..n]. + +The CmprI and CmprE fields indicate the number of prefix octets that are +shared with the IPv6 Destination Address. When CmprI or CmprE is not 0, +Addresses[1..n] are compressed as follows: + + 1..n-1 : (16 - CmprI) bytes + n : (16 - CmprE) bytes + +Segments Left indicates the number of route segments remaining. When the +value is not zero, the SRH is forwarded to the next hop. Its address +is extracted from Addresses[n - Segment Left + 1] and swapped with IPv6 +Destination Address. + +When Segment Left is greater than or equal to 2, the size of SRH is not +changed because Addresses[1..n-1] are decompressed and recompressed with +CmprI. + +OTOH, when Segment Left changes from 1 to 0, the new SRH could have a +different size because Addresses[1..n-1] are decompressed with CmprI and +recompressed with CmprE. + +Let's say CmprI is 15 and CmprE is 0. When we receive SRH with Segment +Left >= 2, Addresses[1..n-1] have 1 byte for each, and Addresses[n] has +16 bytes. When Segment Left is 1, Addresses[1..n-1] is decompressed to +16 bytes and not recompressed. Finally, the new SRH will need more room +in the header, and the size is (16 - 1) * (n - 1) bytes. + +Here the max value of n is 255 as Segment Left is u8, so in the worst case, +we have to allocate 3825 bytes in the skb headroom. However, now we only +allocate a small fixed buffer that is IPV6_RPL_SRH_WORST_SWAP_SIZE (16 + 7 +bytes). If the decompressed size overflows the room, skb_push() hits BUG() +below [0]. + +Instead of allocating the fixed buffer for every packet, let's allocate +enough headroom only when we receive SRH with Segment Left 1. + +[0]: +skbuff: skb_under_panic: text:ffffffff81c9f6e2 len:576 put:576 head:ffff8880070b5180 data:ffff8880070b4fb0 tail:0x70 end:0x140 dev:lo +kernel BUG at net/core/skbuff.c:200! 
+invalid opcode: 0000 [#1] PREEMPT SMP PTI +CPU: 0 PID: 154 Comm: python3 Not tainted 6.4.0-rc4-00190-gc308e9ec0047 #7 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 +RIP: 0010:skb_panic (net/core/skbuff.c:200) +Code: 4f 70 50 8b 87 bc 00 00 00 50 8b 87 b8 00 00 00 50 ff b7 c8 00 00 00 4c 8b 8f c0 00 00 00 48 c7 c7 80 6e 77 82 e8 ad 8b 60 ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90 +RSP: 0018:ffffc90000003da0 EFLAGS: 00000246 +RAX: 0000000000000085 RBX: ffff8880058a6600 RCX: 0000000000000000 +RDX: 0000000000000000 RSI: ffff88807dc1c540 RDI: ffff88807dc1c540 +RBP: ffffc90000003e48 R08: ffffffff82b392c8 R09: 00000000ffffdfff +R10: ffffffff82a592e0 R11: ffffffff82b092e0 R12: ffff888005b1c800 +R13: ffff8880070b51b8 R14: ffff888005b1ca18 R15: ffff8880070b5190 +FS: 00007f4539f0b740(0000) GS:ffff88807dc00000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 000055670baf3000 CR3: 0000000005b0e000 CR4: 00000000007506f0 +PKRU: 55555554 +Call Trace: + + skb_push (net/core/skbuff.c:210) + ipv6_rthdr_rcv (./include/linux/skbuff.h:2880 net/ipv6/exthdrs.c:634 net/ipv6/exthdrs.c:718) + ip6_protocol_deliver_rcu (net/ipv6/ip6_input.c:437 (discriminator 5)) + ip6_input_finish (./include/linux/rcupdate.h:805 net/ipv6/ip6_input.c:483) + __netif_receive_skb_one_core (net/core/dev.c:5494) + process_backlog (./include/linux/rcupdate.h:805 net/core/dev.c:5934) + __napi_poll (net/core/dev.c:6496) + net_rx_action (net/core/dev.c:6565 net/core/dev.c:6696) + __do_softirq (./arch/x86/include/asm/jump_label.h:27 ./include/linux/jump_label.h:207 ./include/trace/events/irq.h:142 kernel/softirq.c:572) + do_softirq (kernel/softirq.c:472 kernel/softirq.c:459) + + + __local_bh_enable_ip (kernel/softirq.c:396) + __dev_queue_xmit (net/core/dev.c:4272) + ip6_finish_output2 (./include/net/neighbour.h:544 net/ipv6/ip6_output.c:134) + rawv6_sendmsg (./include/net/dst.h:458 ./include/linux/netfilter.h:303 net/ipv6/raw.c:656 net/ipv6/raw.c:914) + sock_sendmsg (net/socket.c:724 net/socket.c:747) + __sys_sendto (net/socket.c:2144) + __x64_sys_sendto (net/socket.c:2156 net/socket.c:2152 net/socket.c:2152) + do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80) + entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120) +RIP: 0033:0x7f453a138aea +Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89 +RSP: 002b:00007ffcc212a1c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c +RAX: ffffffffffffffda RBX: 00007ffcc212a288 RCX: 00007f453a138aea +RDX: 0000000000000060 RSI: 00007f4539084c20 RDI: 0000000000000003 +RBP: 00007f4538308e80 R08: 00007ffcc212a300 R09: 000000000000001c +R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 +R13: ffffffffc4653600 R14: 0000000000000001 R15: 00007f4539712d1b + +Modules linked in: + +Fixes: 8610c7c6e3bd ("net: ipv6: add support for rpl sr exthdr") +Reported-by: Max VA +Closes: https://www.interruptlabs.co.uk/articles/linux-ipv6-route-of-death +Signed-off-by: Kuniyuki Iwashima +Reviewed-by: Eric Dumazet +Link: https://lore.kernel.org/r/20230605180617.67284-1-kuniyu@amazon.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + include/net/rpl.h | 3 --- + net/ipv6/exthdrs.c | 29 +++++++++++------------------ + 2 files changed, 11 insertions(+), 21 deletions(-) + +diff --git a/include/net/rpl.h 
b/include/net/rpl.h +index 308ef0a05caef..30fe780d1e7c8 100644 +--- a/include/net/rpl.h ++++ b/include/net/rpl.h +@@ -23,9 +23,6 @@ static inline int rpl_init(void) + static inline void rpl_exit(void) {} + #endif + +-/* Worst decompression memory usage ipv6 address (16) + pad 7 */ +-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) +- + size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, + unsigned char cmpre); + +diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c +index a8d961d3a477f..5fa0e37305d9d 100644 +--- a/net/ipv6/exthdrs.c ++++ b/net/ipv6/exthdrs.c +@@ -569,24 +569,6 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) + return -1; + } + +- if (skb_cloned(skb)) { +- if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0, +- GFP_ATOMIC)) { +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), +- IPSTATS_MIB_OUTDISCARDS); +- kfree_skb(skb); +- return -1; +- } +- } else { +- err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE); +- if (unlikely(err)) { +- kfree_skb(skb); +- return -1; +- } +- } +- +- hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb); +- + if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri, + hdr->cmpre))) { + kfree_skb(skb); +@@ -630,6 +612,17 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) + skb_pull(skb, ((hdr->hdrlen + 1) << 3)); + skb_postpull_rcsum(skb, oldhdr, + sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); ++ if (unlikely(!hdr->segments_left)) { ++ if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, ++ GFP_ATOMIC)) { ++ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); ++ kfree_skb(skb); ++ kfree(buf); ++ return -1; ++ } ++ ++ oldhdr = ipv6_hdr(skb); ++ } + skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); + skb_reset_network_header(skb); + skb_mac_header_rebuild(skb); +-- +2.39.2 + diff --git a/queue-6.3/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch b/queue-6.3/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch new file mode 100644 index 00000000000..e1903fd0d12 --- /dev/null +++ b/queue-6.3/lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch @@ -0,0 +1,40 @@ +From ea1f899bddee4effa12b601dac7f6278ba01899a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 20:28:15 +0200 +Subject: lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release() + +From: Ben Hutchings + +[ Upstream commit 7c5d4801ecf0564c860033d89726b99723c55146 ] + +irq_cpu_rmap_release() calls cpu_rmap_put(), which may free the rmap. +So we need to clear the pointer to our glue structure in rmap before +doing that, not after. 
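The rule the one-line reorder below follows can be sketched with generic names (this is not the cpu_rmap API, just an illustration of the ordering): finish every access through the object while a reference is still held, and drop the reference last, because the drop may free the object.

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
	void *back_ptr;
};

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);	/* last reference: o is gone after this returns */
}

static void detach(struct obj *o)
{
	o->back_ptr = NULL;	/* still safe, we hold a reference */
	obj_put(o);		/* may free o, so no accesses after this point */
}

Doing the two statements in detach() in the opposite order is exactly the use-after-free the patch removes.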
+ +Fixes: 4e0473f1060a ("lib: cpu_rmap: Avoid use after free on rmap->obj array entries") +Signed-off-by: Ben Hutchings +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/ZHo0vwquhOy3FaXc@decadent.org.uk +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + lib/cpu_rmap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c +index e77f12bb3c774..1833ad73de6fc 100644 +--- a/lib/cpu_rmap.c ++++ b/lib/cpu_rmap.c +@@ -268,8 +268,8 @@ static void irq_cpu_rmap_release(struct kref *ref) + struct irq_glue *glue = + container_of(ref, struct irq_glue, notify.kref); + +- cpu_rmap_put(glue->rmap); + glue->rmap->obj[glue->index] = NULL; ++ cpu_rmap_put(glue->rmap); + kfree(glue); + } + +-- +2.39.2 + diff --git a/queue-6.3/neighbour-fix-unaligned-access-to-pneigh_entry.patch b/queue-6.3/neighbour-fix-unaligned-access-to-pneigh_entry.patch new file mode 100644 index 00000000000..b8bae2fa486 --- /dev/null +++ b/queue-6.3/neighbour-fix-unaligned-access-to-pneigh_entry.patch @@ -0,0 +1,41 @@ +From 95a3b4faa3ef37c3b0ac30ca9107e31823bbe430 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 09:54:32 +0800 +Subject: neighbour: fix unaligned access to pneigh_entry + +From: Qingfang DENG + +[ Upstream commit ed779fe4c9b5a20b4ab4fd6f3e19807445bb78c7 ] + +After the blamed commit, the member key is longer 4-byte aligned. On +platforms that do not support unaligned access, e.g., MIPS32R2 with +unaligned_action set to 1, this will trigger a crash when accessing +an IPv6 pneigh_entry, as the key is cast to an in6_addr pointer. + +Change the type of the key to u32 to make it aligned. + +Fixes: 62dd93181aaa ("[IPV6] NDISC: Set per-entry is_router flag in Proxy NA.") +Signed-off-by: Qingfang DENG +Link: https://lore.kernel.org/r/20230601015432.159066-1-dqfext@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + include/net/neighbour.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 2f2a6023fb0e5..94a1599824d8f 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -180,7 +180,7 @@ struct pneigh_entry { + netdevice_tracker dev_tracker; + u32 flags; + u8 protocol; +- u8 key[]; ++ u32 key[]; + }; + + /* +-- +2.39.2 + diff --git a/queue-6.3/net-bcmgenet-fix-eee-implementation.patch b/queue-6.3/net-bcmgenet-fix-eee-implementation.patch new file mode 100644 index 00000000000..ee80ca5356e --- /dev/null +++ b/queue-6.3/net-bcmgenet-fix-eee-implementation.patch @@ -0,0 +1,141 @@ +From 37e2ffa975a83837dfbdfc26a91ba8e9ce484b4c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 14:43:47 -0700 +Subject: net: bcmgenet: Fix EEE implementation + +From: Florian Fainelli + +[ Upstream commit a9f31047baca57d47440c879cf259b86f900260c ] + +We had a number of short comings: + +- EEE must be re-evaluated whenever the state machine detects a link + change as wight be switching from a link partner with EEE + enabled/disabled + +- tx_lpi_enabled controls whether EEE should be enabled/disabled for the + transmit path, which applies to the TBUF block + +- We do not need to forcibly enable EEE upon system resume, as the PHY + state machine will trigger a link event that will do that, too + +Fixes: 6ef398ea60d9 ("net: bcmgenet: add EEE support") +Signed-off-by: Florian Fainelli +Reviewed-by: Russell King (Oracle) +Link: https://lore.kernel.org/r/20230606214348.2408018-1-florian.fainelli@broadcom.com +Signed-off-by: Jakub Kicinski 
+Signed-off-by: Sasha Levin +--- + .../net/ethernet/broadcom/genet/bcmgenet.c | 22 +++++++------------ + .../net/ethernet/broadcom/genet/bcmgenet.h | 3 +++ + drivers/net/ethernet/broadcom/genet/bcmmii.c | 5 +++++ + 3 files changed, 16 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index eca0c92c0c84d..2b5761ad2f92f 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, + } + } + +-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) ++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, ++ bool tx_lpi_enabled) + { + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; +@@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); +- if (enable) ++ if (tx_lpi_enabled) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); +@@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; ++ priv->eee.tx_lpi_enabled = tx_lpi_enabled; + } + + static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +@@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; ++ e->tx_lpi_enabled = p->tx_lpi_enabled; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +@@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) + { + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; +- int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; +@@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { +- bcmgenet_eee_enable_set(dev, false); ++ bcmgenet_eee_enable_set(dev, false, false); + } else { +- ret = phy_init_eee(dev->phydev, false); +- if (ret) { +- netif_err(priv, hw, dev, "EEE initialization failed\n"); +- return ret; +- } +- ++ p->eee_active = phy_init_eee(dev->phydev, false) >= 0; + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); +- bcmgenet_eee_enable_set(dev, true); ++ bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); + } + + return phy_ethtool_set_eee(dev->phydev, e); +@@ -4279,9 +4276,6 @@ static int bcmgenet_resume(struct device *d) + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + +- if (priv->eee.eee_enabled) +- bcmgenet_eee_enable_set(dev, true); +- + bcmgenet_netif_start(dev); + + netif_device_attach(dev); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h +index 946f6e283c4e6..1985c0ec4da2a 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h +@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + ++void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, ++ bool 
tx_lpi_enabled); ++ + #endif /* __BCMGENET_H__ */ +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c +index be042905ada2a..c15ed0acdb777 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c +@@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev) + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); ++ ++ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; ++ bcmgenet_eee_enable_set(dev, ++ priv->eee.eee_enabled && priv->eee.eee_active, ++ priv->eee.tx_lpi_enabled); + } + + /* setup netdev link state when PHY link status change and +-- +2.39.2 + diff --git a/queue-6.3/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch b/queue-6.3/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch new file mode 100644 index 00000000000..d75b8dae3b1 --- /dev/null +++ b/queue-6.3/net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch @@ -0,0 +1,56 @@ +From 2f25a9b3db0459a9c19879c9439ff361663e02d6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 May 2023 16:38:26 +0200 +Subject: net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods + +From: Alexander Sverdlin + +[ Upstream commit 5a59a58ec25d44f853c26bdbfda47d73b3067435 ] + +LAN9303 doesn't associate FDB (ALR) entries with VLANs, it has just one +global Address Logic Resolution table [1]. + +Ignore VID in port_fdb_{add|del} methods, go on with the global table. This +is the same semantics as hellcreek or RZ/N1 implement. + +Visible symptoms: +LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to delete 00:xx:xx:xx:xx:cf vid 1 from fdb: -2 +LAN9303_MDIO 5b050000.ethernet-1:00: port 2 failed to add 00:xx:xx:xx:xx:cf vid 1 to fdb: -95 + +[1] https://ww1.microchip.com/downloads/en/DeviceDoc/00002308A.pdf + +Fixes: 0620427ea0d6 ("net: dsa: lan9303: Add fdb/mdb manipulation") +Signed-off-by: Alexander Sverdlin +Reviewed-by: Vladimir Oltean +Link: https://lore.kernel.org/r/20230531143826.477267-1-alexander.sverdlin@siemens.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/lan9303-core.c | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c +index cbe8318753471..c0215a8770f49 100644 +--- a/drivers/net/dsa/lan9303-core.c ++++ b/drivers/net/dsa/lan9303-core.c +@@ -1188,8 +1188,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port, + struct lan9303 *chip = ds->priv; + + dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); +- if (vid) +- return -EOPNOTSUPP; + + return lan9303_alr_add_port(chip, addr, port, false); + } +@@ -1201,8 +1199,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port, + struct lan9303 *chip = ds->priv; + + dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); +- if (vid) +- return -EOPNOTSUPP; + lan9303_alr_del_port(chip, addr, port); + + return 0; +-- +2.39.2 + diff --git a/queue-6.3/net-enetc-correct-rx_bytes-statistics-of-xdp.patch b/queue-6.3/net-enetc-correct-rx_bytes-statistics-of-xdp.patch new file mode 100644 index 00000000000..9bb98995505 --- /dev/null +++ b/queue-6.3/net-enetc-correct-rx_bytes-statistics-of-xdp.patch @@ -0,0 +1,43 @@ +From b102871a72f89d27567f8eda875992ec6ab4c8fc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 17:46:59 +0800 +Subject: net: enetc: correct rx_bytes statistics of XDP + +From: Wei Fang + +[ Upstream commit 
fdebd850cc065495abf1d64756496050bb22db67 ] + +The rx_bytes statistics of XDP are always zero, because rx_byte_cnt +is not updated after it is initialized to 0. So fix it. + +Fixes: d1b15102dd16 ("net: enetc: add support for XDP_DROP and XDP_PASS") +Signed-off-by: Wei Fang +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/freescale/enetc/enetc.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c +index f7248aed93d98..24024745ecef6 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c +@@ -1564,6 +1564,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, &xdp_buff); + ++ /* When set, the outer VLAN header is extracted and reported ++ * in the receive buffer descriptor. So rx_byte_cnt should ++ * add the length of the extracted VLAN header. ++ */ ++ if (bd_status & ENETC_RXBD_FLAG_VLAN) ++ rx_byte_cnt += VLAN_HLEN; ++ rx_byte_cnt += xdp_get_buff_len(&xdp_buff); ++ + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); + + switch (xdp_act) { +-- +2.39.2 + diff --git a/queue-6.3/net-enetc-correct-the-statistics-of-rx-bytes.patch b/queue-6.3/net-enetc-correct-the-statistics-of-rx-bytes.patch new file mode 100644 index 00000000000..f8b1bb2d162 --- /dev/null +++ b/queue-6.3/net-enetc-correct-the-statistics-of-rx-bytes.patch @@ -0,0 +1,50 @@ +From 3ab7bafb2209d42294e917c37ed6bf4e1f808e70 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 17:46:58 +0800 +Subject: net: enetc: correct the statistics of rx bytes + +From: Wei Fang + +[ Upstream commit 7190d0ff0e17690a9b1279d84a06473600ba2060 ] + +The rx_bytes of struct net_device_stats should count the length of +ethernet frames excluding the FCS. However, there are two problems +with the rx_bytes statistics of the current enetc driver. one is +that the length of VLAN header is not counted if the VLAN extraction +feature is enabled. The other is that the length of L2 header is not +counted, because eth_type_trans() is invoked before updating rx_bytes +which will subtract the length of L2 header from skb->len. +BTW, the rx_bytes statistics of XDP path also have similar problem, +I will fix it in another patch. + +Fixes: a800abd3ecb9 ("net: enetc: move skb creation into enetc_build_skb") +Signed-off-by: Wei Fang +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/freescale/enetc/enetc.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c +index 2fc712b24d126..f7248aed93d98 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c +@@ -1222,7 +1222,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + if (!skb) + break; + +- rx_byte_cnt += skb->len; ++ /* When set, the outer VLAN header is extracted and reported ++ * in the receive buffer descriptor. So rx_byte_cnt should ++ * add the length of the extracted VLAN header. 
++ */ ++ if (bd_status & ENETC_RXBD_FLAG_VLAN) ++ rx_byte_cnt += VLAN_HLEN; ++ rx_byte_cnt += skb->len + ETH_HLEN; + rx_frm_cnt++; + + napi_gro_receive(napi, skb); +-- +2.39.2 + diff --git a/queue-6.3/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch b/queue-6.3/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch new file mode 100644 index 00000000000..d57a05bb920 --- /dev/null +++ b/queue-6.3/net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch @@ -0,0 +1,100 @@ +From 91aff2645340e0845bd1cd6422ce1097e6764f0b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 12:13:05 +0900 +Subject: net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294 + +From: Akihiro Suda + +[ Upstream commit e209fee4118fe9a449d4d805361eb2de6796be39 ] + +With this commit, all the GIDs ("0 4294967294") can be written to the +"net.ipv4.ping_group_range" sysctl. + +Note that 4294967295 (0xffffffff) is an invalid GID (see gid_valid() in +include/linux/uidgid.h), and an attempt to register this number will cause +-EINVAL. + +Prior to this commit, only up to GID 2147483647 could be covered. +Documentation/networking/ip-sysctl.rst had "0 4294967295" as an example +value, but this example was wrong and causing -EINVAL. + +Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind") +Co-developed-by: Kuniyuki Iwashima +Signed-off-by: Kuniyuki Iwashima +Signed-off-by: Akihiro Suda +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + Documentation/networking/ip-sysctl.rst | 4 ++-- + include/net/ping.h | 6 +----- + net/ipv4/sysctl_net_ipv4.c | 8 ++++---- + 3 files changed, 7 insertions(+), 11 deletions(-) + +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 58a78a3166978..97ae2b5a6101c 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -1352,8 +1352,8 @@ ping_group_range - 2 INTEGERS + Restrict ICMP_PROTO datagram sockets to users in the group range. + The default is "1 0", meaning, that nobody (not even root) may + create ping sockets. Setting it to "100 100" would grant permissions +- to the single group. "0 4294967295" would enable it for the world, "100 +- 4294967295" would enable it for the users, but not daemons. ++ to the single group. "0 4294967294" would enable it for the world, "100 ++ 4294967294" would enable it for the users, but not daemons. + + tcp_early_demux - BOOLEAN + Enable early demux for established TCP sockets. +diff --git a/include/net/ping.h b/include/net/ping.h +index 9233ad3de0ade..bc7779262e603 100644 +--- a/include/net/ping.h ++++ b/include/net/ping.h +@@ -16,11 +16,7 @@ + #define PING_HTABLE_SIZE 64 + #define PING_HTABLE_MASK (PING_HTABLE_SIZE-1) + +-/* +- * gid_t is either uint or ushort. 
We want to pass it to +- * proc_dointvec_minmax(), so it must not be larger than MAX_INT +- */ +-#define GID_T_MAX (((gid_t)~0U) >> 1) ++#define GID_T_MAX (((gid_t)~0U) - 1) + + /* Compatibility glue so we can support IPv6 when it's compiled as a module */ + struct pingv6_ops { +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 40fe70fc2015d..88dfe51e68f3c 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -34,8 +34,8 @@ static int ip_ttl_min = 1; + static int ip_ttl_max = 255; + static int tcp_syn_retries_min = 1; + static int tcp_syn_retries_max = MAX_TCP_SYNCNT; +-static int ip_ping_group_range_min[] = { 0, 0 }; +-static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; ++static unsigned long ip_ping_group_range_min[] = { 0, 0 }; ++static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; + static u32 u32_max_div_HZ = UINT_MAX / HZ; + static int one_day_secs = 24 * 3600; + static u32 fib_multipath_hash_fields_all_mask __maybe_unused = +@@ -165,7 +165,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, + { + struct user_namespace *user_ns = current_user_ns(); + int ret; +- gid_t urange[2]; ++ unsigned long urange[2]; + kgid_t low, high; + struct ctl_table tmp = { + .data = &urange, +@@ -178,7 +178,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, + inet_get_ping_group_range_table(table, &low, &high); + urange[0] = from_kgid_munged(user_ns, low); + urange[1] = from_kgid_munged(user_ns, high); +- ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); ++ ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos); + + if (write && ret == 0) { + low = make_kgid(user_ns, urange[0]); +-- +2.39.2 + diff --git a/queue-6.3/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch b/queue-6.3/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch new file mode 100644 index 00000000000..74ab929eec9 --- /dev/null +++ b/queue-6.3/net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch @@ -0,0 +1,43 @@ +From 104a46d37dbbb8926a44061783367eab0f4bf4dc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 16:04:44 +0000 +Subject: net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down + +From: Eric Dumazet + +[ Upstream commit edf2e1d2019b2730d6076dbe4c040d37d7c10bbe ] + +skip_notify_on_dev_down ctl table expects this field +to be an int (4 bytes), not a bool (1 byte). + +Because proc_dou8vec_minmax() was added in 5.13, +this patch converts skip_notify_on_dev_down to an int. + +Following patch then converts the field to u8 and use proc_dou8vec_minmax(). 
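To make the size mismatch concrete, here is a sketch of the shape of the sysctl entry involved. The names are abbreviated and the real table lives elsewhere in the IPv6 code; the point is that proc_dointvec() copies sizeof(int) bytes through .data, so the backing field must really be an int. If .data pointed at a 1-byte bool, the 4-byte store would spill into whatever follows it.

#include <linux/sysctl.h>

static int skip_notify_example;	/* must be as wide as the handler assumes */

static struct ctl_table example_table[] = {
	{
		.procname	= "skip_notify_on_dev_down",
		.data		= &skip_notify_example,
		.maxlen		= sizeof(int),	/* what proc_dointvec works on */
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};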
+ +Fixes: 7c6bb7d2faaf ("net/ipv6: Add knob to skip DELROUTE message on device down") +Signed-off-by: Eric Dumazet +Reviewed-by: David Ahern +Acked-by: Matthieu Baerts +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + include/net/netns/ipv6.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h +index b4af4837d80b4..f6e6a3ab91489 100644 +--- a/include/net/netns/ipv6.h ++++ b/include/net/netns/ipv6.h +@@ -53,7 +53,7 @@ struct netns_sysctl_ipv6 { + int seg6_flowlabel; + u32 ioam6_id; + u64 ioam6_id_wide; +- bool skip_notify_on_dev_down; ++ int skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; + }; + +-- +2.39.2 + diff --git a/queue-6.3/net-openvswitch-fix-upcall-counter-access-before-all.patch b/queue-6.3/net-openvswitch-fix-upcall-counter-access-before-all.patch new file mode 100644 index 00000000000..0d0bc3fca00 --- /dev/null +++ b/queue-6.3/net-openvswitch-fix-upcall-counter-access-before-all.patch @@ -0,0 +1,175 @@ +From 79f31758ddbf6e4731ac27e2e8b83cc7cd3c0bc2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 13:56:35 +0200 +Subject: net: openvswitch: fix upcall counter access before allocation + +From: Eelco Chaudron + +[ Upstream commit de9df6c6b27e22d7bdd20107947ef3a20e687de5 ] + +Currently, the per cpu upcall counters are allocated after the vport is +created and inserted into the system. This could lead to the datapath +accessing the counters before they are allocated resulting in a kernel +Oops. + +Here is an example: + + PID: 59693 TASK: ffff0005f4f51500 CPU: 0 COMMAND: "ovs-vswitchd" + #0 [ffff80000a39b5b0] __switch_to at ffffb70f0629f2f4 + #1 [ffff80000a39b5d0] __schedule at ffffb70f0629f5cc + #2 [ffff80000a39b650] preempt_schedule_common at ffffb70f0629fa60 + #3 [ffff80000a39b670] dynamic_might_resched at ffffb70f0629fb58 + #4 [ffff80000a39b680] mutex_lock_killable at ffffb70f062a1388 + #5 [ffff80000a39b6a0] pcpu_alloc at ffffb70f0594460c + #6 [ffff80000a39b750] __alloc_percpu_gfp at ffffb70f05944e68 + #7 [ffff80000a39b760] ovs_vport_cmd_new at ffffb70ee6961b90 [openvswitch] + ... + + PID: 58682 TASK: ffff0005b2f0bf00 CPU: 0 COMMAND: "kworker/0:3" + #0 [ffff80000a5d2f40] machine_kexec at ffffb70f056a0758 + #1 [ffff80000a5d2f70] __crash_kexec at ffffb70f057e2994 + #2 [ffff80000a5d3100] crash_kexec at ffffb70f057e2ad8 + #3 [ffff80000a5d3120] die at ffffb70f0628234c + #4 [ffff80000a5d31e0] die_kernel_fault at ffffb70f062828a8 + #5 [ffff80000a5d3210] __do_kernel_fault at ffffb70f056a31f4 + #6 [ffff80000a5d3240] do_bad_area at ffffb70f056a32a4 + #7 [ffff80000a5d3260] do_translation_fault at ffffb70f062a9710 + #8 [ffff80000a5d3270] do_mem_abort at ffffb70f056a2f74 + #9 [ffff80000a5d32a0] el1_abort at ffffb70f06297dac + #10 [ffff80000a5d32d0] el1h_64_sync_handler at ffffb70f06299b24 + #11 [ffff80000a5d3410] el1h_64_sync at ffffb70f056812dc + #12 [ffff80000a5d3430] ovs_dp_upcall at ffffb70ee6963c84 [openvswitch] + #13 [ffff80000a5d3470] ovs_dp_process_packet at ffffb70ee6963fdc [openvswitch] + #14 [ffff80000a5d34f0] ovs_vport_receive at ffffb70ee6972c78 [openvswitch] + #15 [ffff80000a5d36f0] netdev_port_receive at ffffb70ee6973948 [openvswitch] + #16 [ffff80000a5d3720] netdev_frame_hook at ffffb70ee6973a28 [openvswitch] + #17 [ffff80000a5d3730] __netif_receive_skb_core.constprop.0 at ffffb70f06079f90 + +We moved the per cpu upcall counter allocation to the existing vport +alloc and free functions to solve this. 
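The invariant behind the move, sketched with generic names (this is not the ovs_vport API): allocate everything a concurrent reader might touch before the object becomes reachable, and free it in the same place the object itself is freed.

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/types.h>

struct thing_stats {
	u64 n;
};

struct thing {
	struct thing_stats __percpu *stats;
};

static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	t->stats = alloc_percpu(struct thing_stats);	/* before publication */
	if (!t->stats) {
		kfree(t);
		return NULL;
	}
	return t;	/* only a fully constructed object may be inserted */
}

static void thing_free(struct thing *t)
{
	free_percpu(t->stats);	/* torn down together with the object */
	kfree(t);
}

The diff below applies the same shape to ovs_vport_alloc()/ovs_vport_free() and drops the late allocations from the two command handlers.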
+ +Fixes: 95637d91fefd ("net: openvswitch: release vport resources on failure") +Fixes: 1933ea365aa7 ("net: openvswitch: Add support to count upcall packets") +Signed-off-by: Eelco Chaudron +Reviewed-by: Simon Horman +Acked-by: Aaron Conole +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/openvswitch/datapath.c | 19 ------------------- + net/openvswitch/vport.c | 18 ++++++++++++++++-- + 2 files changed, 16 insertions(+), 21 deletions(-) + +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c +index fcee6012293b1..58f530f60172a 100644 +--- a/net/openvswitch/datapath.c ++++ b/net/openvswitch/datapath.c +@@ -236,9 +236,6 @@ void ovs_dp_detach_port(struct vport *p) + /* First drop references to device. */ + hlist_del_rcu(&p->dp_hash_node); + +- /* Free percpu memory */ +- free_percpu(p->upcall_stats); +- + /* Then destroy it. */ + ovs_vport_del(p); + } +@@ -1858,12 +1855,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) + goto err_destroy_portids; + } + +- vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); +- if (!vport->upcall_stats) { +- err = -ENOMEM; +- goto err_destroy_vport; +- } +- + err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, + info->snd_seq, 0, OVS_DP_CMD_NEW); + BUG_ON(err < 0); +@@ -1876,8 +1867,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) + ovs_notify(&dp_datapath_genl_family, reply, info); + return 0; + +-err_destroy_vport: +- ovs_dp_detach_port(vport); + err_destroy_portids: + kfree(rcu_dereference_raw(dp->upcall_portids)); + err_unlock_and_destroy_meters: +@@ -2322,12 +2311,6 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) + goto exit_unlock_free; + } + +- vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); +- if (!vport->upcall_stats) { +- err = -ENOMEM; +- goto exit_unlock_free_vport; +- } +- + err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info), + info->snd_portid, info->snd_seq, 0, + OVS_VPORT_CMD_NEW, GFP_KERNEL); +@@ -2345,8 +2328,6 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) + ovs_notify(&dp_vport_genl_family, reply, info); + return 0; + +-exit_unlock_free_vport: +- ovs_dp_detach_port(vport); + exit_unlock_free: + ovs_unlock(); + kfree_skb(reply); +diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c +index 7e0f5c45b5124..972ae01a70f76 100644 +--- a/net/openvswitch/vport.c ++++ b/net/openvswitch/vport.c +@@ -124,6 +124,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, + { + struct vport *vport; + size_t alloc_size; ++ int err; + + alloc_size = sizeof(struct vport); + if (priv_size) { +@@ -135,17 +136,29 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, + if (!vport) + return ERR_PTR(-ENOMEM); + ++ vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); ++ if (!vport->upcall_stats) { ++ err = -ENOMEM; ++ goto err_kfree_vport; ++ } ++ + vport->dp = parms->dp; + vport->port_no = parms->port_no; + vport->ops = ops; + INIT_HLIST_NODE(&vport->dp_hash_node); + + if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) { +- kfree(vport); +- return ERR_PTR(-EINVAL); ++ err = -EINVAL; ++ goto err_free_percpu; + } + + return vport; ++ ++err_free_percpu: ++ free_percpu(vport->upcall_stats); ++err_kfree_vport: ++ kfree(vport); ++ return ERR_PTR(err); + } + EXPORT_SYMBOL_GPL(ovs_vport_alloc); + +@@ -165,6 +178,7 @@ void ovs_vport_free(struct vport 
*vport) + * it is safe to use raw dereference. + */ + kfree(rcu_dereference_raw(vport->upcall_portids)); ++ free_percpu(vport->upcall_stats); + kfree(vport); + } + EXPORT_SYMBOL_GPL(ovs_vport_free); +-- +2.39.2 + diff --git a/queue-6.3/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch b/queue-6.3/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch new file mode 100644 index 00000000000..8cea19cc9f9 --- /dev/null +++ b/queue-6.3/net-sched-act_police-fix-sparse-errors-in-tcf_police.patch @@ -0,0 +1,66 @@ +From 2386485a3b3466a392c880e671d1798e9b090f77 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 13:13:04 +0000 +Subject: net: sched: act_police: fix sparse errors in tcf_police_dump() + +From: Eric Dumazet + +[ Upstream commit 682881ee45c81daa883dcd4fe613b0b0d988bb22 ] + +Fixes following sparse errors: + +net/sched/act_police.c:360:28: warning: dereference of noderef expression +net/sched/act_police.c:362:45: warning: dereference of noderef expression +net/sched/act_police.c:362:45: warning: dereference of noderef expression +net/sched/act_police.c:368:28: warning: dereference of noderef expression +net/sched/act_police.c:370:45: warning: dereference of noderef expression +net/sched/act_police.c:370:45: warning: dereference of noderef expression +net/sched/act_police.c:376:45: warning: dereference of noderef expression +net/sched/act_police.c:376:45: warning: dereference of noderef expression + +Fixes: d1967e495a8d ("net_sched: act_police: add 2 new attributes to support police 64bit rate and peakrate") +Signed-off-by: Eric Dumazet +Reviewed-by: Simon Horman +Acked-by: Jamal Hadi Salim +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/sched/act_police.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/net/sched/act_police.c b/net/sched/act_police.c +index 227cba58ce9f3..2e9dce03d1ecc 100644 +--- a/net/sched/act_police.c ++++ b/net/sched/act_police.c +@@ -357,23 +357,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, + opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); + if (p->rate_present) { + psched_ratecfg_getrate(&opt.rate, &p->rate); +- if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) && ++ if ((p->rate.rate_bytes_ps >= (1ULL << 32)) && + nla_put_u64_64bit(skb, TCA_POLICE_RATE64, +- police->params->rate.rate_bytes_ps, ++ p->rate.rate_bytes_ps, + TCA_POLICE_PAD)) + goto nla_put_failure; + } + if (p->peak_present) { + psched_ratecfg_getrate(&opt.peakrate, &p->peak); +- if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) && ++ if ((p->peak.rate_bytes_ps >= (1ULL << 32)) && + nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64, +- police->params->peak.rate_bytes_ps, ++ p->peak.rate_bytes_ps, + TCA_POLICE_PAD)) + goto nla_put_failure; + } + if (p->pps_present) { + if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64, +- police->params->ppsrate.rate_pkts_ps, ++ p->ppsrate.rate_pkts_ps, + TCA_POLICE_PAD)) + goto nla_put_failure; + if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64, +-- +2.39.2 + diff --git a/queue-6.3/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch b/queue-6.3/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch new file mode 100644 index 00000000000..3091beaddb6 --- /dev/null +++ b/queue-6.3/net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch @@ -0,0 +1,571 @@ +From 8c1934fb8cdb927becb68da3404ae54506c53e80 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 11:19:29 +0000 +Subject: net: sched: add rcu annotations around 
qdisc->qdisc_sleeping + +From: Eric Dumazet + +[ Upstream commit d636fc5dd692c8f4e00ae6e0359c0eceeb5d9bdb ] + +syzbot reported a race around qdisc->qdisc_sleeping [1] + +It is time we add proper annotations to reads and writes to/from +qdisc->qdisc_sleeping. + +[1] +BUG: KCSAN: data-race in dev_graft_qdisc / qdisc_lookup_rcu + +read to 0xffff8881286fc618 of 8 bytes by task 6928 on cpu 1: +qdisc_lookup_rcu+0x192/0x2c0 net/sched/sch_api.c:331 +__tcf_qdisc_find+0x74/0x3c0 net/sched/cls_api.c:1174 +tc_get_tfilter+0x18f/0x990 net/sched/cls_api.c:2547 +rtnetlink_rcv_msg+0x7af/0x8c0 net/core/rtnetlink.c:6386 +netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546 +rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413 +netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] +netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365 +netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913 +sock_sendmsg_nosec net/socket.c:724 [inline] +sock_sendmsg net/socket.c:747 [inline] +____sys_sendmsg+0x375/0x4c0 net/socket.c:2503 +___sys_sendmsg net/socket.c:2557 [inline] +__sys_sendmsg+0x1e3/0x270 net/socket.c:2586 +__do_sys_sendmsg net/socket.c:2595 [inline] +__se_sys_sendmsg net/socket.c:2593 [inline] +__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593 +do_syscall_x64 arch/x86/entry/common.c:50 [inline] +do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 +entry_SYSCALL_64_after_hwframe+0x63/0xcd + +write to 0xffff8881286fc618 of 8 bytes by task 6912 on cpu 0: +dev_graft_qdisc+0x4f/0x80 net/sched/sch_generic.c:1115 +qdisc_graft+0x7d0/0xb60 net/sched/sch_api.c:1103 +tc_modify_qdisc+0x712/0xf10 net/sched/sch_api.c:1693 +rtnetlink_rcv_msg+0x807/0x8c0 net/core/rtnetlink.c:6395 +netlink_rcv_skb+0x126/0x220 net/netlink/af_netlink.c:2546 +rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:6413 +netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] +netlink_unicast+0x56f/0x640 net/netlink/af_netlink.c:1365 +netlink_sendmsg+0x665/0x770 net/netlink/af_netlink.c:1913 +sock_sendmsg_nosec net/socket.c:724 [inline] +sock_sendmsg net/socket.c:747 [inline] +____sys_sendmsg+0x375/0x4c0 net/socket.c:2503 +___sys_sendmsg net/socket.c:2557 [inline] +__sys_sendmsg+0x1e3/0x270 net/socket.c:2586 +__do_sys_sendmsg net/socket.c:2595 [inline] +__se_sys_sendmsg net/socket.c:2593 [inline] +__x64_sys_sendmsg+0x46/0x50 net/socket.c:2593 +do_syscall_x64 arch/x86/entry/common.c:50 [inline] +do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 +entry_SYSCALL_64_after_hwframe+0x63/0xcd + +Reported by Kernel Concurrency Sanitizer on: +CPU: 0 PID: 6912 Comm: syz-executor.5 Not tainted 6.4.0-rc3-syzkaller-00190-g0d85b27b0cc6 #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/16/2023 + +Fixes: 3a7d0d07a386 ("net: sched: extend Qdisc with rcu") +Reported-by: syzbot +Signed-off-by: Eric Dumazet +Cc: Vlad Buslov +Acked-by: Jamal Hadi Salim +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + include/linux/netdevice.h | 2 +- + include/net/sch_generic.h | 6 ++++-- + net/core/dev.c | 2 +- + net/sched/sch_api.c | 26 ++++++++++++++++---------- + net/sched/sch_fq_pie.c | 2 ++ + net/sched/sch_generic.c | 30 +++++++++++++++--------------- + net/sched/sch_mq.c | 8 ++++---- + net/sched/sch_mqprio.c | 8 ++++---- + net/sched/sch_pie.c | 5 ++++- + net/sched/sch_red.c | 5 ++++- + net/sched/sch_sfq.c | 5 ++++- + net/sched/sch_taprio.c | 6 +++--- + net/sched/sch_teql.c | 2 +- + 13 files changed, 63 insertions(+), 44 deletions(-) + +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 2771aa046ab2a..7ed63f5bbe056 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -612,7 +612,7 @@ struct netdev_queue { + netdevice_tracker dev_tracker; + + struct Qdisc __rcu *qdisc; +- struct Qdisc *qdisc_sleeping; ++ struct Qdisc __rcu *qdisc_sleeping; + #ifdef CONFIG_SYSFS + struct kobject kobj; + #endif +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index fab5ba3e61b7c..27271f2b37cb3 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -545,7 +545,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc) + + static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) + { +- return qdisc->dev_queue->qdisc_sleeping; ++ return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping); + } + + static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) +@@ -754,7 +754,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev) + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); +- if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) ++ ++ if (rcu_access_pointer(txq->qdisc) != ++ rcu_access_pointer(txq->qdisc_sleeping)) + return true; + } + return false; +diff --git a/net/core/dev.c b/net/core/dev.c +index 6d46eb0402ccd..bcb654fd519bd 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -10507,7 +10507,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) + return NULL; + netdev_init_one_queue(dev, queue, NULL); + RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); +- queue->qdisc_sleeping = &noop_qdisc; ++ RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); + rcu_assign_pointer(dev->ingress_queue, queue); + #endif + return queue; +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 7045b67b5533e..b2a63d697a4aa 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -309,7 +309,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) + + if (dev_ingress_queue(dev)) + q = qdisc_match_from_root( +- dev_ingress_queue(dev)->qdisc_sleeping, ++ rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping), + handle); + out: + return q; +@@ -328,7 +328,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) + + nq = dev_ingress_queue_rcu(dev); + if (nq) +- q = qdisc_match_from_root(nq->qdisc_sleeping, handle); ++ q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping), ++ handle); + out: + return q; + } +@@ -634,8 +635,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init); + void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, + u64 delta_ns) + { +- if (test_bit(__QDISC_STATE_DEACTIVATED, +- &qdisc_root_sleeping(wd->qdisc)->state)) ++ bool deactivated; ++ ++ rcu_read_lock(); ++ deactivated = test_bit(__QDISC_STATE_DEACTIVATED, ++ &qdisc_root_sleeping(wd->qdisc)->state); ++ rcu_read_unlock(); ++ if 
(deactivated) + return; + + if (hrtimer_is_queued(&wd->timer)) { +@@ -1476,7 +1482,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + } + q = qdisc_leaf(p, clid); + } else if (dev_ingress_queue(dev)) { +- q = dev_ingress_queue(dev)->qdisc_sleeping; ++ q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); + } + } else { + q = rtnl_dereference(dev->qdisc); +@@ -1562,7 +1568,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + } + q = qdisc_leaf(p, clid); + } else if (dev_ingress_queue_create(dev)) { +- q = dev_ingress_queue(dev)->qdisc_sleeping; ++ q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); + } + } else { + q = rtnl_dereference(dev->qdisc); +@@ -1803,8 +1809,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) + + dev_queue = dev_ingress_queue(dev); + if (dev_queue && +- tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, +- &q_idx, s_q_idx, false, ++ tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), ++ skb, cb, &q_idx, s_q_idx, false, + tca[TCA_DUMP_INVISIBLE]) < 0) + goto done; + +@@ -2247,8 +2253,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) + + dev_queue = dev_ingress_queue(dev); + if (dev_queue && +- tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, +- &t, s_t, false) < 0) ++ tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping), ++ skb, tcm, cb, &t, s_t, false) < 0) + goto done; + + done: +diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c +index c699e5095607d..591d87d5e5c0f 100644 +--- a/net/sched/sch_fq_pie.c ++++ b/net/sched/sch_fq_pie.c +@@ -379,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t) + spinlock_t *root_lock; /* to lock qdisc for probability calculations */ + u32 idx; + ++ rcu_read_lock(); + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spin_lock(root_lock); + +@@ -391,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t) + mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate); + + spin_unlock(root_lock); ++ rcu_read_unlock(); + } + + static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt, +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index a9aadc4e68581..ee43e8ac039ed 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = { + + static struct netdev_queue noop_netdev_queue = { + RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), +- .qdisc_sleeping = &noop_qdisc, ++ RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), + }; + + struct Qdisc noop_qdisc = { +@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked); + struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + struct Qdisc *qdisc) + { +- struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; ++ struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + spinlock_t *root_lock; + + root_lock = qdisc_lock(oqdisc); +@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + /* ... 
and graft new one */ + if (qdisc == NULL) + qdisc = &noop_qdisc; +- dev_queue->qdisc_sleeping = qdisc; ++ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); + rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); + + spin_unlock_bh(root_lock); +@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_qdisc_default) + { +- struct Qdisc *qdisc = dev_queue->qdisc_sleeping; ++ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + struct Qdisc *qdisc_default = _qdisc_default; + + if (qdisc) { + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); +- dev_queue->qdisc_sleeping = qdisc_default; ++ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); + + qdisc_put(qdisc); + } +@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev, + + if (!netif_is_multiqueue(dev)) + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; +- dev_queue->qdisc_sleeping = qdisc; ++ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); + } + + static void attach_default_qdiscs(struct net_device *dev) +@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev) + if (!netif_is_multiqueue(dev) || + dev->priv_flags & IFF_NO_QUEUE) { + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); +- qdisc = txq->qdisc_sleeping; ++ qdisc = rtnl_dereference(txq->qdisc_sleeping); + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); + } else { +@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev) + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); + dev->priv_flags |= IFF_NO_QUEUE; + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); +- qdisc = txq->qdisc_sleeping; ++ qdisc = rtnl_dereference(txq->qdisc_sleeping); + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); + dev->priv_flags ^= IFF_NO_QUEUE; +@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_need_watchdog) + { +- struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; ++ struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + int *need_watchdog_p = _need_watchdog; + + if (!(new_qdisc->flags & TCQ_F_BUILTIN)) +@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev, + struct Qdisc *qdisc; + bool nolock; + +- qdisc = dev_queue->qdisc_sleeping; ++ qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + if (!qdisc) + return; + +@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev) + int val; + + dev_queue = netdev_get_tx_queue(dev, i); +- q = dev_queue->qdisc_sleeping; ++ q = rtnl_dereference(dev_queue->qdisc_sleeping); + + root_lock = qdisc_lock(q); + spin_lock_bh(root_lock); +@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate); + static int qdisc_change_tx_queue_len(struct net_device *dev, + struct netdev_queue *dev_queue) + { +- struct Qdisc *qdisc = dev_queue->qdisc_sleeping; ++ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + const struct Qdisc_ops *ops = qdisc->ops; + + if (ops->change_tx_queue_len) +@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) + unsigned int i; + + for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { +- qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; ++ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); + /* Only update the default qdiscs we created, + * qdiscs with handles are always hashed. 
+ */ +@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx) + qdisc_hash_del(qdisc); + } + for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { +- qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; ++ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); + if (qdisc != &noop_qdisc && !qdisc->handle) + qdisc_hash_add(qdisc, false); + } +@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, + struct Qdisc *qdisc = _qdisc; + + rcu_assign_pointer(dev_queue->qdisc, qdisc); +- dev_queue->qdisc_sleeping = qdisc; ++ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); + } + + void dev_init_scheduler(struct net_device *dev) +diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c +index d0bc660d7401f..c860119a8f091 100644 +--- a/net/sched/sch_mq.c ++++ b/net/sched/sch_mq.c +@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) + * qdisc totals are added at end. + */ + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { +- qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; ++ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); + spin_lock_bh(qdisc_lock(qdisc)); + + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, +@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl) + { + struct netdev_queue *dev_queue = mq_queue_get(sch, cl); + +- return dev_queue->qdisc_sleeping; ++ return rtnl_dereference(dev_queue->qdisc_sleeping); + } + + static unsigned long mq_find(struct Qdisc *sch, u32 classid) +@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl, + + tcm->tcm_parent = TC_H_ROOT; + tcm->tcm_handle |= TC_H_MIN(cl); +- tcm->tcm_info = dev_queue->qdisc_sleeping->handle; ++ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; + return 0; + } + +@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, + { + struct netdev_queue *dev_queue = mq_queue_get(sch, cl); + +- sch = dev_queue->qdisc_sleeping; ++ sch = rtnl_dereference(dev_queue->qdisc_sleeping); + if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || + qdisc_qstats_copy(d, sch) < 0) + return -1; +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c +index fc6225f15fcdb..dd29c9470c784 100644 +--- a/net/sched/sch_mqprio.c ++++ b/net/sched/sch_mqprio.c +@@ -421,7 +421,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) + * qdisc totals are added at end. + */ + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { +- qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; ++ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); + spin_lock_bh(qdisc_lock(qdisc)); + + gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, +@@ -465,7 +465,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl) + if (!dev_queue) + return NULL; + +- return dev_queue->qdisc_sleeping; ++ return rtnl_dereference(dev_queue->qdisc_sleeping); + } + + static unsigned long mqprio_find(struct Qdisc *sch, u32 classid) +@@ -498,7 +498,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl, + tcm->tcm_parent = (tc < 0) ? 
0 : + TC_H_MAKE(TC_H_MAJ(sch->handle), + TC_H_MIN(tc + TC_H_MIN_PRIORITY)); +- tcm->tcm_info = dev_queue->qdisc_sleeping->handle; ++ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; + } else { + tcm->tcm_parent = TC_H_ROOT; + tcm->tcm_info = 0; +@@ -554,7 +554,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + } else { + struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); + +- sch = dev_queue->qdisc_sleeping; ++ sch = rtnl_dereference(dev_queue->qdisc_sleeping); + if (gnet_stats_copy_basic(d, sch->cpu_bstats, + &sch->bstats, true) < 0 || + qdisc_qstats_copy(d, sch) < 0) +diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c +index 265c238047a42..b60b31ef71cc5 100644 +--- a/net/sched/sch_pie.c ++++ b/net/sched/sch_pie.c +@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t) + { + struct pie_sched_data *q = from_timer(q, t, adapt_timer); + struct Qdisc *sch = q->sch; +- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); ++ spinlock_t *root_lock; + ++ rcu_read_lock(); ++ root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spin_lock(root_lock); + pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); + +@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t) + if (q->params.tupdate) + mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); + spin_unlock(root_lock); ++ rcu_read_unlock(); + } + + static int pie_init(struct Qdisc *sch, struct nlattr *opt, +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c +index 98129324e1573..16277b6a0238d 100644 +--- a/net/sched/sch_red.c ++++ b/net/sched/sch_red.c +@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t) + { + struct red_sched_data *q = from_timer(q, t, adapt_timer); + struct Qdisc *sch = q->sch; +- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); ++ spinlock_t *root_lock; + ++ rcu_read_lock(); ++ root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spin_lock(root_lock); + red_adaptative_algo(&q->parms, &q->vars); + mod_timer(&q->adapt_timer, jiffies + HZ/2); + spin_unlock(root_lock); ++ rcu_read_unlock(); + } + + static int red_init(struct Qdisc *sch, struct nlattr *opt, +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index abd436307d6a8..66dcb18638fea 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t) + { + struct sfq_sched_data *q = from_timer(q, t, perturb_timer); + struct Qdisc *sch = q->sch; +- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); ++ spinlock_t *root_lock; + siphash_key_t nkey; + + get_random_bytes(&nkey, sizeof(nkey)); ++ rcu_read_lock(); ++ root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spin_lock(root_lock); + q->perturbation = nkey; + if (!q->filter_list && q->tail) +@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t) + + if (q->perturb_period) + mod_timer(&q->perturb_timer, jiffies + q->perturb_period); ++ rcu_read_unlock(); + } + + static int sfq_change(struct Qdisc *sch, struct nlattr *opt) +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c +index cbad430191721..a6cf56a969421 100644 +--- a/net/sched/sch_taprio.c ++++ b/net/sched/sch_taprio.c +@@ -2319,7 +2319,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) + if (!dev_queue) + return NULL; + +- return dev_queue->qdisc_sleeping; ++ return rtnl_dereference(dev_queue->qdisc_sleeping); + } + + static unsigned long taprio_find(struct Qdisc *sch, u32 classid) +@@ 
-2338,7 +2338,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, + + tcm->tcm_parent = TC_H_ROOT; + tcm->tcm_handle |= TC_H_MIN(cl); +- tcm->tcm_info = dev_queue->qdisc_sleeping->handle; ++ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; + + return 0; + } +@@ -2350,7 +2350,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, + { + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + +- sch = dev_queue->qdisc_sleeping; ++ sch = rtnl_dereference(dev_queue->qdisc_sleeping); + if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || + qdisc_qstats_copy(d, sch) < 0) + return -1; +diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c +index 16f9238aa51d1..7721239c185fb 100644 +--- a/net/sched/sch_teql.c ++++ b/net/sched/sch_teql.c +@@ -297,7 +297,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) + struct net_device *slave = qdisc_dev(q); + struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); + +- if (slave_txq->qdisc_sleeping != q) ++ if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q) + continue; + if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || + !netif_running(slave)) { +-- +2.39.2 + diff --git a/queue-6.3/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch b/queue-6.3/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch new file mode 100644 index 00000000000..79cdf576dab --- /dev/null +++ b/queue-6.3/net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch @@ -0,0 +1,37 @@ +From 04545e7f6c98bd898e38888df63f40542bd0f3bc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Jun 2023 10:23:01 +0800 +Subject: net: sched: fix possible refcount leak in tc_chain_tmplt_add() + +From: Hangyu Hua + +[ Upstream commit 44f8baaf230c655c249467ca415b570deca8df77 ] + +try_module_get will be called in tcf_proto_lookup_ops. So module_put needs +to be called to drop the refcount if ops don't implement the required +function. + +Fixes: 9f407f1768d3 ("net: sched: introduce chain templates") +Signed-off-by: Hangyu Hua +Reviewed-by: Larysa Zaremba +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/sched/cls_api.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index b2432ee04f319..c877a6343fd47 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -2950,6 +2950,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, + return PTR_ERR(ops); + if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { + NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); ++ module_put(ops->owner); + return -EOPNOTSUPP; + } + +-- +2.39.2 + diff --git a/queue-6.3/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch b/queue-6.3/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch new file mode 100644 index 00000000000..8a16c7af8a6 --- /dev/null +++ b/queue-6.3/net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch @@ -0,0 +1,129 @@ +From e5e33de5cff1cddf61ebc21b4468a2ed26b5fa18 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Jun 2023 12:37:47 +0000 +Subject: net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values + +From: Eric Dumazet + +[ Upstream commit cd2b8113c2e8b9f5a88a942e1eaca61eba401b85 ] + +We got multiple syzbot reports, all duplicates of the following [1] + +syzbot managed to install fq_pie with a zero TCA_FQ_PIE_QUANTUM, +thus triggering infinite loops. 
+ +Use limits similar to sch_fq, with commits +3725a269815b ("pkt_sched: fq: avoid hang when quantum 0") and +d9e15a273306 ("pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM") + +[1] +watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [swapper/0:0] +Modules linked in: +irq event stamp: 172817 +hardirqs last enabled at (172816): [] __el1_irq arch/arm64/kernel/entry-common.c:476 [inline] +hardirqs last enabled at (172816): [] el1_interrupt+0x58/0x68 arch/arm64/kernel/entry-common.c:486 +hardirqs last disabled at (172817): [] __el1_irq arch/arm64/kernel/entry-common.c:468 [inline] +hardirqs last disabled at (172817): [] el1_interrupt+0x24/0x68 arch/arm64/kernel/entry-common.c:486 +softirqs last enabled at (167634): [] softirq_handle_end kernel/softirq.c:414 [inline] +softirqs last enabled at (167634): [] __do_softirq+0xac0/0xd54 kernel/softirq.c:600 +softirqs last disabled at (167701): [] ____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80 +CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.4.0-rc3-syzkaller-geb0f1697d729 #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/28/2023 +pstate: 80400005 (Nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +pc : fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246 +lr : fq_pie_qdisc_dequeue+0xe4/0x8ac net/sched/sch_fq_pie.c:240 +sp : ffff800008007210 +x29: ffff800008007280 x28: ffff0000c86f7890 x27: ffff0000cb20c2e8 +x26: ffff0000cb20c2f0 x25: dfff800000000000 x24: ffff0000cb20c2e0 +x23: ffff0000c86f7880 x22: 0000000000000040 x21: 1fffe000190def10 +x20: ffff0000cb20c2e0 x19: ffff0000cb20c2e0 x18: ffff800008006e60 +x17: 0000000000000000 x16: ffff80000850af6c x15: 0000000000000302 +x14: 0000000000000100 x13: 0000000000000000 x12: 0000000000000001 +x11: 0000000000000302 x10: 0000000000000100 x9 : 0000000000000000 +x8 : 0000000000000000 x7 : ffff80000841c468 x6 : 0000000000000000 +x5 : 0000000000000001 x4 : 0000000000000001 x3 : 0000000000000000 +x2 : ffff0000cb20c2e0 x1 : ffff0000cb20c2e0 x0 : 0000000000000001 +Call trace: +fq_pie_qdisc_dequeue+0x10c/0x8ac net/sched/sch_fq_pie.c:246 +dequeue_skb net/sched/sch_generic.c:292 [inline] +qdisc_restart net/sched/sch_generic.c:397 [inline] +__qdisc_run+0x1fc/0x231c net/sched/sch_generic.c:415 +__dev_xmit_skb net/core/dev.c:3868 [inline] +__dev_queue_xmit+0xc80/0x3318 net/core/dev.c:4210 +dev_queue_xmit include/linux/netdevice.h:3085 [inline] +neigh_connected_output+0x2f8/0x38c net/core/neighbour.c:1581 +neigh_output include/net/neighbour.h:544 [inline] +ip6_finish_output2+0xd60/0x1a1c net/ipv6/ip6_output.c:134 +__ip6_finish_output net/ipv6/ip6_output.c:195 [inline] +ip6_finish_output+0x538/0x8c8 net/ipv6/ip6_output.c:206 +NF_HOOK_COND include/linux/netfilter.h:292 [inline] +ip6_output+0x270/0x594 net/ipv6/ip6_output.c:227 +dst_output include/net/dst.h:458 [inline] +NF_HOOK include/linux/netfilter.h:303 [inline] +ndisc_send_skb+0xc30/0x1790 net/ipv6/ndisc.c:508 +ndisc_send_rs+0x47c/0x5d4 net/ipv6/ndisc.c:718 +addrconf_rs_timer+0x300/0x58c net/ipv6/addrconf.c:3936 +call_timer_fn+0x19c/0x8cc kernel/time/timer.c:1700 +expire_timers kernel/time/timer.c:1751 [inline] +__run_timers+0x55c/0x734 kernel/time/timer.c:2022 +run_timer_softirq+0x7c/0x114 kernel/time/timer.c:2035 +__do_softirq+0x2d0/0xd54 kernel/softirq.c:571 +____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80 +call_on_irq_stack+0x24/0x4c arch/arm64/kernel/entry.S:882 +do_softirq_own_stack+0x20/0x2c arch/arm64/kernel/irq.c:85 +invoke_softirq kernel/softirq.c:452 [inline] +__irq_exit_rcu+0x28c/0x534 kernel/softirq.c:650 
+irq_exit_rcu+0x14/0x84 kernel/softirq.c:662 +__el1_irq arch/arm64/kernel/entry-common.c:472 [inline] +el1_interrupt+0x38/0x68 arch/arm64/kernel/entry-common.c:486 +el1h_64_irq_handler+0x18/0x24 arch/arm64/kernel/entry-common.c:491 +el1h_64_irq+0x64/0x68 arch/arm64/kernel/entry.S:587 +__daif_local_irq_enable arch/arm64/include/asm/irqflags.h:33 [inline] +arch_local_irq_enable+0x8/0xc arch/arm64/include/asm/irqflags.h:55 +cpuidle_idle_call kernel/sched/idle.c:170 [inline] +do_idle+0x1f0/0x4e8 kernel/sched/idle.c:282 +cpu_startup_entry+0x24/0x28 kernel/sched/idle.c:379 +rest_init+0x2dc/0x2f4 init/main.c:735 +start_kernel+0x0/0x55c init/main.c:834 +start_kernel+0x3f0/0x55c init/main.c:1088 +__primary_switched+0xb8/0xc0 arch/arm64/kernel/head.S:523 + +Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler") +Reported-by: syzbot +Signed-off-by: Eric Dumazet +Reviewed-by: Jamal Hadi Salim +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/sched/sch_fq_pie.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c +index 6980796d435d9..c699e5095607d 100644 +--- a/net/sched/sch_fq_pie.c ++++ b/net/sched/sch_fq_pie.c +@@ -201,6 +201,11 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + return NET_XMIT_CN; + } + ++static struct netlink_range_validation fq_pie_q_range = { ++ .min = 1, ++ .max = 1 << 20, ++}; ++ + static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = { + [TCA_FQ_PIE_LIMIT] = {.type = NLA_U32}, + [TCA_FQ_PIE_FLOWS] = {.type = NLA_U32}, +@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = { + [TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32}, + [TCA_FQ_PIE_ALPHA] = {.type = NLA_U32}, + [TCA_FQ_PIE_BETA] = {.type = NLA_U32}, +- [TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32}, ++ [TCA_FQ_PIE_QUANTUM] = ++ NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range), + [TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32}, + [TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32}, + [TCA_FQ_PIE_ECN] = {.type = NLA_U32}, +-- +2.39.2 + diff --git a/queue-6.3/net-sched-move-rtm_tca_policy-declaration-to-include.patch b/queue-6.3/net-sched-move-rtm_tca_policy-declaration-to-include.patch new file mode 100644 index 00000000000..e87dd31bcda --- /dev/null +++ b/queue-6.3/net-sched-move-rtm_tca_policy-declaration-to-include.patch @@ -0,0 +1,54 @@ +From 521ead1669713801697ba4818dbeff913d835c62 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 11:42:33 +0000 +Subject: net: sched: move rtm_tca_policy declaration to include file + +From: Eric Dumazet + +[ Upstream commit 886bc7d6ed3357975c5f1d3c784da96000d4bbb4 ] + +rtm_tca_policy is used from net/sched/sch_api.c and net/sched/cls_api.c, +thus should be declared in an include file. + +This fixes the following sparse warning: +net/sched/sch_api.c:1434:25: warning: symbol 'rtm_tca_policy' was not declared. Should it be static? + +Fixes: e331473fee3d ("net/sched: cls_api: add missing validation of netlink attributes") +Signed-off-by: Eric Dumazet +Acked-by: Jamal Hadi Salim +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + include/net/pkt_sched.h | 2 ++ + net/sched/cls_api.c | 2 -- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h +index fc688c7e95951..4df802f84eeba 100644 +--- a/include/net/pkt_sched.h ++++ b/include/net/pkt_sched.h +@@ -128,6 +128,8 @@ static inline void qdisc_run(struct Qdisc *q) + } + } + ++extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; ++ + /* Calculate maximal size of packet seen by hard_start_xmit + routine of this device. + */ +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index 2621550bfddc1..b2432ee04f319 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -43,8 +43,6 @@ + #include + #include + +-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; +- + /* The list of all installed classifier types */ + static LIST_HEAD(tcf_proto_base); + +-- +2.39.2 + diff --git a/queue-6.3/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch b/queue-6.3/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch new file mode 100644 index 00000000000..d3d5873d8bc --- /dev/null +++ b/queue-6.3/net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch @@ -0,0 +1,89 @@ +From 72bd3a1d0ee75b3bf18210ce77bd55a8dc7e2987 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 1 Jun 2023 16:41:52 +0800 +Subject: net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT + +From: Wen Gu + +[ Upstream commit c308e9ec004721a656c193243eab61a8be324657 ] + +SMCRv1 has a similar issue to SMCRv2 (see link below) that may access +invalid MRs of RMBs when construct LLC ADD LINK CONT messages. + + BUG: kernel NULL pointer dereference, address: 0000000000000014 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 0 P4D 0 + Oops: 0000 [#1] PREEMPT SMP PTI + CPU: 5 PID: 48 Comm: kworker/5:0 Kdump: loaded Tainted: G W E 6.4.0-rc3+ #49 + Workqueue: events smc_llc_add_link_work [smc] + RIP: 0010:smc_llc_add_link_cont+0x160/0x270 [smc] + RSP: 0018:ffffa737801d3d50 EFLAGS: 00010286 + RAX: ffff964f82144000 RBX: ffffa737801d3dd8 RCX: 0000000000000000 + RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff964f81370c30 + RBP: ffffa737801d3dd4 R08: ffff964f81370000 R09: ffffa737801d3db0 + R10: 0000000000000001 R11: 0000000000000060 R12: ffff964f82e70000 + R13: ffff964f81370c38 R14: ffffa737801d3dd3 R15: 0000000000000001 + FS: 0000000000000000(0000) GS:ffff9652bfd40000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000014 CR3: 000000008fa20004 CR4: 00000000003706e0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + Call Trace: + + smc_llc_srv_rkey_exchange+0xa7/0x190 [smc] + smc_llc_srv_add_link+0x3ae/0x5a0 [smc] + smc_llc_add_link_work+0xb8/0x140 [smc] + process_one_work+0x1e5/0x3f0 + worker_thread+0x4d/0x2f0 + ? __pfx_worker_thread+0x10/0x10 + kthread+0xe5/0x120 + ? __pfx_kthread+0x10/0x10 + ret_from_fork+0x2c/0x50 + + +When an alernate RNIC is available in system, SMC will try to add a new +link based on the RNIC for resilience. All the RMBs in use will be mapped +to the new link. Then the RMBs' MRs corresponding to the new link will +be filled into LLC messages. For SMCRv1, they are ADD LINK CONT messages. + +However smc_llc_add_link_cont() may mistakenly access to unused RMBs which +haven't been mapped to the new link and have no valid MRs, thus causing a +crash. So this patch fixes it. 
+ +Fixes: 87f88cda2128 ("net/smc: rkey processing for a new link as SMC client") +Link: https://lore.kernel.org/r/1685101741-74826-3-git-send-email-guwen@linux.alibaba.com +Signed-off-by: Wen Gu +Reviewed-by: Wenjia Zhang +Reviewed-by: Tony Lu +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/smc/smc_llc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c +index 7a8d9163d186e..90f0b60b196ab 100644 +--- a/net/smc/smc_llc.c ++++ b/net/smc/smc_llc.c +@@ -851,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link, + addc_llc->num_rkeys = *num_rkeys_todo; + n = *num_rkeys_todo; + for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) { ++ while (*buf_pos && !(*buf_pos)->used) ++ *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); + if (!*buf_pos) { + addc_llc->num_rkeys = addc_llc->num_rkeys - + *num_rkeys_todo; +@@ -867,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link, + + (*num_rkeys_todo)--; + *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); +- while (*buf_pos && !(*buf_pos)->used) +- *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); + } + addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; + addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); +-- +2.39.2 + diff --git a/queue-6.3/netfilter-conntrack-fix-null-pointer-dereference-in-.patch b/queue-6.3/netfilter-conntrack-fix-null-pointer-dereference-in-.patch new file mode 100644 index 00000000000..b0cf452aa15 --- /dev/null +++ b/queue-6.3/netfilter-conntrack-fix-null-pointer-dereference-in-.patch @@ -0,0 +1,59 @@ +From 21e9f134022a9a4ddc084015433b3eee706786cf Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 12:25:26 +0200 +Subject: netfilter: conntrack: fix NULL pointer dereference in + nf_confirm_cthelper + +From: Tijs Van Buggenhout + +[ Upstream commit e1f543dc660b44618a1bd72ddb4ca0828a95f7ad ] + +An nf_conntrack_helper from nf_conn_help may become NULL after DNAT. + +Observed when TCP port 1720 (Q931_PORT), associated with h323 conntrack +helper, is DNAT'ed to another destination port (e.g. 1730), while +nfqueue is being used for final acceptance (e.g. snort). + +This happenned after transition from kernel 4.14 to 5.10.161. + +Workarounds: + * keep the same port (1720) in DNAT + * disable nfqueue + * disable/unload h323 NAT helper + +$ linux-5.10/scripts/decode_stacktrace.sh vmlinux < /tmp/kernel.log +BUG: kernel NULL pointer dereference, address: 0000000000000084 +[..] +RIP: 0010:nf_conntrack_update (net/netfilter/nf_conntrack_core.c:2080 net/netfilter/nf_conntrack_core.c:2134) nf_conntrack +[..] +nfqnl_reinject (net/netfilter/nfnetlink_queue.c:237) nfnetlink_queue +nfqnl_recv_verdict (net/netfilter/nfnetlink_queue.c:1230) nfnetlink_queue +nfnetlink_rcv_msg (net/netfilter/nfnetlink.c:241) nfnetlink +[..] 
+ +Fixes: ee04805ff54a ("netfilter: conntrack: make conntrack userspace helpers work again") +Signed-off-by: Tijs Van Buggenhout +Signed-off-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nf_conntrack_core.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 7ba6ab9b54b56..06582f0a5393c 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -2260,6 +2260,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, + return 0; + + helper = rcu_dereference(help->helper); ++ if (!helper) ++ return 0; ++ + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) + return 0; + +-- +2.39.2 + diff --git a/queue-6.3/netfilter-ipset-add-schedule-point-in-call_ad.patch b/queue-6.3/netfilter-ipset-add-schedule-point-in-call_ad.patch new file mode 100644 index 00000000000..ddcf6e3abf9 --- /dev/null +++ b/queue-6.3/netfilter-ipset-add-schedule-point-in-call_ad.patch @@ -0,0 +1,100 @@ +From 1b286ff4457a81e1086730b6d72124380dc439f8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 18 May 2023 10:33:00 -0700 +Subject: netfilter: ipset: Add schedule point in call_ad(). + +From: Kuniyuki Iwashima + +[ Upstream commit 24e227896bbf003165e006732dccb3516f87f88e ] + +syzkaller found a repro that causes Hung Task [0] with ipset. The repro +first creates an ipset and then tries to delete a large number of IPs +from the ipset concurrently: + + IPSET_ATTR_IPADDR_IPV4 : 172.20.20.187 + IPSET_ATTR_CIDR : 2 + +The first deleting thread hogs a CPU with nfnl_lock(NFNL_SUBSYS_IPSET) +held, and other threads wait for it to be released. + +Previously, the same issue existed in set->variant->uadt() that could run +so long under ip_set_lock(set). Commit 5e29dc36bd5e ("netfilter: ipset: +Rework long task execution when adding/deleting entries") tried to fix it, +but the issue still exists in the caller with another mutex. + +While adding/deleting many IPs, we should release the CPU periodically to +prevent someone from abusing ipset to hang the system. + +Note we need to increment the ipset's refcnt to prevent the ipset from +being destroyed while rescheduling. + +[0]: +INFO: task syz-executor174:268 blocked for more than 143 seconds. + Not tainted 6.4.0-rc1-00145-gba79e9a73284 #1 +"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
+task:syz-executor174 state:D stack:0 pid:268 ppid:260 flags:0x0000000d +Call trace: + __switch_to+0x308/0x714 arch/arm64/kernel/process.c:556 + context_switch kernel/sched/core.c:5343 [inline] + __schedule+0xd84/0x1648 kernel/sched/core.c:6669 + schedule+0xf0/0x214 kernel/sched/core.c:6745 + schedule_preempt_disabled+0x58/0xf0 kernel/sched/core.c:6804 + __mutex_lock_common kernel/locking/mutex.c:679 [inline] + __mutex_lock+0x6fc/0xdb0 kernel/locking/mutex.c:747 + __mutex_lock_slowpath+0x14/0x20 kernel/locking/mutex.c:1035 + mutex_lock+0x98/0xf0 kernel/locking/mutex.c:286 + nfnl_lock net/netfilter/nfnetlink.c:98 [inline] + nfnetlink_rcv_msg+0x480/0x70c net/netfilter/nfnetlink.c:295 + netlink_rcv_skb+0x1c0/0x350 net/netlink/af_netlink.c:2546 + nfnetlink_rcv+0x18c/0x199c net/netfilter/nfnetlink.c:658 + netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] + netlink_unicast+0x664/0x8cc net/netlink/af_netlink.c:1365 + netlink_sendmsg+0x6d0/0xa4c net/netlink/af_netlink.c:1913 + sock_sendmsg_nosec net/socket.c:724 [inline] + sock_sendmsg net/socket.c:747 [inline] + ____sys_sendmsg+0x4b8/0x810 net/socket.c:2503 + ___sys_sendmsg net/socket.c:2557 [inline] + __sys_sendmsg+0x1f8/0x2a4 net/socket.c:2586 + __do_sys_sendmsg net/socket.c:2595 [inline] + __se_sys_sendmsg net/socket.c:2593 [inline] + __arm64_sys_sendmsg+0x80/0x94 net/socket.c:2593 + __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline] + invoke_syscall+0x84/0x270 arch/arm64/kernel/syscall.c:52 + el0_svc_common+0x134/0x24c arch/arm64/kernel/syscall.c:142 + do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:193 + el0_svc+0x2c/0x7c arch/arm64/kernel/entry-common.c:637 + el0t_64_sync_handler+0x84/0xf0 arch/arm64/kernel/entry-common.c:655 + el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591 + +Reported-by: syzkaller +Fixes: a7b4f989a629 ("netfilter: ipset: IP set core support") +Signed-off-by: Kuniyuki Iwashima +Acked-by: Jozsef Kadlecsik +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/ipset/ip_set_core.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index 46ebee9400dab..9a6b64779e644 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -1694,6 +1694,14 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, + bool eexist = flags & IPSET_FLAG_EXIST, retried = false; + + do { ++ if (retried) { ++ __ip_set_get(set); ++ nfnl_unlock(NFNL_SUBSYS_IPSET); ++ cond_resched(); ++ nfnl_lock(NFNL_SUBSYS_IPSET); ++ __ip_set_put(set); ++ } ++ + ip_set_lock(set); + ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); + ip_set_unlock(set); +-- +2.39.2 + diff --git a/queue-6.3/netfilter-nf_tables-add-null-check-for-nla_nest_star.patch b/queue-6.3/netfilter-nf_tables-add-null-check-for-nla_nest_star.patch new file mode 100644 index 00000000000..b53e53e290c --- /dev/null +++ b/queue-6.3/netfilter-nf_tables-add-null-check-for-nla_nest_star.patch @@ -0,0 +1,41 @@ +From 339efefd5dc5ee42ca69b5d590951543169b7746 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 May 2023 12:25:27 +0000 +Subject: netfilter: nf_tables: Add null check for nla_nest_start_noflag() in + nft_dump_basechain_hook() + +From: Gavrilov Ilia + +[ Upstream commit bd058763a624a1fb5c20f3c46e632d623c043676 ] + +The nla_nest_start_noflag() function may fail and return NULL; +the return value needs to be checked. 
+ +Found by InfoTeCS on behalf of Linux Verification Center +(linuxtesting.org) with SVACE. + +Fixes: d54725cd11a5 ("netfilter: nf_tables: support for multiple devices per netdev hook") +Signed-off-by: Gavrilov Ilia +Signed-off-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nf_tables_api.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index ef80504c3ccd2..8c74bb1ca78a0 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -1593,6 +1593,8 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family, + + if (nft_base_chain_netdev(family, ops->hooknum)) { + nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS); ++ if (!nest_devs) ++ goto nla_put_failure; + + if (!hook_list) + hook_list = &basechain->hook_list; +-- +2.39.2 + diff --git a/queue-6.3/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch b/queue-6.3/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch new file mode 100644 index 00000000000..9a20990baed --- /dev/null +++ b/queue-6.3/netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch @@ -0,0 +1,34 @@ +From 0b55db3357d614e78404be8607e16c9c43bd0091 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 16:32:44 +0200 +Subject: netfilter: nf_tables: out-of-bound check in chain blob + +From: Pablo Neira Ayuso + +[ Upstream commit 08e42a0d3ad30f276f9597b591f975971a1b0fcf ] + +Add current size of rule expressions to the boundary check. + +Fixes: 2c865a8a28a1 ("netfilter: nf_tables: add rule blob layout") +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nf_tables_api.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 8c74bb1ca78a0..368aeabd8f8f1 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -8921,7 +8921,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha + continue; + } + +- if (WARN_ON_ONCE(data + expr->ops->size > data_boundary)) ++ if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary)) + return -ENOMEM; + + memcpy(data + size, expr, expr->ops->size); +-- +2.39.2 + diff --git a/queue-6.3/netfilter-nft_bitwise-fix-register-tracking.patch b/queue-6.3/netfilter-nft_bitwise-fix-register-tracking.patch new file mode 100644 index 00000000000..76f05d4f3db --- /dev/null +++ b/queue-6.3/netfilter-nft_bitwise-fix-register-tracking.patch @@ -0,0 +1,39 @@ +From 341ee5cd711a1458dc86bd8515167c4cea598c0b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 15:07:24 +0100 +Subject: netfilter: nft_bitwise: fix register tracking + +From: Jeremy Sowden + +[ Upstream commit 14e8b293903785590a0ef168745ac84250cb1f4c ] + +At the end of `nft_bitwise_reduce`, there is a loop which is intended to +update the bitwise expression associated with each tracked destination +register. However, currently, it just updates the first register +repeatedly. Fix it. 
+ +Fixes: 34cc9e52884a ("netfilter: nf_tables: cancel tracking for clobbered destination registers") +Signed-off-by: Jeremy Sowden +Signed-off-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/nft_bitwise.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c +index 84eae7cabc67a..2527a01486efc 100644 +--- a/net/netfilter/nft_bitwise.c ++++ b/net/netfilter/nft_bitwise.c +@@ -323,7 +323,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track, + dreg = priv->dreg; + regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE); + for (i = 0; i < regcount; i++, dreg++) +- track->regs[priv->dreg].bitwise = expr; ++ track->regs[dreg].bitwise = expr; + + return false; + } +-- +2.39.2 + diff --git a/queue-6.3/platform-surface-aggregator-allow-completion-work-it.patch b/queue-6.3/platform-surface-aggregator-allow-completion-work-it.patch new file mode 100644 index 00000000000..28a132d66ed --- /dev/null +++ b/queue-6.3/platform-surface-aggregator-allow-completion-work-it.patch @@ -0,0 +1,67 @@ +From 4e05a84e17eaa66f4ae94a4a685d8fa9bfb169fb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 23:01:10 +0200 +Subject: platform/surface: aggregator: Allow completion work-items to be + executed in parallel + +From: Maximilian Luz + +[ Upstream commit 539e0a7f9105d19c00629c3f4da00330488e8c60 ] + +Currently, event completion work-items are restricted to be run strictly +in non-parallel fashion by the respective workqueue. However, this has +lead to some problems: + +In some instances, the event notifier function called inside this +completion workqueue takes a non-negligible amount of time to execute. +One such example is the battery event handling code (surface_battery.c), +which can result in a full battery information refresh, involving +further synchronous communication with the EC inside the event handler. +This is made worse if the communication fails spuriously, generally +incurring a multi-second timeout. + +Since the event completions are run strictly non-parallel, this blocks +other events from being propagated to the respective subsystems. This +becomes especially noticeable for keyboard and touchpad input, which +also funnel their events through this system. Here, users have reported +occasional multi-second "freezes". + +Note, however, that the event handling system was never intended to run +purely sequentially. Instead, we have one work struct per EC/SAM +subsystem, processing the event queue for that subsystem. These work +structs were intended to run in parallel, allowing sequential processing +of work items for each subsystem but parallel processing of work items +across subsystems. + +The only restriction to this is the way the workqueue is created. +Therefore, replace create_workqueue() with alloc_workqueue() and do not +restrict the maximum number of parallel work items to be executed on +that queue, resolving any cross-subsystem blockage. 
+ +Fixes: c167b9c7e3d6 ("platform/surface: Add Surface Aggregator subsystem") +Link: https://github.com/linux-surface/linux-surface/issues/1026 +Signed-off-by: Maximilian Luz +Link: https://lore.kernel.org/r/20230525210110.2785470-1-luzmaximilian@gmail.com +Reviewed-by: Hans de Goede +Signed-off-by: Hans de Goede +Signed-off-by: Sasha Levin +--- + drivers/platform/surface/aggregator/controller.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c +index 535581c0471c5..7fc602e01487d 100644 +--- a/drivers/platform/surface/aggregator/controller.c ++++ b/drivers/platform/surface/aggregator/controller.c +@@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev) + + cplt->dev = dev; + +- cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); ++ cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + if (!cplt->wq) + return -ENOMEM; + +-- +2.39.2 + diff --git a/queue-6.3/platform-surface-aggregator_tabletsw-add-support-for.patch b/queue-6.3/platform-surface-aggregator_tabletsw-add-support-for.patch new file mode 100644 index 00000000000..0301d8caffa --- /dev/null +++ b/queue-6.3/platform-surface-aggregator_tabletsw-add-support-for.patch @@ -0,0 +1,61 @@ +From 11dafa29451bb097a68c432293bd60c4bac093b4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 25 May 2023 23:32:17 +0200 +Subject: platform/surface: aggregator_tabletsw: Add support for book mode in + KIP subsystem + +From: Maximilian Luz + +[ Upstream commit 9bed667033e66083d363a11e9414ad401ecc242c ] + +Devices with a type-cover have an additional "book" mode, deactivating +type-cover input and turning off its backlight. This is currently +unsupported, leading to the warning + + surface_aggregator_tablet_mode_switch 01:0e:01:00:01: unknown KIP cover state: 6 + +Therefore, add support for this state and map it to enable tablet-mode. 
+ +Fixes: 9f794056db5b ("platform/surface: Add KIP/POS tablet-mode switch driver") +Signed-off-by: Maximilian Luz +Link: https://lore.kernel.org/r/20230525213218.2797480-2-luzmaximilian@gmail.com +Reviewed-by: Hans de Goede +Signed-off-by: Hans de Goede +Signed-off-by: Sasha Levin +--- + drivers/platform/surface/surface_aggregator_tabletsw.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c +index 9fed800c7cc09..a18e9fc7896b3 100644 +--- a/drivers/platform/surface/surface_aggregator_tabletsw.c ++++ b/drivers/platform/surface/surface_aggregator_tabletsw.c +@@ -201,6 +201,7 @@ enum ssam_kip_cover_state { + SSAM_KIP_COVER_STATE_LAPTOP = 0x03, + SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, + SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, ++ SSAM_KIP_COVER_STATE_BOOK = 0x06, + }; + + static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state) +@@ -221,6 +222,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 stat + case SSAM_KIP_COVER_STATE_FOLDED_BACK: + return "folded-back"; + ++ case SSAM_KIP_COVER_STATE_BOOK: ++ return "book"; ++ + default: + dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state); + return ""; +@@ -233,6 +237,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 s + case SSAM_KIP_COVER_STATE_DISCONNECTED: + case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: + case SSAM_KIP_COVER_STATE_FOLDED_BACK: ++ case SSAM_KIP_COVER_STATE_BOOK: + return true; + + case SSAM_KIP_COVER_STATE_CLOSED: +-- +2.39.2 + diff --git a/queue-6.3/qed-qede-fix-scheduling-while-atomic.patch b/queue-6.3/qed-qede-fix-scheduling-while-atomic.patch new file mode 100644 index 00000000000..23bdbd2845c --- /dev/null +++ b/queue-6.3/qed-qede-fix-scheduling-while-atomic.patch @@ -0,0 +1,275 @@ +From 5b778f6cc80085ddb4472ef488023c0a6a77d460 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 16:56:00 +0530 +Subject: qed/qede: Fix scheduling while atomic + +From: Manish Chopra + +[ Upstream commit 42510dffd0e2c27046905f742172ed6662af5557 ] + +Statistics read through bond interface via sysfs causes +below bug and traces as it triggers the bonding module to +collect the slave device statistics while holding the spinlock, +beneath that qede->qed driver statistics flow gets scheduled out +due to usleep_range() used in PTT acquire logic + +[ 3673.988874] Hardware name: HPE ProLiant DL365 Gen10 Plus/ProLiant DL365 Gen10 Plus, BIOS A42 10/29/2021 +[ 3673.988878] Call Trace: +[ 3673.988891] dump_stack_lvl+0x34/0x44 +[ 3673.988908] __schedule_bug.cold+0x47/0x53 +[ 3673.988918] __schedule+0x3fb/0x560 +[ 3673.988929] schedule+0x43/0xb0 +[ 3673.988932] schedule_hrtimeout_range_clock+0xbf/0x1b0 +[ 3673.988937] ? 
__hrtimer_init+0xc0/0xc0 +[ 3673.988950] usleep_range+0x5e/0x80 +[ 3673.988955] qed_ptt_acquire+0x2b/0xd0 [qed] +[ 3673.988981] _qed_get_vport_stats+0x141/0x240 [qed] +[ 3673.989001] qed_get_vport_stats+0x18/0x80 [qed] +[ 3673.989016] qede_fill_by_demand_stats+0x37/0x400 [qede] +[ 3673.989028] qede_get_stats64+0x19/0xe0 [qede] +[ 3673.989034] dev_get_stats+0x5c/0xc0 +[ 3673.989045] netstat_show.constprop.0+0x52/0xb0 +[ 3673.989055] dev_attr_show+0x19/0x40 +[ 3673.989065] sysfs_kf_seq_show+0x9b/0xf0 +[ 3673.989076] seq_read_iter+0x120/0x4b0 +[ 3673.989087] new_sync_read+0x118/0x1a0 +[ 3673.989095] vfs_read+0xf3/0x180 +[ 3673.989099] ksys_read+0x5f/0xe0 +[ 3673.989102] do_syscall_64+0x3b/0x90 +[ 3673.989109] entry_SYSCALL_64_after_hwframe+0x44/0xae +[ 3673.989115] RIP: 0033:0x7f8467d0b082 +[ 3673.989119] Code: c0 e9 b2 fe ff ff 50 48 8d 3d ca 05 08 00 e8 35 e7 01 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 ec 28 48 89 54 24 +[ 3673.989121] RSP: 002b:00007ffffb21fd08 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 +[ 3673.989127] RAX: ffffffffffffffda RBX: 000000000100eca0 RCX: 00007f8467d0b082 +[ 3673.989128] RDX: 00000000000003ff RSI: 00007ffffb21fdc0 RDI: 0000000000000003 +[ 3673.989130] RBP: 00007f8467b96028 R08: 0000000000000010 R09: 00007ffffb21ec00 +[ 3673.989132] R10: 00007ffffb27b170 R11: 0000000000000246 R12: 00000000000000f0 +[ 3673.989134] R13: 0000000000000003 R14: 00007f8467b92000 R15: 0000000000045a05 +[ 3673.989139] CPU: 30 PID: 285188 Comm: read_all Kdump: loaded Tainted: G W OE + +Fix this by collecting the statistics asynchronously from a periodic +delayed work scheduled at default stats coalescing interval and return +the recent copy of statisitcs from .ndo_get_stats64(), also add ability +to configure/retrieve stats coalescing interval using below commands - + +ethtool -C ethx stats-block-usecs +ethtool -c ethx + +Fixes: 133fac0eedc3 ("qede: Add basic ethtool support") +Cc: Sudarsana Kalluru +Cc: David Miller +Signed-off-by: Manish Chopra +Link: https://lore.kernel.org/r/20230605112600.48238-1-manishc@marvell.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/qlogic/qed/qed_l2.c | 2 +- + drivers/net/ethernet/qlogic/qede/qede.h | 4 +++ + .../net/ethernet/qlogic/qede/qede_ethtool.c | 24 +++++++++++-- + drivers/net/ethernet/qlogic/qede/qede_main.c | 34 ++++++++++++++++++- + 4 files changed, 60 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c +index 2edd6bf64a3cc..7776d3bdd459a 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c +@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) + { + u32 i; + +- if (!cdev) { ++ if (!cdev || cdev->recov_in_prog) { + memset(stats, 0, sizeof(*stats)); + return; + } +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h +index f90dcfe9ee688..8a63f99d499c4 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede.h ++++ b/drivers/net/ethernet/qlogic/qede/qede.h +@@ -271,6 +271,10 @@ struct qede_dev { + #define QEDE_ERR_WARN 3 + + struct qede_dump_info dump_info; ++ struct delayed_work periodic_task; ++ unsigned long stats_coal_ticks; ++ u32 stats_coal_usecs; ++ spinlock_t stats_lock; /* lock for vport stats access */ + }; + + enum QEDE_STATE { +diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c 
b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +index 8034d812d5a00..d0a3395b2bc1f 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +@@ -430,6 +430,8 @@ static void qede_get_ethtool_stats(struct net_device *dev, + } + } + ++ spin_lock(&edev->stats_lock); ++ + for (i = 0; i < QEDE_NUM_STATS; i++) { + if (qede_is_irrelevant_stat(edev, i)) + continue; +@@ -439,6 +441,8 @@ static void qede_get_ethtool_stats(struct net_device *dev, + buf++; + } + ++ spin_unlock(&edev->stats_lock); ++ + __qede_unlock(edev); + } + +@@ -830,6 +834,7 @@ static int qede_get_coalesce(struct net_device *dev, + + coal->rx_coalesce_usecs = rx_coal; + coal->tx_coalesce_usecs = tx_coal; ++ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs; + + return rc; + } +@@ -843,6 +848,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, + int i, rc = 0; + u16 rxc, txc; + ++ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) { ++ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs; ++ if (edev->stats_coal_usecs) { ++ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs); ++ schedule_delayed_work(&edev->periodic_task, 0); ++ ++ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n", ++ edev->stats_coal_ticks); ++ } else { ++ cancel_delayed_work_sync(&edev->periodic_task); ++ } ++ } ++ + if (!netif_running(dev)) { + DP_INFO(edev, "Interface is down\n"); + return -EINVAL; +@@ -2253,7 +2271,8 @@ static int qede_get_per_coalesce(struct net_device *dev, + } + + static const struct ethtool_ops qede_ethtool_ops = { +- .supported_coalesce_params = ETHTOOL_COALESCE_USECS, ++ .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ++ ETHTOOL_COALESCE_STATS_BLOCK_USECS, + .get_link_ksettings = qede_get_link_ksettings, + .set_link_ksettings = qede_set_link_ksettings, + .get_drvinfo = qede_get_drvinfo, +@@ -2304,7 +2323,8 @@ static const struct ethtool_ops qede_ethtool_ops = { + }; + + static const struct ethtool_ops qede_vf_ethtool_ops = { +- .supported_coalesce_params = ETHTOOL_COALESCE_USECS, ++ .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ++ ETHTOOL_COALESCE_STATS_BLOCK_USECS, + .get_link_ksettings = qede_get_link_ksettings, + .get_drvinfo = qede_get_drvinfo, + .get_msglevel = qede_get_msglevel, +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c +index 261f982ca40da..36a75e84a084a 100644 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c +@@ -308,6 +308,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) + + edev->ops->get_vport_stats(edev->cdev, &stats); + ++ spin_lock(&edev->stats_lock); ++ + p_common->no_buff_discards = stats.common.no_buff_discards; + p_common->packet_too_big_discard = stats.common.packet_too_big_discard; + p_common->ttl0_discard = stats.common.ttl0_discard; +@@ -405,6 +407,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) + p_ah->tx_1519_to_max_byte_packets = + stats.ah.tx_1519_to_max_byte_packets; + } ++ ++ spin_unlock(&edev->stats_lock); + } + + static void qede_get_stats64(struct net_device *dev, +@@ -413,9 +417,10 @@ static void qede_get_stats64(struct net_device *dev, + struct qede_dev *edev = netdev_priv(dev); + struct qede_stats_common *p_common; + +- qede_fill_by_demand_stats(edev); + p_common = &edev->stats.common; + ++ spin_lock(&edev->stats_lock); ++ + stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + 
p_common->rx_bcast_pkts; + stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + +@@ -435,6 +440,8 @@ static void qede_get_stats64(struct net_device *dev, + stats->collisions = edev->stats.bb.tx_total_collisions; + stats->rx_crc_errors = p_common->rx_crc_errors; + stats->rx_frame_errors = p_common->rx_align_errors; ++ ++ spin_unlock(&edev->stats_lock); + } + + #ifdef CONFIG_QED_SRIOV +@@ -1064,6 +1071,23 @@ static void qede_unlock(struct qede_dev *edev) + rtnl_unlock(); + } + ++static void qede_periodic_task(struct work_struct *work) ++{ ++ struct qede_dev *edev = container_of(work, struct qede_dev, ++ periodic_task.work); ++ ++ qede_fill_by_demand_stats(edev); ++ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); ++} ++ ++static void qede_init_periodic_task(struct qede_dev *edev) ++{ ++ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); ++ spin_lock_init(&edev->stats_lock); ++ edev->stats_coal_usecs = USEC_PER_SEC; ++ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); ++} ++ + static void qede_sp_task(struct work_struct *work) + { + struct qede_dev *edev = container_of(work, struct qede_dev, +@@ -1083,6 +1107,7 @@ static void qede_sp_task(struct work_struct *work) + */ + + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { ++ cancel_delayed_work_sync(&edev->periodic_task); + #ifdef CONFIG_QED_SRIOV + /* SRIOV must be disabled outside the lock to avoid a deadlock. + * The recovery of the active VFs is currently not supported. +@@ -1273,6 +1298,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, + */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); ++ qede_init_periodic_task(edev); + + rc = register_netdev(edev->ndev); + if (rc) { +@@ -1297,6 +1323,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, + edev->rx_copybreak = QEDE_RX_HDR_SIZE; + + qede_log_probe(edev); ++ ++ /* retain user config (for example - after recovery) */ ++ if (edev->stats_coal_usecs) ++ schedule_delayed_work(&edev->periodic_task, 0); ++ + return 0; + + err4: +@@ -1365,6 +1396,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) + unregister_netdev(ndev); + + cancel_delayed_work_sync(&edev->sp_task); ++ cancel_delayed_work_sync(&edev->periodic_task); + + edev->ops->common->set_power_state(cdev, PCI_D0); + +-- +2.39.2 + diff --git a/queue-6.3/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch b/queue-6.3/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch new file mode 100644 index 00000000000..385939004d9 --- /dev/null +++ b/queue-6.3/rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch @@ -0,0 +1,67 @@ +From 4f0457a175677c5706726bd37e13266b63875d1f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 07:41:15 +0000 +Subject: rfs: annotate lockless accesses to RFS sock flow table + +From: Eric Dumazet + +[ Upstream commit 5c3b74a92aa285a3df722bf6329ba7ccf70346d6 ] + +Add READ_ONCE()/WRITE_ONCE() on accesses to the sock flow table. + +This also prevents a (smart ?) compiler to remove the condition in: + +if (table->ents[index] != newval) + table->ents[index] = newval; + +We need the condition to avoid dirtying a shared cache line. + +Fixes: fec5e652e58f ("rfs: Receive Flow Steering") +Signed-off-by: Eric Dumazet +Reviewed-by: Simon Horman +Reviewed-by: Kuniyuki Iwashima +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + include/linux/netdevice.h | 7 +++++-- + net/core/dev.c | 6 ++++-- + 2 files changed, 9 insertions(+), 4 deletions(-) + +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 7db9f960221d3..2771aa046ab2a 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -760,8 +760,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, + /* We only give a hint, preemption can change CPU under us */ + val |= raw_smp_processor_id(); + +- if (table->ents[index] != val) +- table->ents[index] = val; ++ /* The following WRITE_ONCE() is paired with the READ_ONCE() ++ * here, and another one in get_rps_cpu(). ++ */ ++ if (READ_ONCE(table->ents[index]) != val) ++ WRITE_ONCE(table->ents[index], val); + } + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index b3d8e74fcaf06..6d46eb0402ccd 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, + u32 next_cpu; + u32 ident; + +- /* First check into global flow table if there is a match */ +- ident = sock_flow_table->ents[hash & sock_flow_table->mask]; ++ /* First check into global flow table if there is a match. ++ * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). ++ */ ++ ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); + if ((ident ^ hash) & ~rps_cpu_mask) + goto try_rps; + +-- +2.39.2 + diff --git a/queue-6.3/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch b/queue-6.3/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch new file mode 100644 index 00000000000..14e0437a2c2 --- /dev/null +++ b/queue-6.3/rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch @@ -0,0 +1,73 @@ +From ec80803fa8a0c97d73f5d2e92de35bd6f4727cb4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 07:41:14 +0000 +Subject: rfs: annotate lockless accesses to sk->sk_rxhash + +From: Eric Dumazet + +[ Upstream commit 1e5c647c3f6d4f8497dedcd226204e1880e0ffb3 ] + +Add READ_ONCE()/WRITE_ONCE() on accesses to sk->sk_rxhash. + +This also prevents a (smart ?) compiler to remove the condition in: + +if (sk->sk_rxhash != newval) + sk->sk_rxhash = newval; + +We need the condition to avoid dirtying a shared cache line. + +Fixes: fec5e652e58f ("rfs: Receive Flow Steering") +Signed-off-by: Eric Dumazet +Reviewed-by: Simon Horman +Reviewed-by: Kuniyuki Iwashima +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + include/net/sock.h | 18 +++++++++++++----- + 1 file changed, 13 insertions(+), 5 deletions(-) + +diff --git a/include/net/sock.h b/include/net/sock.h +index 45e46a1c4afc6..f0654c44acf5f 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1152,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk) + * OR an additional socket flag + * [1] : sk_state and sk_prot are in the same cache line. + */ +- if (sk->sk_state == TCP_ESTABLISHED) +- sock_rps_record_flow_hash(sk->sk_rxhash); ++ if (sk->sk_state == TCP_ESTABLISHED) { ++ /* This READ_ONCE() is paired with the WRITE_ONCE() ++ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). 
++ */ ++ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); ++ } + } + #endif + } +@@ -1162,15 +1166,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk, + const struct sk_buff *skb) + { + #ifdef CONFIG_RPS +- if (unlikely(sk->sk_rxhash != skb->hash)) +- sk->sk_rxhash = skb->hash; ++ /* The following WRITE_ONCE() is paired with the READ_ONCE() ++ * here, and another one in sock_rps_record_flow(). ++ */ ++ if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) ++ WRITE_ONCE(sk->sk_rxhash, skb->hash); + #endif + } + + static inline void sock_rps_reset_rxhash(struct sock *sk) + { + #ifdef CONFIG_RPS +- sk->sk_rxhash = 0; ++ /* Paired with READ_ONCE() in sock_rps_record_flow() */ ++ WRITE_ONCE(sk->sk_rxhash, 0); + #endif + } + +-- +2.39.2 + diff --git a/queue-6.3/selftests-bpf-fix-sockopt_sk-selftest.patch b/queue-6.3/selftests-bpf-fix-sockopt_sk-selftest.patch new file mode 100644 index 00000000000..877e087bc46 --- /dev/null +++ b/queue-6.3/selftests-bpf-fix-sockopt_sk-selftest.patch @@ -0,0 +1,54 @@ +From 118305d25b52503ed4967f2cc0268eead9ed9d5f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 10:22:02 -0700 +Subject: selftests/bpf: Fix sockopt_sk selftest + +From: Yonghong Song + +[ Upstream commit 69844e335d8c22454746c7903776533d8b4ab8fa ] + +Commit f4e4534850a9 ("net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report") +fixed NETLINK_LIST_MEMBERSHIPS length report which caused +selftest sockopt_sk failure. The failure log looks like + + test_sockopt_sk:PASS:join_cgroup /sockopt_sk 0 nsec + run_test:PASS:skel_load 0 nsec + run_test:PASS:setsockopt_link 0 nsec + run_test:PASS:getsockopt_link 0 nsec + getsetsockopt:FAIL:Unexpected NETLINK_LIST_MEMBERSHIPS value unexpected Unexpected NETLINK_LIST_MEMBERSHIPS value: actual 8 != expected 4 + run_test:PASS:getsetsockopt 0 nsec + #201 sockopt_sk:FAIL + +In net/netlink/af_netlink.c, function netlink_getsockopt(), for NETLINK_LIST_MEMBERSHIPS, +nlk->ngroups equals to 36. Before Commit f4e4534850a9, the optlen is calculated as + ALIGN(nlk->ngroups / 8, sizeof(u32)) = 4 +After that commit, the optlen is + ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)) = 8 + +Fix the test by setting the expected optlen to be 8. 
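+
+As a sanity check, the two formulas can be reproduced with a small
+userspace program. ALIGN() and BITS_TO_BYTES() are re-implemented
+locally below purely for illustration (only the rounding behaviour
+matters), they are not the kernel headers:
+
+  #include <assert.h>
+  #include <stdio.h>
+
+  /* Local stand-ins for the kernel macros, illustration only. */
+  #define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1))
+  #define BITS_TO_BYTES(nr) (((nr) + 7) / 8)
+
+  int main(void)
+  {
+      unsigned int ngroups = 36;  /* nlk->ngroups in the failing test */
+      unsigned int old_len = ALIGN(ngroups / 8, 4);            /* sizeof(u32) == 4 */
+      unsigned int new_len = ALIGN(BITS_TO_BYTES(ngroups), 4);
+
+      printf("old optlen = %u, new optlen = %u\n", old_len, new_len);
+      assert(old_len == 4 && new_len == 8);
+      return 0;
+  }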
+ +Fixes: f4e4534850a9 ("net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report") +Signed-off-by: Yonghong Song +Signed-off-by: Andrii Nakryiko +Link: https://lore.kernel.org/bpf/20230606172202.1606249-1-yhs@fb.com +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/bpf/prog_tests/sockopt_sk.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +index 4512dd808c335..05d0e07da3942 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +@@ -209,7 +209,7 @@ static int getsetsockopt(void) + err, errno); + goto err; + } +- ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); ++ ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); + + free(big_buf); + close(fd); +-- +2.39.2 + diff --git a/queue-6.3/selftests-bpf-verify-optval-null-case.patch b/queue-6.3/selftests-bpf-verify-optval-null-case.patch new file mode 100644 index 00000000000..6d11abe97ee --- /dev/null +++ b/queue-6.3/selftests-bpf-verify-optval-null-case.patch @@ -0,0 +1,100 @@ +From 3055c79b6fe68f7f659680b0dd799050bd6779d9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 18 Apr 2023 15:53:39 -0700 +Subject: selftests/bpf: Verify optval=NULL case + +From: Stanislav Fomichev + +[ Upstream commit 833d67ecdc5f35f1ebf59d0fccc1ce771434be9c ] + +Make sure we get optlen exported instead of getting EFAULT. + +Signed-off-by: Stanislav Fomichev +Signed-off-by: Daniel Borkmann +Link: https://lore.kernel.org/bpf/20230418225343.553806-3-sdf@google.com +Stable-dep-of: 69844e335d8c ("selftests/bpf: Fix sockopt_sk selftest") +Signed-off-by: Sasha Levin +--- + .../selftests/bpf/prog_tests/sockopt_sk.c | 28 +++++++++++++++++++ + .../testing/selftests/bpf/progs/sockopt_sk.c | 12 ++++++++ + 2 files changed, 40 insertions(+) + +diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +index 60d952719d275..4512dd808c335 100644 +--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c ++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +@@ -3,6 +3,7 @@ + #include "cgroup_helpers.h" + + #include ++#include + #include "sockopt_sk.skel.h" + + #ifndef SOL_TCP +@@ -183,6 +184,33 @@ static int getsetsockopt(void) + goto err; + } + ++ /* optval=NULL case is handled correctly */ ++ ++ close(fd); ++ fd = socket(AF_NETLINK, SOCK_RAW, 0); ++ if (fd < 0) { ++ log_err("Failed to create AF_NETLINK socket"); ++ return -1; ++ } ++ ++ buf.u32 = 1; ++ optlen = sizeof(__u32); ++ err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen); ++ if (err) { ++ log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d", ++ err, errno); ++ goto err; ++ } ++ ++ optlen = 0; ++ err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen); ++ if (err) { ++ log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d", ++ err, errno); ++ goto err; ++ } ++ ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); ++ + free(big_buf); + close(fd); + return 0; +diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c +index c8d810010a946..fe1df4cd206eb 100644 +--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c ++++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c +@@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx) + __u8 *optval_end = ctx->optval_end; + __u8 *optval = 
ctx->optval; + struct sockopt_sk *storage; ++ struct bpf_sock *sk; ++ ++ /* Bypass AF_NETLINK. */ ++ sk = ctx->sk; ++ if (sk && sk->family == AF_NETLINK) ++ return 1; + + /* Make sure bpf_get_netns_cookie is callable. + */ +@@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx) + __u8 *optval_end = ctx->optval_end; + __u8 *optval = ctx->optval; + struct sockopt_sk *storage; ++ struct bpf_sock *sk; ++ ++ /* Bypass AF_NETLINK. */ ++ sk = ctx->sk; ++ if (sk && sk->family == AF_NETLINK) ++ return 1; + + /* Make sure bpf_get_netns_cookie is callable. + */ +-- +2.39.2 + diff --git a/queue-6.3/series b/queue-6.3/series new file mode 100644 index 00000000000..e57da484047 --- /dev/null +++ b/queue-6.3/series @@ -0,0 +1,69 @@ +spi-mt65xx-make-sure-operations-completed-before-unl.patch +platform-surface-aggregator-allow-completion-work-it.patch +platform-surface-aggregator_tabletsw-add-support-for.patch +spi-qup-request-dma-before-enabling-clocks.patch +afs-fix-setting-of-mtime-when-creating-a-file-dir-sy.patch +wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch +bpf-sockmap-avoid-potential-null-dereference-in-sk_p.patch +neighbour-fix-unaligned-access-to-pneigh_entry.patch +net-dsa-lan9303-allow-vid-0-in-port_fdb_-add-del-met.patch +net-ipv4-ping_group_range-allow-gid-from-2147483648-.patch +bpf-fix-uaf-in-task-local-storage.patch +bpf-fix-elem_size-not-being-set-for-inner-maps.patch +net-ipv6-fix-bool-int-mismatch-for-skip_notify_on_de.patch +net-smc-avoid-to-access-invalid-rmbs-mrs-in-smcrv1-a.patch +net-enetc-correct-the-statistics-of-rx-bytes.patch +net-enetc-correct-rx_bytes-statistics-of-xdp.patch +net-sched-fq_pie-ensure-reasonable-tca_fq_pie_quantu.patch +drm-i915-explain-the-magic-numbers-for-aux-sync-prec.patch +drm-i915-use-18-fast-wake-aux-sync-len.patch +bluetooth-split-bt_iso_qos-into-dedicated-structures.patch +bluetooth-iso-consider-right-cis-when-removing-cig-a.patch +bluetooth-iso-fix-cig-auto-allocation-to-select-conf.patch +bluetooth-hci_sync-add-lock-to-protect-hci_unregiste.patch +bluetooth-fix-l2cap_disconnect_req-deadlock.patch +bluetooth-iso-don-t-try-to-remove-cig-if-there-are-b.patch +bluetooth-hci_conn-add-support-for-linking-multiple-.patch +bluetooth-hci_conn-fix-not-matching-by-cis-id.patch +bluetooth-iso-use-correct-cis-order-in-set-cig-param.patch +bluetooth-l2cap-add-missing-checks-for-invalid-dcid.patch +wifi-mac80211-use-correct-iftype-he-cap.patch +wifi-cfg80211-reject-bad-ap-mld-address.patch +wifi-mac80211-mlme-fix-non-inheritence-element.patch +wifi-mac80211-don-t-translate-beacon-presp-addrs.patch +qed-qede-fix-scheduling-while-atomic.patch +accel-ivpu-ivpu_ipc-needs-generic_allocator.patch +accel-ivpu-reserve-all-non-command-bo-s-using-dma_re.patch +wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch +wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch +selftests-bpf-verify-optval-null-case.patch +selftests-bpf-fix-sockopt_sk-selftest.patch +netfilter-nf_tables-add-null-check-for-nla_nest_star.patch +netfilter-nft_bitwise-fix-register-tracking.patch +netfilter-conntrack-fix-null-pointer-dereference-in-.patch +netfilter-ipset-add-schedule-point-in-call_ad.patch +netfilter-nf_tables-out-of-bound-check-in-chain-blob.patch +drm-lima-fix-sched-context-destroy.patch +ipv6-rpl-fix-route-of-death.patch +tcp-gso-really-support-big-tcp.patch +rfs-annotate-lockless-accesses-to-sk-sk_rxhash.patch +rfs-annotate-lockless-accesses-to-rfs-sock-flow-tabl.patch +net-sched-add-rcu-annotations-around-qdisc-qdisc_sle.patch 
+drm-i915-selftests-add-some-missing-error-propagatio.patch +ice-make-writes-to-dev-gnssx-synchronous.patch +net-sched-move-rtm_tca_policy-declaration-to-include.patch +net-openvswitch-fix-upcall-counter-access-before-all.patch +net-sched-act_police-fix-sparse-errors-in-tcf_police.patch +net-sched-fix-possible-refcount-leak-in-tc_chain_tmp.patch +bpf-add-extra-path-pointer-check-to-d_path-helper.patch +drm-amdgpu-fix-null-pointer-dereference-error-in-amd.patch +lib-cpu_rmap-fix-potential-use-after-free-in-irq_cpu.patch +net-bcmgenet-fix-eee-implementation.patch +accel-ivpu-do-not-use-mutex_lock_interruptible.patch +bnxt_en-fix-bnxt_hwrm_update_rss_hash_cfg.patch +bnxt_en-don-t-issue-ap-reset-during-ethtool-s-reset-.patch +bnxt_en-query-default-vlan-before-vnic-setup-on-a-vf.patch +bnxt_en-skip-firmware-fatal-error-recovery-if-chip-i.patch +bnxt_en-prevent-kernel-panic-when-receiving-unexpect.patch +bnxt_en-implement-.set_port-.unset_port-udp-tunnel-c.patch +drm-msm-a6xx-initialize-gmu-mutex-earlier.patch diff --git a/queue-6.3/spi-mt65xx-make-sure-operations-completed-before-unl.patch b/queue-6.3/spi-mt65xx-make-sure-operations-completed-before-unl.patch new file mode 100644 index 00000000000..5af61b4ae78 --- /dev/null +++ b/queue-6.3/spi-mt65xx-make-sure-operations-completed-before-unl.patch @@ -0,0 +1,77 @@ +From 3579d55e6ac01247a650b878806616925be511de Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 1 May 2023 19:33:14 +0100 +Subject: spi: mt65xx: make sure operations completed before unloading + +From: Daniel Golle + +[ Upstream commit 4be47a5d59cbc9396a6ffd327913eb4c8d67a32f ] + +When unloading the spi-mt65xx kernel module during an ongoing spi-mem +operation the kernel will Oops shortly after unloading the module. +This is because wait_for_completion_timeout was still running and +returning into the no longer loaded module: + +Internal error: Oops: 0000000096000005 [#1] SMP +Modules linked in: [many, but spi-mt65xx is no longer there] +CPU: 0 PID: 2578 Comm: block Tainted: G W O 6.3.0-next-20230428+ #0 +Hardware name: Bananapi BPI-R3 (DT) +pstate: 804000c5 (Nzcv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +pc : __lock_acquire+0x18c/0x20e8 +lr : __lock_acquire+0x9b8/0x20e8 +sp : ffffffc009ec3400 +x29: ffffffc009ec3400 x28: 0000000000000001 x27: 0000000000000004 +x26: ffffff80082888c8 x25: 0000000000000000 x24: 0000000000000000 +x23: ffffffc009609da8 x22: ffffff8008288000 x21: ffffff8008288968 +x20: 00000000000003c2 x19: ffffff8008be7990 x18: 00000000000002af +x17: 0000000000000000 x16: 0000000000000000 x15: ffffffc008d78970 +x14: 000000000000080d x13: 00000000000002af x12: 00000000ffffffea +x11: 00000000ffffefff x10: ffffffc008dd0970 x9 : ffffffc008d78918 +x8 : 0000000000017fe8 x7 : 0000000000000001 x6 : 0000000000000000 +x5 : ffffff807fb53910 x4 : 0000000000000000 x3 : 0000000000000027 +x2 : 0000000000000027 x1 : 0000000000000000 x0 : 00000000000c03c2 +Call trace: + __lock_acquire+0x18c/0x20e8 + lock_acquire+0x100/0x2a4 + _raw_spin_lock_irq+0x58/0x74 + __wait_for_common+0xe0/0x1b4 + wait_for_completion_timeout+0x1c/0x24 + 0xffffffc000acc8a4 <--- used to be mtk_spi_transfer_wait + spi_mem_exec_op+0x390/0x3ec + spi_mem_no_dirmap_read+0x6c/0x88 + spi_mem_dirmap_read+0xcc/0x12c + spinand_read_page+0xf8/0x1dc + spinand_mtd_read+0x1b4/0x2fc + mtd_read_oob_std+0x58/0x7c + mtd_read_oob+0x8c/0x148 + mtd_read+0x50/0x6c + ... + +Prevent this by completing in mtk_spi_remove if needed. 
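+
+The rule being applied is the general one for completions: if a waiter
+may still be parked on a completion when the device goes away, signal
+it during teardown so the sleeper wakes up before the code it would
+return into is unloaded. A minimal sketch of that pattern follows,
+using hypothetical foo_* names rather than the driver's own:
+
+  static int foo_remove(struct platform_device *pdev)
+  {
+      struct foo_priv *priv = platform_get_drvdata(pdev);
+
+      /* Wake anyone still blocked in wait_for_completion_timeout()
+       * on this completion before the rest of teardown runs.
+       */
+      if (!completion_done(&priv->xfer_done))
+          complete(&priv->xfer_done);
+
+      /* ...normal teardown (quiesce hardware, free resources)... */
+      return 0;
+  }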
+ +Fixes: 9f763fd20da7 ("spi: mediatek: add spi memory support for ipm design") +Signed-off-by: Daniel Golle +Link: https://lore.kernel.org/r/ZFAF6pJxMu1z6k4w@makrotopia.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + drivers/spi/spi-mt65xx.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c +index 9eab6c20dbc56..6e95efb50acbc 100644 +--- a/drivers/spi/spi-mt65xx.c ++++ b/drivers/spi/spi-mt65xx.c +@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev) + struct mtk_spi *mdata = spi_master_get_devdata(master); + int ret; + ++ if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) ++ complete(&mdata->spimem_done); ++ + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; +-- +2.39.2 + diff --git a/queue-6.3/spi-qup-request-dma-before-enabling-clocks.patch b/queue-6.3/spi-qup-request-dma-before-enabling-clocks.patch new file mode 100644 index 00000000000..43bc18ac011 --- /dev/null +++ b/queue-6.3/spi-qup-request-dma-before-enabling-clocks.patch @@ -0,0 +1,122 @@ +From 64bd886cc51f705d256c4ba3ab22b9f1eeb9261a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 18 May 2023 15:04:25 +0200 +Subject: spi: qup: Request DMA before enabling clocks + +From: Stephan Gerhold + +[ Upstream commit 0c331fd1dccfba657129380ee084b95c1cedfbef ] + +It is usually better to request all necessary resources (clocks, +regulators, ...) before starting to make use of them. That way they do +not change state in case one of the resources is not available yet and +probe deferral (-EPROBE_DEFER) is necessary. This is particularly +important for DMA channels and IOMMUs which are not enforced by +fw_devlink yet (unless you use fw_devlink.strict=1). + +spi-qup does this in the wrong order, the clocks are enabled and +disabled again when the DMA channels are not available yet. + +This causes issues in some cases: On most SoCs one of the SPI QUP +clocks is shared with the UART controller. When using earlycon UART is +actively used during boot but might not have probed yet, usually for +the same reason (waiting for the DMA controller). In this case, the +brief enable/disable cycle ends up gating the clock and further UART +console output will halt the system completely. + +Avoid this by requesting the DMA channels before changing the clock +state. 
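+
+The ordering rule the patch follows is general: request every resource
+that may still defer (DMA channels, IOMMUs, ...) before changing any
+hardware state such as clocks, so an -EPROBE_DEFER return leaves
+nothing half-enabled. A minimal probe sketch of that ordering, with
+hypothetical foo_* names and not the spi-qup code itself:
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+      struct dma_chan *rx;
+      struct clk *core;
+      int ret;
+
+      /* May fail with -EPROBE_DEFER; nothing has been enabled yet. */
+      rx = dma_request_chan(&pdev->dev, "rx");
+      if (IS_ERR(rx))
+          return PTR_ERR(rx);
+
+      core = devm_clk_get(&pdev->dev, "core");
+      if (IS_ERR(core)) {
+          ret = PTR_ERR(core);
+          goto err_release_dma;
+      }
+
+      /* Only now start touching hardware state. */
+      ret = clk_prepare_enable(core);
+      if (ret)
+          goto err_release_dma;
+
+      return 0;
+
+  err_release_dma:
+      dma_release_channel(rx);
+      return ret;
+  }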
+ +Fixes: 612762e82ae6 ("spi: qup: Add DMA capabilities") +Signed-off-by: Stephan Gerhold +Link: https://lore.kernel.org/r/20230518-spi-qup-clk-defer-v1-1-f49fc9ca4e02@gerhold.net +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + drivers/spi/spi-qup.c | 37 ++++++++++++++++++------------------- + 1 file changed, 18 insertions(+), 19 deletions(-) + +diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c +index 205e54f157b4a..fb6b7738b4f55 100644 +--- a/drivers/spi/spi-qup.c ++++ b/drivers/spi/spi-qup.c +@@ -1029,23 +1029,8 @@ static int spi_qup_probe(struct platform_device *pdev) + return -ENXIO; + } + +- ret = clk_prepare_enable(cclk); +- if (ret) { +- dev_err(dev, "cannot enable core clock\n"); +- return ret; +- } +- +- ret = clk_prepare_enable(iclk); +- if (ret) { +- clk_disable_unprepare(cclk); +- dev_err(dev, "cannot enable iface clock\n"); +- return ret; +- } +- + master = spi_alloc_master(dev, sizeof(struct spi_qup)); + if (!master) { +- clk_disable_unprepare(cclk); +- clk_disable_unprepare(iclk); + dev_err(dev, "cannot allocate master\n"); + return -ENOMEM; + } +@@ -1093,6 +1078,19 @@ static int spi_qup_probe(struct platform_device *pdev) + spin_lock_init(&controller->lock); + init_completion(&controller->done); + ++ ret = clk_prepare_enable(cclk); ++ if (ret) { ++ dev_err(dev, "cannot enable core clock\n"); ++ goto error_dma; ++ } ++ ++ ret = clk_prepare_enable(iclk); ++ if (ret) { ++ clk_disable_unprepare(cclk); ++ dev_err(dev, "cannot enable iface clock\n"); ++ goto error_dma; ++ } ++ + iomode = readl_relaxed(base + QUP_IO_M_MODES); + + size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode); +@@ -1122,7 +1120,7 @@ static int spi_qup_probe(struct platform_device *pdev) + ret = spi_qup_set_state(controller, QUP_STATE_RESET); + if (ret) { + dev_err(dev, "cannot set RESET state\n"); +- goto error_dma; ++ goto error_clk; + } + + writel_relaxed(0, base + QUP_OPERATIONAL); +@@ -1146,7 +1144,7 @@ static int spi_qup_probe(struct platform_device *pdev) + ret = devm_request_irq(dev, irq, spi_qup_qup_irq, + IRQF_TRIGGER_HIGH, pdev->name, controller); + if (ret) +- goto error_dma; ++ goto error_clk; + + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); +@@ -1161,11 +1159,12 @@ static int spi_qup_probe(struct platform_device *pdev) + + disable_pm: + pm_runtime_disable(&pdev->dev); ++error_clk: ++ clk_disable_unprepare(cclk); ++ clk_disable_unprepare(iclk); + error_dma: + spi_qup_release_dma(master); + error: +- clk_disable_unprepare(cclk); +- clk_disable_unprepare(iclk); + spi_master_put(master); + return ret; + } +-- +2.39.2 + diff --git a/queue-6.3/tcp-gso-really-support-big-tcp.patch b/queue-6.3/tcp-gso-really-support-big-tcp.patch new file mode 100644 index 00000000000..282b4dfdb98 --- /dev/null +++ b/queue-6.3/tcp-gso-really-support-big-tcp.patch @@ -0,0 +1,100 @@ +From 5a84117f2450a526fd3c2bcb954f2c82e3a8fdca Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 5 Jun 2023 16:16:47 +0000 +Subject: tcp: gso: really support BIG TCP + +From: Eric Dumazet + +[ Upstream commit 82a01ab35bd02ba4b0b4e12bc95c5b69240eb7b0 ] + +We missed that tcp_gso_segment() was assuming skb->len was smaller than 65535 : + +oldlen = (u16)~skb->len; + +This part came with commit 0718bcc09b35 ("[NET]: Fix CHECKSUM_HW GSO problems.") + +This leads to wrong TCP checksum. + +Adapt the code to accept arbitrary packet length. 
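+
+The arithmetic behind the change can be checked in userspace. In the
+16-bit one's-complement world of the Internet checksum a 32-bit value
+v contributes v mod 0xffff, and adding the full 32-bit ~v subtracts v
+again; truncating ~v to 16 bits only subtracts the low halfword, which
+stops being equivalent once the length no longer fits in 16 bits. The
+helpers below are local stand-ins for csum_fold()/csum_add() (byte
+order ignored for clarity), not the kernel ones:
+
+  #include <assert.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static uint16_t fold32(uint32_t v)            /* fold 32 bits into 16 */
+  {
+      while (v >> 16)
+          v = (v & 0xffff) + (v >> 16);
+      return (uint16_t)v;
+  }
+
+  static uint16_t add16(uint16_t a, uint32_t b) /* one's-complement add */
+  {
+      return fold32((uint32_t)a + fold32(b));
+  }
+
+  int main(void)
+  {
+      uint32_t old_len = 70000;   /* > 64 KiB, as with BIG TCP */
+      uint32_t new_len = 1448;
+      uint16_t base = 0x1234;     /* arbitrary partial checksum */
+
+      uint16_t scratch = add16(base, new_len);
+      uint16_t full    = add16(add16(base, old_len), ~old_len + new_len);
+      uint16_t trunc   = add16(add16(base, old_len),
+                               (uint16_t)~old_len + new_len);
+
+      printf("scratch=%#x full=%#x trunc=%#x\n", scratch, full, trunc);
+      assert(full == scratch);    /* 32-bit ~old_len adjusts correctly */
+      assert(trunc != scratch);   /* truncation loses old_len >> 16    */
+      return 0;
+  }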
+ +v2: + - use two csum_add() instead of csum_fold() (Alexander Duyck) + - Change delta type to __wsum to reduce casts (Alexander Duyck) + +Fixes: 09f3d1a3a52c ("ipv6/gso: remove temporary HBH/jumbo header") +Signed-off-by: Eric Dumazet +Reviewed-by: Alexander Duyck +Reviewed-by: Simon Horman +Link: https://lore.kernel.org/r/20230605161647.3624428-1-edumazet@google.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/ipv4/tcp_offload.c | 19 +++++++++---------- + 1 file changed, 9 insertions(+), 10 deletions(-) + +diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c +index 45dda78893870..4851211aa60d6 100644 +--- a/net/ipv4/tcp_offload.c ++++ b/net/ipv4/tcp_offload.c +@@ -60,12 +60,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + struct tcphdr *th; + unsigned int thlen; + unsigned int seq; +- __be32 delta; + unsigned int oldlen; + unsigned int mss; + struct sk_buff *gso_skb = skb; + __sum16 newcheck; + bool ooo_okay, copy_destructor; ++ __wsum delta; + + th = tcp_hdr(skb); + thlen = th->doff * 4; +@@ -75,7 +75,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + if (!pskb_may_pull(skb, thlen)) + goto out; + +- oldlen = (u16)~skb->len; ++ oldlen = ~skb->len; + __skb_pull(skb, thlen); + + mss = skb_shinfo(skb)->gso_size; +@@ -110,7 +110,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + if (skb_is_gso(segs)) + mss *= skb_shinfo(segs)->gso_segs; + +- delta = htonl(oldlen + (thlen + mss)); ++ delta = (__force __wsum)htonl(oldlen + thlen + mss); + + skb = segs; + th = tcp_hdr(skb); +@@ -119,8 +119,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) + tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); + +- newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + +- (__force u32)delta)); ++ newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); + + while (skb->next) { + th->fin = th->psh = 0; +@@ -165,11 +164,11 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, + WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc)); + } + +- delta = htonl(oldlen + (skb_tail_pointer(skb) - +- skb_transport_header(skb)) + +- skb->data_len); +- th->check = ~csum_fold((__force __wsum)((__force u32)th->check + +- (__force u32)delta)); ++ delta = (__force __wsum)htonl(oldlen + ++ (skb_tail_pointer(skb) - ++ skb_transport_header(skb)) + ++ skb->data_len); ++ th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta)); + if (skb->ip_summed == CHECKSUM_PARTIAL) + gso_reset_checksum(skb, ~th->check); + else +-- +2.39.2 + diff --git a/queue-6.3/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch b/queue-6.3/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch new file mode 100644 index 00000000000..142cc80856a --- /dev/null +++ b/queue-6.3/wifi-cfg80211-fix-locking-in-regulatory-disconnect.patch @@ -0,0 +1,41 @@ +From 2bfe8165ee81b54f0868f62da04242111d2f0ad6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 14:34:48 +0200 +Subject: wifi: cfg80211: fix locking in regulatory disconnect + +From: Johannes Berg + +[ Upstream commit f7e60032c6618dfd643c7210d5cba2789e2de2e2 ] + +This should use wiphy_lock() now instead of requiring the +RTNL, since __cfg80211_leave() via cfg80211_leave() is now +requiring that lock to be held. 
+ +Fixes: a05829a7222e ("cfg80211: avoid holding the RTNL when calling the driver") +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/wireless/reg.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index 0d40d6af7e10a..949e1fb3bec67 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -2440,11 +2440,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy) + struct wireless_dev *wdev; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + +- ASSERT_RTNL(); +- ++ wiphy_lock(wiphy); + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) + if (!reg_wdev_chan_valid(wiphy, wdev)) + cfg80211_leave(rdev, wdev); ++ wiphy_unlock(wiphy); + } + + static void reg_check_chans_work(struct work_struct *work) +-- +2.39.2 + diff --git a/queue-6.3/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch b/queue-6.3/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch new file mode 100644 index 00000000000..0714f1bb35d --- /dev/null +++ b/queue-6.3/wifi-cfg80211-fix-locking-in-sched-scan-stop-work.patch @@ -0,0 +1,41 @@ +From b0ef79e27230d98cf66182ce40c70b6505abef80 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Jun 2023 14:34:47 +0200 +Subject: wifi: cfg80211: fix locking in sched scan stop work + +From: Johannes Berg + +[ Upstream commit 3e54ed8247c94c8bdf370bd872bd9dfe72b1b12b ] + +This should use wiphy_lock() now instead of acquiring the +RTNL, since cfg80211_stop_sched_scan_req() now needs that. + +Fixes: a05829a7222e ("cfg80211: avoid holding the RTNL when calling the driver") +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/wireless/core.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/wireless/core.c b/net/wireless/core.c +index 5b0c4d5b80cf5..b3ec9eaec36b3 100644 +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -368,12 +368,12 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work) + rdev = container_of(work, struct cfg80211_registered_device, + sched_scan_stop_wk); + +- rtnl_lock(); ++ wiphy_lock(&rdev->wiphy); + list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { + if (req->nl_owner_dead) + cfg80211_stop_sched_scan_req(rdev, req, false); + } +- rtnl_unlock(); ++ wiphy_unlock(&rdev->wiphy); + } + + static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) +-- +2.39.2 + diff --git a/queue-6.3/wifi-cfg80211-reject-bad-ap-mld-address.patch b/queue-6.3/wifi-cfg80211-reject-bad-ap-mld-address.patch new file mode 100644 index 00000000000..81047600d25 --- /dev/null +++ b/queue-6.3/wifi-cfg80211-reject-bad-ap-mld-address.patch @@ -0,0 +1,39 @@ +From 6e0dc16bbcf2affbec53e7e721acc7c31e9d8eb7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Jun 2023 12:11:18 +0300 +Subject: wifi: cfg80211: reject bad AP MLD address + +From: Johannes Berg + +[ Upstream commit 727073ca5e55ab6a07df316250be8a12606e8677 ] + +When trying to authenticate, if the AP MLD address isn't +a valid address, mac80211 can throw a warning. Avoid that +by rejecting such addresses. 
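+
+"Valid" here is the usual Ethernet address rule enforced by
+is_valid_ether_addr(): the all-zero address and any address with the
+multicast (I/G) bit set in the first octet are rejected. A userspace
+restatement of that check, for illustration only:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static bool valid_ether_addr(const uint8_t a[6])
+  {
+      bool all_zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
+
+      return !all_zero && !(a[0] & 0x01);  /* no zero, no multicast */
+  }
+
+  int main(void)
+  {
+      const uint8_t zero[6]  = { 0 };
+      const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+      const uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
+
+      printf("zero=%d mcast=%d ucast=%d\n", valid_ether_addr(zero),
+             valid_ether_addr(mcast), valid_ether_addr(ucast));
+      return 0;
+  }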
+ +Fixes: d648c23024bd ("wifi: nl80211: support MLO in auth/assoc") +Signed-off-by: Johannes Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230604120651.89188912bd1d.I8dbc6c8ee0cb766138803eec59508ef4ce477709@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/wireless/nl80211.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 4f63059efd813..1922fccb96ace 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -10642,6 +10642,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) + if (!info->attrs[NL80211_ATTR_MLD_ADDR]) + return -EINVAL; + req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); ++ if (!is_valid_ether_addr(req.ap_mld_addr)) ++ return -EINVAL; + } + + req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, +-- +2.39.2 + diff --git a/queue-6.3/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch b/queue-6.3/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch new file mode 100644 index 00000000000..65f5ac7ccc3 --- /dev/null +++ b/queue-6.3/wifi-mac80211-don-t-translate-beacon-presp-addrs.patch @@ -0,0 +1,44 @@ +From d47d0b3ddc9e4b89edd8795c49f6b6d956cfdc57 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Jun 2023 12:11:15 +0300 +Subject: wifi: mac80211: don't translate beacon/presp addrs + +From: Johannes Berg + +[ Upstream commit 47c171a426e305f2225b92ed7b5e0a990c95f6d4 ] + +Don't do link address translation for beacons and probe responses, +this leads to reporting multiple scan list entries for the same AP +(one with the MLD address) which just breaks things. + +We might need to extend this in the future for some other (action) +frames that aren't MLD addressed. 
+ +Fixes: 42fb9148c078 ("wifi: mac80211: do link->MLD address translation on RX") +Signed-off-by: Johannes Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230604120651.62adead1b43a.Ifc25eed26ebf3b269f60b1ec10060156d0e7ec0d@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/rx.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index af57616d2f1d9..0e66ece35f8e2 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4884,7 +4884,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, + } + + if (unlikely(rx->sta && rx->sta->sta.mlo) && +- is_unicast_ether_addr(hdr->addr1)) { ++ is_unicast_ether_addr(hdr->addr1) && ++ !ieee80211_is_probe_resp(hdr->frame_control) && ++ !ieee80211_is_beacon(hdr->frame_control)) { + /* translate to MLD addresses */ + if (ether_addr_equal(link->conf->addr, hdr->addr1)) + ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); +-- +2.39.2 + diff --git a/queue-6.3/wifi-mac80211-mlme-fix-non-inheritence-element.patch b/queue-6.3/wifi-mac80211-mlme-fix-non-inheritence-element.patch new file mode 100644 index 00000000000..94274757ed7 --- /dev/null +++ b/queue-6.3/wifi-mac80211-mlme-fix-non-inheritence-element.patch @@ -0,0 +1,72 @@ +From e2d9d86e00a8cec3cd30d522e6194f30481f94ad Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Jun 2023 12:11:16 +0300 +Subject: wifi: mac80211: mlme: fix non-inheritence element + +From: Johannes Berg + +[ Upstream commit 68c228557d52616cf040651abefda9839de7086a ] + +There were two bugs when creating the non-inheritence +element: + 1) 'at_extension' needs to be declared outside the loop, + otherwise the value resets every iteration and we + can never really switch properly + 2) 'added' never got set to true, so we always cut off + the extension element again at the end of the function + +This shows another issue that we might add a list but no +extension list, but we need to make the extension list a +zero-length one in that case. + +Fix all these issues. While at it, add a comment explaining +the trim. 
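+
+Bug 1) is the classic latch-inside-the-loop mistake: a flag declared in
+the loop body is re-initialised on every iteration, so it can never
+really latch. A stripped-down illustration follows (the 256 threshold
+merely stands in for the normal/extension element split, it is not the
+mac80211 value): with the flag declared outside the loop the message is
+printed once for the example list, with the commented-out variant it
+would be printed for both 300 and 310.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      int elems[] = { 3, 7, 300, 310 };
+      bool at_extension = false;  /* correct: declared once, outside */
+
+      for (unsigned int i = 0; i < sizeof(elems) / sizeof(elems[0]); i++) {
+          /* bool at_extension = false;   <-- the broken variant */
+          if (!at_extension && elems[i] >= 256) {
+              printf("switch to extension list at %d\n", elems[i]);
+              at_extension = true;
+          }
+      }
+      return 0;
+  }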
+ +Fixes: 81151ce462e5 ("wifi: mac80211: support MLO authentication/association with one link") +Signed-off-by: Johannes Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230604120651.3addaa5c4782.If3a78f9305997ad7ef4ba7ffc17a8234c956f613@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/mlme.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 60792dfabc9d6..7a970b6dda640 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -1217,6 +1217,7 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb, + const u16 *inner) + { + unsigned int skb_len = skb->len; ++ bool at_extension = false; + bool added = false; + int i, j; + u8 *len, *list_len = NULL; +@@ -1228,7 +1229,6 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb, + for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) { + u16 elem = outer[i]; + bool have_inner = false; +- bool at_extension = false; + + /* should at least be sorted in the sense of normal -> ext */ + WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS); +@@ -1257,8 +1257,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb, + } + *list_len += 1; + skb_put_u8(skb, (u8)elem); ++ added = true; + } + ++ /* if we added a list but no extension list, make a zero-len one */ ++ if (added && (!at_extension || !list_len)) ++ skb_put_u8(skb, 0); ++ ++ /* if nothing added remove extension element completely */ + if (!added) + skb_trim(skb, skb_len); + else +-- +2.39.2 + diff --git a/queue-6.3/wifi-mac80211-use-correct-iftype-he-cap.patch b/queue-6.3/wifi-mac80211-use-correct-iftype-he-cap.patch new file mode 100644 index 00000000000..35502c52e2e --- /dev/null +++ b/queue-6.3/wifi-mac80211-use-correct-iftype-he-cap.patch @@ -0,0 +1,68 @@ +From 5e8428f86b12f73e37fe2904b65964b2613ae1f3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 4 Jun 2023 12:11:23 +0300 +Subject: wifi: mac80211: use correct iftype HE cap + +From: Johannes Berg + +[ Upstream commit c37ab22bb1a43cdca8bf69cc0a22f1ccfc449e68 ] + +We already check that the right iftype capa exists, +but then don't use it. Assign it to a variable so we +can actually use it, and then do that. 
+ +Fixes: bac2fd3d7534 ("mac80211: remove use of ieee80211_get_he_sta_cap()") +Signed-off-by: Johannes Berg +Signed-off-by: Gregory Greenman +Link: https://lore.kernel.org/r/20230604120651.0e908e5c5fdd.Iac142549a6144ac949ebd116b921a59ae5282735@changeid +Signed-off-by: Johannes Berg +Signed-off-by: Sasha Levin +--- + net/mac80211/he.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +diff --git a/net/mac80211/he.c b/net/mac80211/he.c +index 729f261520c77..0322abae08250 100644 +--- a/net/mac80211/he.c ++++ b/net/mac80211/he.c +@@ -3,7 +3,7 @@ + * HE handling + * + * Copyright(c) 2017 Intel Deutschland GmbH +- * Copyright(c) 2019 - 2022 Intel Corporation ++ * Copyright(c) 2019 - 2023 Intel Corporation + */ + + #include "ieee80211_i.h" +@@ -114,6 +114,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + struct link_sta_info *link_sta) + { + struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; ++ const struct ieee80211_sta_he_cap *own_he_cap_ptr; + struct ieee80211_sta_he_cap own_he_cap; + struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; + u8 he_ppe_size; +@@ -123,12 +124,16 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, + + memset(he_cap, 0, sizeof(*he_cap)); + +- if (!he_cap_ie || +- !ieee80211_get_he_iftype_cap(sband, +- ieee80211_vif_type_p2p(&sdata->vif))) ++ if (!he_cap_ie) + return; + +- own_he_cap = sband->iftype_data->he_cap; ++ own_he_cap_ptr = ++ ieee80211_get_he_iftype_cap(sband, ++ ieee80211_vif_type_p2p(&sdata->vif)); ++ if (!own_he_cap_ptr) ++ return; ++ ++ own_he_cap = *own_he_cap_ptr; + + /* Make sure size is OK */ + mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); +-- +2.39.2 + diff --git a/queue-6.3/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch b/queue-6.3/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch new file mode 100644 index 00000000000..b1fc7df026b --- /dev/null +++ b/queue-6.3/wifi-mt76-mt7615-fix-possible-race-in-mt7615_mac_sta.patch @@ -0,0 +1,40 @@ +From 773f6f28e65c8b0e3cd65a7f73d8505f96a739b6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 24 May 2023 16:39:32 +0200 +Subject: wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll + +From: Lorenzo Bianconi + +[ Upstream commit 30bc32c7c1f975cc3c14e1c7dc437266311282cf ] + +Grab sta_poll_lock spinlock in mt7615_mac_sta_poll routine in order to +avoid possible races with mt7615_mac_add_txs() or mt7615_mac_fill_rx() +removing msta pointer from sta_poll_list. + +Fixes: a621372a04ac ("mt76: mt7615: rework mt7615_mac_sta_poll for usb code") +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Kalle Valo +Link: https://lore.kernel.org/r/48b23404b759de4f1db2ef85975c72a4aeb1097c.1684938695.git.lorenzo@kernel.org +Signed-off-by: Sasha Levin +--- + drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +index eafa0f204c1f8..12f7bcec53ae1 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +@@ -919,7 +919,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev) + + msta = list_first_entry(&sta_poll_list, struct mt7615_sta, + poll_list); ++ ++ spin_lock_bh(&dev->sta_poll_lock); + list_del_init(&msta->poll_list); ++ spin_unlock_bh(&dev->sta_poll_lock); + + addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; + +-- +2.39.2 +