--- /dev/null
+From 74e97958121aa1f5854da6effba70143f051b0cd Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Thu, 21 Mar 2024 10:02:04 -0700
+Subject: btrfs: qgroup: fix qgroup prealloc rsv leak in subvolume operations
+
+From: Boris Burkov <boris@bur.io>
+
+commit 74e97958121aa1f5854da6effba70143f051b0cd upstream.
+
+Create subvolume, create snapshot and delete subvolume all use
+btrfs_subvolume_reserve_metadata() to reserve metadata for the changes
+done to the parent subvolume's fs tree, which cannot be mediated in the
+normal way via start_transaction. When quota groups (squota or qgroups)
+are enabled, this reserves qgroup metadata of type PREALLOC. Once the
+operation is associated with a transaction, we convert PREALLOC to
+PERTRANS, which gets cleared in bulk at the end of the transaction.
+
+However, the error paths of these three operations did not implement
+this lifecycle correctly. They unconditionally converted the PREALLOC to
+PERTRANS in a generic cleanup step, regardless of errors or of whether
+the operation was ever fully associated with a transaction. As a result,
+error paths occasionally converted this rsv to PERTRANS without
+record_root_in_trans having succeeded, which meant that unless that root
+got recorded in the transaction by some other thread, the end of the
+transaction would not free that root's PERTRANS, leaking it. Ultimately,
+this manifested as a WARN in CONFIG_BTRFS_DEBUG builds at unmount time
+for the leaked reservation.
+
+The fix is to ensure that every qgroup PREALLOC reservation observes the
+following properties:
+
+1. any failure before record_root_in_trans is called successfully
+ results in freeing the PREALLOC reservation.
+2. after record_root_in_trans, we convert to PERTRANS, and now the
+ transaction owns freeing the reservation.
+
+This patch enforces those properties on the three operations. Without
+it, generic/269 with squotas enabled at mkfs time would fail in ~5-10
+runs on my system. With this patch, it ran successfully 1000 times in a
+row.
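+
+In sketch form (identifiers taken from the hunks below; error handling
+abbreviated), every caller now follows this pattern:
+
+	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, nitems, false);
+	if (ret)
+		goto out;			/* nothing reserved yet */
+	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		goto out_release;		/* property 1: free the PREALLOC */
+	ret = btrfs_record_root_in_trans(trans, root);
+	if (ret)
+		goto out_end_trans;		/* also reaches out_release */
+	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+	qgroup_reserved = 0;			/* property 2: transaction owns it now */
+	...
+out_release:
+	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+	if (qgroup_reserved)
+		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);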
+
+Fixes: e85fde5162bf ("btrfs: qgroup: fix qgroup meta rsv leak for subvolume operations")
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+[Xiangyu: backported to fix CVE-2024-35956; in 6.1,
+btrfs_subvolume_release_metadata() is declared in ctree.h rather than
+root-tree.h, so the header hunk was moved from root-tree.h to ctree.h]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ctree.h | 2 --
+ fs/btrfs/inode.c | 13 ++++++++++++-
+ fs/btrfs/ioctl.c | 36 ++++++++++++++++++++++++++++--------
+ fs/btrfs/root-tree.c | 10 ----------
+ 4 files changed, 40 insertions(+), 21 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2987,8 +2987,6 @@ enum btrfs_flush_state {
+ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems, bool use_global_rsv);
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv);
+ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
+
+ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4707,6 +4707,7 @@ int btrfs_delete_subvolume(struct inode
+ struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv block_rsv;
+ u64 root_flags;
++ u64 qgroup_reserved = 0;
+ int ret;
+
+ down_write(&fs_info->subvol_sem);
+@@ -4751,12 +4752,20 @@ int btrfs_delete_subvolume(struct inode
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ if (ret)
+ goto out_undead;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_release;
+ }
++ ret = btrfs_record_root_in_trans(trans, root);
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ goto out_end_trans;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+
+@@ -4815,7 +4824,9 @@ out_end_trans:
+ ret = btrfs_end_transaction(trans);
+ inode->i_flags |= S_DEAD;
+ out_release:
+- btrfs_subvolume_release_metadata(root, &block_rsv);
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ out_undead:
+ if (ret) {
+ spin_lock(&dest->root_item_lock);
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -592,6 +592,7 @@ static noinline int create_subvol(struct
+ int ret;
+ dev_t anon_dev;
+ u64 objectid;
++ u64 qgroup_reserved = 0;
+
+ root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
+ if (!root_item)
+@@ -629,13 +630,18 @@ static noinline int create_subvol(struct
+ trans_num_items, false);
+ if (ret)
+ goto out_new_inode_args;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+- goto out_new_inode_args;
++ goto out_release_rsv;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret)
++ goto out;
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+
+@@ -744,12 +750,15 @@ static noinline int create_subvol(struct
+ out:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+
+ if (ret)
+ btrfs_end_transaction(trans);
+ else
+ ret = btrfs_commit_transaction(trans);
++out_release_rsv:
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+ out_inode:
+@@ -771,6 +780,8 @@ static int create_snapshot(struct btrfs_
+ struct btrfs_pending_snapshot *pending_snapshot;
+ unsigned int trans_num_items;
+ struct btrfs_trans_handle *trans;
++ struct btrfs_block_rsv *block_rsv;
++ u64 qgroup_reserved = 0;
+ int ret;
+
+ /* We do not support snapshotting right now. */
+@@ -807,19 +818,19 @@ static int create_snapshot(struct btrfs_
+ goto free_pending;
+ }
+
+- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
+- BTRFS_BLOCK_RSV_TEMP);
++ block_rsv = &pending_snapshot->block_rsv;
++ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * 1 to add dir item
+ * 1 to add dir index
+ * 1 to update parent inode item
+ */
+ trans_num_items = create_subvol_num_items(inherit) + 3;
+- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+- &pending_snapshot->block_rsv,
++ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
+ trans_num_items, false);
+ if (ret)
+ goto free_pending;
++ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
+
+ pending_snapshot->dentry = dentry;
+ pending_snapshot->root = root;
+@@ -832,6 +843,13 @@ static int create_snapshot(struct btrfs_
+ ret = PTR_ERR(trans);
+ goto fail;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret) {
++ btrfs_end_transaction(trans);
++ goto fail;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+
+ trans->pending_snapshot = pending_snapshot;
+
+@@ -861,7 +879,9 @@ fail:
+ if (ret && pending_snapshot->snap)
+ pending_snapshot->snap->anon_dev = 0;
+ btrfs_put_root(pending_snapshot->snap);
+- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
++ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ free_pending:
+ if (pending_snapshot->anon_dev)
+ free_anon_bdev(pending_snapshot->anon_dev);
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -532,13 +532,3 @@ int btrfs_subvolume_reserve_metadata(str
+ }
+ return ret;
+ }
+-
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv)
+-{
+- struct btrfs_fs_info *fs_info = root->fs_info;
+- u64 qgroup_to_release;
+-
+- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
+- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
+-}
--- /dev/null
+From cba7fec864172dadd953daefdd26e01742b71a6a Mon Sep 17 00:00:00 2001
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Date: Mon, 22 Jul 2024 16:21:19 +0530
+Subject: drm/amd/display: Add NULL check for clk_mgr and clk_mgr->funcs in dcn30_init_hw
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+commit cba7fec864172dadd953daefdd26e01742b71a6a upstream.
+
+This commit addresses a potential null pointer dereference issue in the
+`dcn30_init_hw` function. The issue could occur when `dc->clk_mgr` or
+`dc->clk_mgr->funcs` is null.
+
+The fix adds checks to ensure `dc->clk_mgr` and `dc->clk_mgr->funcs`
+are not null before any of their function pointers are accessed,
+preventing a potential null pointer dereference.
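+
+For reference, the combined guard relies on C's short-circuit `&&`
+evaluation, so each pointer is dereferenced only after everything to
+its left has been checked non-NULL, e.g.:
+
+	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
+		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);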
+
+Reported by smatch:
+drivers/gpu/drm/amd/amdgpu/../display/dc/hwss/dcn30/dcn30_hwseq.c:789 dcn30_init_hw() error: we previously assumed 'dc->clk_mgr' could be null (see line 628)
+
+Cc: Tom Chung <chiahsuan.chung@amd.com>
+Cc: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Cc: Roman Li <roman.li@amd.com>
+Cc: Alex Hung <alex.hung@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: backported to fix CVE-2024-49917; modified the source path]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -448,7 +448,7 @@ void dcn30_init_hw(struct dc *dc)
+ int edp_num;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+
+- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+@@ -631,11 +631,12 @@ void dcn30_init_hw(struct dc *dc)
+ if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
+ dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+
+- if (dc->clk_mgr->funcs->notify_wm_ranges)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
+ //if softmax is enabled then hardmax will be set by a different call
+- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
++ !dc->clk_mgr->dc_mode_softmax_enabled)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
--- /dev/null
+From c395fd47d1565bd67671f45cca281b3acc2c31ef Mon Sep 17 00:00:00 2001
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Date: Mon, 22 Jul 2024 16:44:40 +0530
+Subject: drm/amd/display: Add NULL check for clk_mgr in dcn32_init_hw
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+commit c395fd47d1565bd67671f45cca281b3acc2c31ef upstream.
+
+This commit addresses a potential null pointer dereference issue in the
+`dcn32_init_hw` function. The issue could occur when `dc->clk_mgr` is
+null.
+
+The fix adds checks to ensure `dc->clk_mgr` (and its `funcs` table) is
+not null before its function pointers are accessed, preventing a
+potential null pointer dereference.
+
+Reported by smatch:
+drivers/gpu/drm/amd/amdgpu/../display/dc/hwss/dcn32/dcn32_hwseq.c:961 dcn32_init_hw() error: we previously assumed 'dc->clk_mgr' could be null (see line 782)
+
+Cc: Tom Chung <chiahsuan.chung@amd.com>
+Cc: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Cc: Roman Li <roman.li@amd.com>
+Cc: Alex Hung <alex.hung@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: backported to fix CVE-2024-49915; modified the source path]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -811,7 +811,7 @@ void dcn32_init_hw(struct dc *dc)
+ int edp_num;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+
+- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+@@ -970,10 +970,11 @@ void dcn32_init_hw(struct dc *dc)
+ if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
+ dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+
+- if (dc->clk_mgr->funcs->notify_wm_ranges)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
+- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
++ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
++ !dc->clk_mgr->dc_mode_softmax_enabled)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
--- /dev/null
+From 62ed6f0f198da04e884062264df308277628004f Mon Sep 17 00:00:00 2001
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Date: Wed, 31 Jul 2024 13:09:28 +0530
+Subject: drm/amd/display: Add NULL check for function pointer in dcn20_set_output_transfer_func
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+commit 62ed6f0f198da04e884062264df308277628004f upstream.
+
+This commit adds a null check for the set_output_gamma function pointer
+in the dcn20_set_output_transfer_func function. Previously,
+set_output_gamma was being checked for null at line 1030, but then it
+was being dereferenced without any null check at line 1048. This could
+potentially lead to a null pointer dereference error if set_output_gamma
+is null.
+
+To fix this, add a null check for set_output_gamma immediately before
+the call at line 1048, so it is never dereferenced when null.
+
+Cc: Tom Chung <chiahsuan.chung@amd.com>
+Cc: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Cc: Roman Li <roman.li@amd.com>
+Cc: Alex Hung <alex.hung@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -856,7 +856,8 @@ bool dcn20_set_output_transfer_func(stru
+ /*
+ * if above if is not executed then 'params' equal to 0 and set in bypass
+ */
+- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ if (mpc->funcs->set_output_gamma)
++ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
+ return true;
+ }
--- /dev/null
+From 3718a619a8c0a53152e76bb6769b6c414e1e83f4 Mon Sep 17 00:00:00 2001
+From: Alex Hung <alex.hung@amd.com>
+Date: Thu, 20 Jun 2024 20:23:41 -0600
+Subject: drm/amd/display: Check phantom_stream before it is used
+
+From: Alex Hung <alex.hung@amd.com>
+
+commit 3718a619a8c0a53152e76bb6769b6c414e1e83f4 upstream.
+
+dcn32_enable_phantom_stream() can return null, so the returned value
+must be checked before it is used.
+
+This fixes 1 NULL_RETURNS issue reported by Coverity.
+
+Reviewed-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: backported to fix CVE-2024-49897; modified the source path]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1796,6 +1796,9 @@ void dcn32_add_phantom_pipes(struct dc *
+ // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
+ // already have phantom pipe assigned, etc.) by previous checks.
+ phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index);
++ if (!phantom_stream)
++ return;
++
+ dcn32_enable_phantom_plane(dc, context, phantom_stream, index);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
--- /dev/null
+From 7af2ae1b1531feab5d38ec9c8f472dc6cceb4606 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Fri, 19 Apr 2024 20:36:11 +0800
+Subject: erofs: reliably distinguish block based and fscache mode
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 7af2ae1b1531feab5d38ec9c8f472dc6cceb4606 upstream.
+
+When erofs_kill_sb() is called in block-device-based mode, s_bdev may
+not have been initialised yet, and if CONFIG_EROFS_FS_ONDEMAND is
+enabled the mount is mistaken for fscache mode; erofs_kill_sb() then
+attempts to free an anon_dev that was never allocated, triggering the
+following warning:
+
+============================================
+ida_free called for id=0 which is not allocated.
+WARNING: CPU: 14 PID: 926 at lib/idr.c:525 ida_free+0x134/0x140
+Modules linked in:
+CPU: 14 PID: 926 Comm: mount Not tainted 6.9.0-rc3-dirty #630
+RIP: 0010:ida_free+0x134/0x140
+Call Trace:
+ <TASK>
+ erofs_kill_sb+0x81/0x90
+ deactivate_locked_super+0x35/0x80
+ get_tree_bdev+0x136/0x1e0
+ vfs_get_tree+0x2c/0xf0
+ do_new_mount+0x190/0x2f0
+ [...]
+============================================
+
+Now when erofs_kill_sb() is called, erofs_sb_info must have been
+initialised, so use sbi->fsid to distinguish between the two modes.
+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Link: https://lore.kernel.org/r/20240419123611.947084-3-libaokun1@huawei.com
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/super.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -892,7 +892,7 @@ static int erofs_init_fs_context(struct
+ */
+ static void erofs_kill_sb(struct super_block *sb)
+ {
+- struct erofs_sb_info *sbi;
++ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
+
+@@ -902,15 +902,11 @@ static void erofs_kill_sb(struct super_b
+ return;
+ }
+
+- if (erofs_is_fscache_mode(sb))
++ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
+ kill_anon_super(sb);
+ else
+ kill_block_super(sb);
+
+- sbi = EROFS_SB(sb);
+- if (!sbi)
+- return;
+-
+ erofs_free_dev_context(sbi->devs);
+ fs_put_dax(sbi->dax_dev, NULL);
+ erofs_fscache_unregister_fs(sb);
--- /dev/null
+From 556a7c039a52c21da33eaae9269984a1ef59189b Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Mon, 8 Jul 2024 12:33:34 -0700
+Subject: perf/x86/intel: Hide Topdown metrics events if the feature is not enumerated
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit 556a7c039a52c21da33eaae9269984a1ef59189b upstream.
+
+The below error is observed on Ice Lake VM.
+
+$ perf stat
+Error:
+The sys_perf_event_open() syscall returned with 22 (Invalid argument)
+for event (slots).
+/bin/dmesg | grep -i perf may provide additional information.
+
+In a virtualization environment, the Topdown metrics and the slots
+event are not supported yet; the guest CPUID doesn't enumerate them.
+However, the current kernel unconditionally exposes the slots event and
+the Topdown metrics events to sysfs, which misleads the perf tool and
+triggers the error.
+
+Hide the perf-metrics topdown events and the slots event if the
+perf-metrics feature is not enumerated.
+
+The big core of a hybrid platform can also support the perf-metrics
+feature. Fix the hybrid platform as well.
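+
+The mechanism is the attribute_group ->is_visible hook: for every
+attribute in the group, the sysfs core calls the hook and skips
+creating the file when it returns 0 (a simplified sketch of the loop
+in fs/sysfs/group.c):
+
+	umode_t mode = grp->is_visible ? grp->is_visible(kobj, *attr, i)
+				       : (*attr)->mode;
+	if (!mode)
+		continue;	/* the event file never appears in sysfs */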
+
+Closes: https://lore.kernel.org/lkml/CAM9d7cj8z+ryyzUHR+P1Dcpot2jjW+Qcc4CPQpfafTXN=LEU0Q@mail.gmail.com/
+Reported-by: Dongli Zhang <dongli.zhang@oracle.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Dongli Zhang <dongli.zhang@oracle.com>
+Link: https://lkml.kernel.org/r/20240708193336.1192217-2-kan.liang@linux.intel.com
+Signed-off-by: Hagar Hemdan <hagarhem@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 34 +++++++++++++++++++++++++++++++++-
+ 1 file changed, 33 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -5409,8 +5409,22 @@ default_is_visible(struct kobject *kobj,
+ return attr->mode;
+ }
+
++static umode_t
++td_is_visible(struct kobject *kobj, struct attribute *attr, int i)
++{
++ /*
++ * Hide the perf metrics topdown events
++ * if the feature is not enumerated.
++ */
++ if (x86_pmu.num_topdown_events)
++ return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute_group group_events_td = {
+ .name = "events",
++ .is_visible = td_is_visible,
+ };
+
+ static struct attribute_group group_events_mem = {
+@@ -5587,9 +5601,27 @@ static umode_t hybrid_format_is_visible(
+ return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
+ }
+
++static umode_t hybrid_td_is_visible(struct kobject *kobj,
++ struct attribute *attr, int i)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct x86_hybrid_pmu *pmu =
++ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
++
++ if (!is_attr_for_this_pmu(kobj, attr))
++ return 0;
++
++
++ /* Only the big core supports perf metrics */
++ if (pmu->cpu_type == hybrid_big)
++ return pmu->intel_cap.perf_metrics ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute_group hybrid_group_events_td = {
+ .name = "events",
+- .is_visible = hybrid_events_is_visible,
++ .is_visible = hybrid_td_is_visible,
+ };
+
+ static struct attribute_group hybrid_group_events_mem = {
--- /dev/null
+From fd70e9f1d85f5323096ad313ba73f5fe3d15ea41 Mon Sep 17 00:00:00 2001
+From: Zqiang <qiang.zhang1211@gmail.com>
+Date: Wed, 10 Jul 2024 12:45:42 +0800
+Subject: rcu-tasks: Fix access non-existent percpu rtpcp variable in rcu_tasks_need_gpcb()
+
+From: Zqiang <qiang.zhang1211@gmail.com>
+
+commit fd70e9f1d85f5323096ad313ba73f5fe3d15ea41 upstream.
+
+For kernels built with CONFIG_FORCE_NR_CPUS=y, nr_cpu_ids is defined
+as NR_CPUS instead of the number of possible CPUs, which can cause the
+following system panic:
+
+smpboot: Allowing 4 CPUs, 0 hotplug CPUs
+...
+setup_percpu: NR_CPUS:512 nr_cpumask_bits:512 nr_cpu_ids:512 nr_node_ids:1
+...
+BUG: unable to handle page fault for address: ffffffff9911c8c8
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 15 Comm: rcu_tasks_trace Tainted: G W
+6.6.21 #1 5dc7acf91a5e8e9ac9dcfc35bee0245691283ea6
+RIP: 0010:rcu_tasks_need_gpcb+0x25d/0x2c0
+RSP: 0018:ffffa371c00a3e60 EFLAGS: 00010082
+CR2: ffffffff9911c8c8 CR3: 000000040fa20005 CR4: 00000000001706f0
+Call Trace:
+<TASK>
+? __die+0x23/0x80
+? page_fault_oops+0xa4/0x180
+? exc_page_fault+0x152/0x180
+? asm_exc_page_fault+0x26/0x40
+? rcu_tasks_need_gpcb+0x25d/0x2c0
+? __pfx_rcu_tasks_kthread+0x40/0x40
+rcu_tasks_one_gp+0x69/0x180
+rcu_tasks_kthread+0x94/0xc0
+kthread+0xe8/0x140
+? __pfx_kthread+0x40/0x40
+ret_from_fork+0x34/0x80
+? __pfx_kthread+0x40/0x40
+ret_from_fork_asm+0x1b/0x80
+</TASK>
+
+Considering that there may be holes in the CPU numbers, use the
+maximum possible cpu number, instead of nr_cpu_ids, for configuring
+enqueue and dequeue limits.
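+
+As a worked example with a hypothetical sparse layout:
+
+	cpu_possible_mask   = {0, 2, 5, 6}
+	num_possible_cpus() = 4		/* slots in rtpcp_array[] */
+	maxcpu              = 6		/* last CPU seen by for_each_possible_cpu() */
+	rcu_task_cpu_ids    = 7		/* maxcpu + 1, used for the shift/limit math */
+	rtpcp_array[0..3]  -> per-CPU state of CPUs 0, 2, 5, 6
+
+The callback-invocation tree then walks the dense rtpcp->index values
+0..3, so no per-CPU pointer is ever computed for the non-existent CPUs
+1, 3 and 4.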
+
+[ neeraj.upadhyay: Fix htmldocs build error reported by Stephen Rothwell ]
+
+Closes: https://lore.kernel.org/linux-input/CALMA0xaTSMN+p4xUXkzrtR5r6k7hgoswcaXx7baR_z9r5jjskw@mail.gmail.com/T/#u
+Reported-by: Zhixu Liu <zhixu.liu@gmail.com>
+Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
+Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: backported to fix CVE-2024-49926; minor conflict resolution]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tasks.h | 82 ++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 54 insertions(+), 28 deletions(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -31,6 +31,7 @@ typedef void (*postgp_func_t)(struct rcu
+ * @barrier_q_head: RCU callback for barrier operation.
+ * @rtp_blkd_tasks: List of tasks blocked as readers.
+ * @cpu: CPU number corresponding to this entry.
++ * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
+ * @rtpp: Pointer to the rcu_tasks structure.
+ */
+ struct rcu_tasks_percpu {
+@@ -43,6 +44,7 @@ struct rcu_tasks_percpu {
+ struct rcu_head barrier_q_head;
+ struct list_head rtp_blkd_tasks;
+ int cpu;
++ int index;
+ struct rcu_tasks *rtpp;
+ };
+
+@@ -68,6 +70,7 @@ struct rcu_tasks_percpu {
+ * @postgp_func: This flavor's post-grace-period function (optional).
+ * @call_func: This flavor's call_rcu()-equivalent function.
+ * @rtpcpu: This flavor's rcu_tasks_percpu structure.
++ * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
+ * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
+ * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
+ * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
+@@ -100,6 +103,7 @@ struct rcu_tasks {
+ postgp_func_t postgp_func;
+ call_rcu_func_t call_func;
+ struct rcu_tasks_percpu __percpu *rtpcpu;
++ struct rcu_tasks_percpu **rtpcp_array;
+ int percpu_enqueue_shift;
+ int percpu_enqueue_lim;
+ int percpu_dequeue_lim;
+@@ -164,6 +168,8 @@ module_param(rcu_task_contend_lim, int,
+ static int rcu_task_collapse_lim __read_mostly = 10;
+ module_param(rcu_task_collapse_lim, int, 0444);
+
++static int rcu_task_cpu_ids;
++
+ /* RCU tasks grace-period state for debugging. */
+ #define RTGS_INIT 0
+ #define RTGS_WAIT_WAIT_CBS 1
+@@ -228,6 +234,8 @@ static void cblist_init_generic(struct r
+ unsigned long flags;
+ int lim;
+ int shift;
++ int maxcpu;
++ int index = 0;
+
+ raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+ if (rcu_task_enqueue_lim < 0) {
+@@ -238,14 +246,9 @@ static void cblist_init_generic(struct r
+ }
+ lim = rcu_task_enqueue_lim;
+
+- if (lim > nr_cpu_ids)
+- lim = nr_cpu_ids;
+- shift = ilog2(nr_cpu_ids / lim);
+- if (((nr_cpu_ids - 1) >> shift) >= lim)
+- shift++;
+- WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
+- WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
+- smp_store_release(&rtp->percpu_enqueue_lim, lim);
++ rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
++ BUG_ON(!rtp->rtpcp_array);
++
+ for_each_possible_cpu(cpu) {
+ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+@@ -258,16 +261,33 @@ static void cblist_init_generic(struct r
+ INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
+ rtpcp->cpu = cpu;
+ rtpcp->rtpp = rtp;
++ rtpcp->index = index;
++ rtp->rtpcp_array[index] = rtpcp;
++ index++;
+ if (!rtpcp->rtp_blkd_tasks.next)
+ INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
+ raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
++ maxcpu = cpu;
+ }
+ raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+
+ if (rcu_task_cb_adjust)
+ pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
+
+- pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
++ rcu_task_cpu_ids = maxcpu + 1;
++ if (lim > rcu_task_cpu_ids)
++ lim = rcu_task_cpu_ids;
++ shift = ilog2(rcu_task_cpu_ids / lim);
++ if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
++ shift++;
++ WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
++ WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
++ smp_store_release(&rtp->percpu_enqueue_lim, lim);
++
++ pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
++ rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
++ rcu_task_cb_adjust, rcu_task_cpu_ids);
++
+ }
+
+ // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
+@@ -307,7 +327,7 @@ static void call_rcu_tasks_generic(struc
+ rtpcp->rtp_n_lock_retries = 0;
+ }
+ if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
+- READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
++ READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
+ needadjust = true; // Defer adjustment to avoid deadlock.
+ }
+ if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
+@@ -320,10 +340,10 @@ static void call_rcu_tasks_generic(struc
+ raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ if (unlikely(needadjust)) {
+ raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+- if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
++ if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
+ WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
+- WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
+- smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
++ WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
++ smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
+ pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
+ }
+ raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+@@ -394,6 +414,8 @@ static int rcu_tasks_need_gpcb(struct rc
+ int needgpcb = 0;
+
+ for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
++ if (!cpu_possible(cpu))
++ continue;
+ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+ /* Advance and accelerate any new callbacks. */
+@@ -426,7 +448,7 @@ static int rcu_tasks_need_gpcb(struct rc
+ if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
+ raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+ if (rtp->percpu_enqueue_lim > 1) {
+- WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
++ WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
+ smp_store_release(&rtp->percpu_enqueue_lim, 1);
+ rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
+ gpdone = false;
+@@ -441,7 +463,9 @@ static int rcu_tasks_need_gpcb(struct rc
+ pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
+ }
+ if (rtp->percpu_dequeue_lim == 1) {
+- for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
++ for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
++ if (!cpu_possible(cpu))
++ continue;
+ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+ WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
+@@ -456,30 +480,32 @@ static int rcu_tasks_need_gpcb(struct rc
+ // Advance callbacks and invoke any that are ready.
+ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
+ {
+- int cpu;
+- int cpunext;
+ int cpuwq;
+ unsigned long flags;
+ int len;
++ int index;
+ struct rcu_head *rhp;
+ struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
+ struct rcu_tasks_percpu *rtpcp_next;
+
+- cpu = rtpcp->cpu;
+- cpunext = cpu * 2 + 1;
+- if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+- rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+- cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+- queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+- cpunext++;
+- if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+- rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+- cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++ index = rtpcp->index * 2 + 1;
++ if (index < num_possible_cpus()) {
++ rtpcp_next = rtp->rtpcp_array[index];
++ if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
++ cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
+ queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
++ index++;
++ if (index < num_possible_cpus()) {
++ rtpcp_next = rtp->rtpcp_array[index];
++ if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
++ cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
++ queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
++ }
++ }
+ }
+ }
+
+- if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
++ if (rcu_segcblist_empty(&rtpcp->cblist))
+ return;
+ raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+ rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
alsa-usb-audio-fix-out-of-bounds-reads-when-finding-clock-sources.patch
usb-ehci-spear-fix-call-balance-of-sehci-clk-handling-routines.patch
media-aspeed-fix-memory-overwrite-if-timing-is-1600x900.patch
+wifi-iwlwifi-mvm-avoid-null-pointer-dereference.patch
+drm-amd-display-add-null-check-for-clk_mgr-and-clk_mgr-funcs-in-dcn30_init_hw.patch
+drm-amd-display-add-null-check-for-clk_mgr-in-dcn32_init_hw.patch
+drm-amd-display-add-null-check-for-function-pointer-in-dcn20_set_output_transfer_func.patch
+drm-amd-display-check-phantom_stream-before-it-is-used.patch
+erofs-reliably-distinguish-block-based-and-fscache-mode.patch
+rcu-tasks-fix-access-non-existent-percpu-rtpcp-variable-in-rcu_tasks_need_gpcb.patch
+btrfs-qgroup-fix-qgroup-prealloc-rsv-leak-in-subvolume-operations.patch
+perf-x86-intel-hide-topdown-metrics-events-if-the-feature-is-not-enumerated.patch
--- /dev/null
+From 557a6cd847645e667f3b362560bd7e7c09aac284 Mon Sep 17 00:00:00 2001
+From: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Date: Sun, 25 Aug 2024 19:17:09 +0300
+Subject: wifi: iwlwifi: mvm: avoid NULL pointer dereference
+
+From: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+
+commit 557a6cd847645e667f3b362560bd7e7c09aac284 upstream.
+
+iwl_mvm_tx_skb_sta() and iwl_mvm_tx_mpdu() verify that the mvmsta
+pointer is not NULL. They retrieve this pointer using
+iwl_mvm_sta_from_mac80211(), which dereferences the ieee80211_sta
+pointer, so if sta is NULL, iwl_mvm_sta_from_mac80211() already
+operates on a NULL pointer.
+Fix this by checking the sta pointer before deriving mvmsta from it;
+if sta is not NULL, then mvmsta isn't either.
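+
+Note that the old WARN_ON_ONCE(!mvmsta) could never fire for a NULL
+sta, because iwl_mvm_sta_from_mac80211() is (roughly) a drv_priv
+accessor:
+
+	static inline struct iwl_mvm_sta *
+	iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+	{
+		return (void *)sta->drv_priv;
+	}
+
+With sta == NULL this computes a small non-NULL offset (undefined
+behaviour rather than a NULL result), so the check has to happen on
+sta itself.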
+
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Reviewed-by: Johannes Berg <johannes.berg@intel.com>
+Link: https://patch.msgid.link/20240825191257.880921ce23b7.I340052d70ab6d3410724ce955eb00da10e08188f@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1105,6 +1105,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mv
+ bool is_ampdu = false;
+ int hdrlen;
+
++ if (WARN_ON_ONCE(!sta))
++ return -1;
++
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
+@@ -1112,9 +1115,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mv
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
+- if (WARN_ON_ONCE(!mvmsta))
+- return -1;
+-
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+
+@@ -1242,16 +1242,18 @@ drop:
+ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+ {
+- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
++ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info info;
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
+ struct sk_buff *orig_skb = skb;
+
+- if (WARN_ON_ONCE(!mvmsta))
++ if (WARN_ON_ONCE(!sta))
+ return -1;
+
++ mvmsta = iwl_mvm_sta_from_mac80211(sta);
++
+ if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+