From 25c449eb9e87876499fc04986ad7e69c7e92346b Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 21 Dec 2023 09:52:40 -0500 Subject: [PATCH] Fixes for 6.6 Signed-off-by: Sasha Levin --- ...p-pertrans-reserve-on-transaction-ab.patch | 119 +++++++ ...rate-qgroups-without-memory-allocati.patch | 168 ++++++++++ ...-qgroup_iterator-in-qgroup_convert_m.patch | 81 +++++ ...fix-hw-rotated-modes-when-psr-su-is-.patch | 96 ++++++ ...rship-check-in-drm_master_check_perm.patch | 49 +++ ...-t-write-to-dp_link_bw_set-when-usin.patch | 88 +++++ queue-6.6/drm-i915-fix-fec-state-dump.patch | 66 ++++ ...ntroduce-crtc_state-enhanced_framing.patch | 203 +++++++++++ .../drm-update-file-owner-during-use.patch | 316 ++++++++++++++++++ ...ke-damon_start-waits-until-kdamond_f.patch | 89 +++++ ...e-number-of-passed-access-sampling-a.patch | 259 ++++++++++++++ queue-6.6/series | 12 + ...ectly-configure-burst-length-when-us.patch | 58 ++++ 13 files changed, 1604 insertions(+) create mode 100644 queue-6.6/btrfs-free-qgroup-pertrans-reserve-on-transaction-ab.patch create mode 100644 queue-6.6/btrfs-qgroup-iterate-qgroups-without-memory-allocati.patch create mode 100644 queue-6.6/btrfs-qgroup-use-qgroup_iterator-in-qgroup_convert_m.patch create mode 100644 queue-6.6/drm-amd-display-fix-hw-rotated-modes-when-psr-su-is-.patch create mode 100644 queue-6.6/drm-fix-fd-ownership-check-in-drm_master_check_perm.patch create mode 100644 queue-6.6/drm-i915-edp-don-t-write-to-dp_link_bw_set-when-usin.patch create mode 100644 queue-6.6/drm-i915-fix-fec-state-dump.patch create mode 100644 queue-6.6/drm-i915-introduce-crtc_state-enhanced_framing.patch create mode 100644 queue-6.6/drm-update-file-owner-during-use.patch create mode 100644 queue-6.6/mm-damon-core-make-damon_start-waits-until-kdamond_f.patch create mode 100644 queue-6.6/mm-damon-core-use-number-of-passed-access-sampling-a.patch create mode 100644 queue-6.6/spi-spi-imx-correctly-configure-burst-length-when-us.patch diff --git a/queue-6.6/btrfs-free-qgroup-pertrans-reserve-on-transaction-ab.patch b/queue-6.6/btrfs-free-qgroup-pertrans-reserve-on-transaction-ab.patch new file mode 100644 index 00000000000..01e3081960d --- /dev/null +++ b/queue-6.6/btrfs-free-qgroup-pertrans-reserve-on-transaction-ab.patch @@ -0,0 +1,119 @@ +From d0d1be3ef0c2fd24d9b364beb5c3ace5c7626de8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Dec 2023 13:00:11 -0800 +Subject: btrfs: free qgroup pertrans reserve on transaction abort + +From: Boris Burkov + +[ Upstream commit b321a52cce062ec7ed385333a33905d22159ce36 ] + +If we abort a transaction, we never run the code that frees the pertrans +qgroup reservation. This results in warnings on unmount as that +reservation has been leaked. The leak isn't a huge issue since the fs is +read-only, but it's better to clean it up when we know we can/should. Do +it during the cleanup_transaction step of aborting. 
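+
+As a sketch of the cleanup being added, condensed from the disk-io.c
+hunk below (declarations and the gang-lookup batching are as in the
+real code):
+
+	spin_lock(&fs_info->fs_roots_radix_lock);
+	/* Walk every root still tagged as part of the transaction. */
+	while ((ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
+						 (void **)gang, 0,
+						 ARRAY_SIZE(gang),
+						 BTRFS_ROOT_TRANS_TAG)) > 0) {
+		for (i = 0; i < ret; i++) {
+			/* Drop the leaked pertrans reservation ... */
+			btrfs_qgroup_free_meta_all_pertrans(gang[i]);
+			/* ... and clear the tag so the walk terminates. */
+			radix_tree_tag_clear(&fs_info->fs_roots_radix,
+					(unsigned long)gang[i]->root_key.objectid,
+					BTRFS_ROOT_TRANS_TAG);
+		}
+	}
+	spin_unlock(&fs_info->fs_roots_radix_lock);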
+
+CC: stable@vger.kernel.org # 5.15+
+Reviewed-by: Qu Wenruo
+Signed-off-by: Boris Burkov
+Signed-off-by: David Sterba
+Signed-off-by: Sasha Levin
+---
+ fs/btrfs/disk-io.c     | 28 ++++++++++++++++++++++++++++
+ fs/btrfs/qgroup.c      |  5 +++--
+ fs/btrfs/transaction.c |  2 --
+ fs/btrfs/transaction.h |  3 +++
+ 4 files changed, 34 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 71efb6883f307..b79781df70714 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4836,6 +4836,32 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
+ 	}
+ }
+ 
++static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
++{
++	struct btrfs_root *gang[8];
++	int i;
++	int ret;
++
++	spin_lock(&fs_info->fs_roots_radix_lock);
++	while (1) {
++		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
++						 (void **)gang, 0,
++						 ARRAY_SIZE(gang),
++						 BTRFS_ROOT_TRANS_TAG);
++		if (ret == 0)
++			break;
++		for (i = 0; i < ret; i++) {
++			struct btrfs_root *root = gang[i];
++
++			btrfs_qgroup_free_meta_all_pertrans(root);
++			radix_tree_tag_clear(&fs_info->fs_roots_radix,
++					(unsigned long)root->root_key.objectid,
++					BTRFS_ROOT_TRANS_TAG);
++		}
++	}
++	spin_unlock(&fs_info->fs_roots_radix_lock);
++}
++
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 				   struct btrfs_fs_info *fs_info)
+ {
+@@ -4864,6 +4890,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 				     EXTENT_DIRTY);
+ 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
+ 
++	btrfs_free_all_qgroup_pertrans(fs_info);
++
+ 	cur_trans->state = TRANS_STATE_COMPLETED;
+ 	wake_up(&cur_trans->commit_wait);
+ }
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 0d2212fa0ce85..a006f5160e6b4 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4124,8 +4124,9 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
+ 
+ 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
+ 				   BTRFS_QGROUP_RSV_META_PREALLOC);
+-		qgroup_rsv_add(fs_info, qgroup, num_bytes,
+-			       BTRFS_QGROUP_RSV_META_PERTRANS);
++		if (!sb_rdonly(fs_info->sb))
++			qgroup_rsv_add(fs_info, qgroup, num_bytes,
++				       BTRFS_QGROUP_RSV_META_PERTRANS);
+ 
+ 		list_for_each_entry(glist, &qgroup->groups, next_group)
+ 			qgroup_iterator_add(&qgroup_list, glist->group);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index c780d37294636..0ac2d191cd34f 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -37,8 +37,6 @@
+ 
+ static struct kmem_cache *btrfs_trans_handle_cachep;
+ 
+-#define BTRFS_ROOT_TRANS_TAG 0
+-
+ /*
+  * Transaction states and transitions
+  *
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 93869cda6af99..238a0ab85df9b 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -12,6 +12,9 @@
+ #include "ctree.h"
+ #include "misc.h"
+ 
++/* Radix-tree tag for roots that are part of the transaction. */
++#define BTRFS_ROOT_TRANS_TAG 0
++
+ enum btrfs_trans_state {
+ 	TRANS_STATE_RUNNING,
+ 	TRANS_STATE_COMMIT_PREP,
+-- 
+2.43.0
+
diff --git a/queue-6.6/btrfs-qgroup-iterate-qgroups-without-memory-allocati.patch b/queue-6.6/btrfs-qgroup-iterate-qgroups-without-memory-allocati.patch
new file mode 100644
index 00000000000..02023b9b400
--- /dev/null
+++ b/queue-6.6/btrfs-qgroup-iterate-qgroups-without-memory-allocati.patch
@@ -0,0 +1,168 @@
+From 2aa7832ea0260b2239199b51bca3ccf2734115b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Sat, 2 Sep 2023 08:13:52 +0800
+Subject: btrfs: qgroup: iterate qgroups without memory allocation for
+ qgroup_reserve()
+
+From: Qu Wenruo
+
+[ Upstream commit 686c4a5a42635e0d2889e3eb461c554fd0b616b4 ]
+
+Qgroup heavily relies on ulist to go through all the involved
+qgroups, but since we're using ulist inside the fs_info->qgroup_lock
+spinlock, this means we're doing a lot of GFP_ATOMIC allocations.
+
+This patch reduces the GFP_ATOMIC usage for qgroup_reserve() by
+eliminating the memory allocation completely.
+
+This is done by moving the needed memory to the btrfs_qgroup::iterator
+list_head, so that we can put all the involved qgroups into an on-stack
+list, thus eliminating the need to allocate memory while holding the
+spinlock.
+
+The only cost is the slightly higher memory usage, but considering the
+reduced GFP_ATOMIC usage in a hot path, it should still be acceptable.
+
+Function qgroup_reserve() is the perfect starting point for this
+conversion.
+
+Reviewed-by: Boris Burkov
+Signed-off-by: Qu Wenruo
+Reviewed-by: David Sterba
+Signed-off-by: David Sterba
+Stable-dep-of: b321a52cce06 ("btrfs: free qgroup pertrans reserve on transaction abort")
+Signed-off-by: Sasha Levin
+---
+ fs/btrfs/qgroup.c | 61 ++++++++++++++++++++++-------------------
+ fs/btrfs/qgroup.h |  9 +++++++
+ 2 files changed, 38 insertions(+), 32 deletions(-)
+
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 7c92494381549..32e5defe4eff4 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -208,6 +208,7 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ 	INIT_LIST_HEAD(&qgroup->groups);
+ 	INIT_LIST_HEAD(&qgroup->members);
+ 	INIT_LIST_HEAD(&qgroup->dirty);
++	INIT_LIST_HEAD(&qgroup->iterator);
+ 
+ 	rb_link_node(&qgroup->node, parent, p);
+ 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
+@@ -1342,6 +1343,24 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+ 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
+ }
+ 
++static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
++{
++	if (!list_empty(&qgroup->iterator))
++		return;
++
++	list_add_tail(&qgroup->iterator, head);
++}
++
++static void qgroup_iterator_clean(struct list_head *head)
++{
++	while (!list_empty(head)) {
++		struct btrfs_qgroup *qgroup;
++
++		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
++		list_del_init(&qgroup->iterator);
++	}
++}
++
+ /*
+  * The easy accounting, we're updating qgroup relationship whose child qgroup
+  * only has exclusive extents.
+@@ -3125,8 +3144,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, + struct btrfs_fs_info *fs_info = root->fs_info; + u64 ref_root = root->root_key.objectid; + int ret = 0; +- struct ulist_node *unode; +- struct ulist_iterator uiter; ++ LIST_HEAD(qgroup_list); + + if (!is_fstree(ref_root)) + return 0; +@@ -3146,49 +3164,28 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, + if (!qgroup) + goto out; + +- /* +- * in a first step, we check all affected qgroups if any limits would +- * be exceeded +- */ +- ulist_reinit(fs_info->qgroup_ulist); +- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, +- qgroup_to_aux(qgroup), GFP_ATOMIC); +- if (ret < 0) +- goto out; +- ULIST_ITER_INIT(&uiter); +- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { +- struct btrfs_qgroup *qg; ++ qgroup_iterator_add(&qgroup_list, qgroup); ++ list_for_each_entry(qgroup, &qgroup_list, iterator) { + struct btrfs_qgroup_list *glist; + +- qg = unode_aux_to_qgroup(unode); +- +- if (enforce && !qgroup_check_limits(qg, num_bytes)) { ++ if (enforce && !qgroup_check_limits(qgroup, num_bytes)) { + ret = -EDQUOT; + goto out; + } + +- list_for_each_entry(glist, &qg->groups, next_group) { +- ret = ulist_add(fs_info->qgroup_ulist, +- glist->group->qgroupid, +- qgroup_to_aux(glist->group), GFP_ATOMIC); +- if (ret < 0) +- goto out; +- } ++ list_for_each_entry(glist, &qgroup->groups, next_group) ++ qgroup_iterator_add(&qgroup_list, glist->group); + } ++ + ret = 0; + /* + * no limits exceeded, now record the reservation into all qgroups + */ +- ULIST_ITER_INIT(&uiter); +- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { +- struct btrfs_qgroup *qg; +- +- qg = unode_aux_to_qgroup(unode); +- +- qgroup_rsv_add(fs_info, qg, num_bytes, type); +- } ++ list_for_each_entry(qgroup, &qgroup_list, iterator) ++ qgroup_rsv_add(fs_info, qgroup, num_bytes, type); + + out: ++ qgroup_iterator_clean(&qgroup_list); + spin_unlock(&fs_info->qgroup_lock); + return ret; + } +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h +index 104c9bd3c3379..1203f06320991 100644 +--- a/fs/btrfs/qgroup.h ++++ b/fs/btrfs/qgroup.h +@@ -220,6 +220,15 @@ struct btrfs_qgroup { + struct list_head groups; /* groups this group is member of */ + struct list_head members; /* groups that are members of this group */ + struct list_head dirty; /* dirty groups */ ++ ++ /* ++ * For qgroup iteration usage. ++ * ++ * The iteration list should always be empty until qgroup_iterator_add() ++ * is called. And should be reset to empty after the iteration is ++ * finished. ++ */ ++ struct list_head iterator; + struct rb_node node; /* tree of qgroups */ + + /* +-- +2.43.0 + diff --git a/queue-6.6/btrfs-qgroup-use-qgroup_iterator-in-qgroup_convert_m.patch b/queue-6.6/btrfs-qgroup-use-qgroup_iterator-in-qgroup_convert_m.patch new file mode 100644 index 00000000000..274c430f016 --- /dev/null +++ b/queue-6.6/btrfs-qgroup-use-qgroup_iterator-in-qgroup_convert_m.patch @@ -0,0 +1,81 @@ +From 30924dd2258ba3ce14862a28b3d33a970b66f286 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 2 Sep 2023 08:13:54 +0800 +Subject: btrfs: qgroup: use qgroup_iterator in qgroup_convert_meta() + +From: Qu Wenruo + +[ Upstream commit 0913445082496c2b29668ee26521401b273838b8 ] + +With the new qgroup_iterator_add() and qgroup_iterator_clean(), we can +get rid of the ulist and its GFP_ATOMIC memory allocation. 
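+
+With both helpers in place, the shape of the resulting iteration,
+reduced to its essentials (names as in the hunk below):
+
+	LIST_HEAD(qgroup_list);		/* on stack, no allocation */
+
+	qgroup_iterator_add(&qgroup_list, qgroup);
+	list_for_each_entry(qgroup, &qgroup_list, iterator) {
+		struct btrfs_qgroup_list *glist;
+
+		/* ... check or update this qgroup ... */
+
+		/* Appending behind the cursor extends the walk. */
+		list_for_each_entry(glist, &qgroup->groups, next_group)
+			qgroup_iterator_add(&qgroup_list, glist->group);
+	}
+	qgroup_iterator_clean(&qgroup_list);
+
+Appending to the tail while list_for_each_entry() is walking is safe
+because the iterator re-reads the next pointer on every step, and an
+empty ->iterator head doubles as the "not queued yet" marker, so no
+qgroup is visited twice.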
+ +Reviewed-by: Boris Burkov +Signed-off-by: Qu Wenruo +Reviewed-by: David Sterba +Signed-off-by: David Sterba +Stable-dep-of: b321a52cce06 ("btrfs: free qgroup pertrans reserve on transaction abort") +Signed-off-by: Sasha Levin +--- + fs/btrfs/qgroup.c | 32 ++++++++++---------------------- + 1 file changed, 10 insertions(+), 22 deletions(-) + +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 32e5defe4eff4..0d2212fa0ce85 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -4106,9 +4106,7 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, + int num_bytes) + { + struct btrfs_qgroup *qgroup; +- struct ulist_node *unode; +- struct ulist_iterator uiter; +- int ret = 0; ++ LIST_HEAD(qgroup_list); + + if (num_bytes == 0) + return; +@@ -4119,31 +4117,21 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, + qgroup = find_qgroup_rb(fs_info, ref_root); + if (!qgroup) + goto out; +- ulist_reinit(fs_info->qgroup_ulist); +- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, +- qgroup_to_aux(qgroup), GFP_ATOMIC); +- if (ret < 0) +- goto out; +- ULIST_ITER_INIT(&uiter); +- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { +- struct btrfs_qgroup *qg; +- struct btrfs_qgroup_list *glist; + +- qg = unode_aux_to_qgroup(unode); ++ qgroup_iterator_add(&qgroup_list, qgroup); ++ list_for_each_entry(qgroup, &qgroup_list, iterator) { ++ struct btrfs_qgroup_list *glist; + +- qgroup_rsv_release(fs_info, qg, num_bytes, ++ qgroup_rsv_release(fs_info, qgroup, num_bytes, + BTRFS_QGROUP_RSV_META_PREALLOC); +- qgroup_rsv_add(fs_info, qg, num_bytes, ++ qgroup_rsv_add(fs_info, qgroup, num_bytes, + BTRFS_QGROUP_RSV_META_PERTRANS); +- list_for_each_entry(glist, &qg->groups, next_group) { +- ret = ulist_add(fs_info->qgroup_ulist, +- glist->group->qgroupid, +- qgroup_to_aux(glist->group), GFP_ATOMIC); +- if (ret < 0) +- goto out; +- } ++ ++ list_for_each_entry(glist, &qgroup->groups, next_group) ++ qgroup_iterator_add(&qgroup_list, glist->group); + } + out: ++ qgroup_iterator_clean(&qgroup_list); + spin_unlock(&fs_info->qgroup_lock); + } + +-- +2.43.0 + diff --git a/queue-6.6/drm-amd-display-fix-hw-rotated-modes-when-psr-su-is-.patch b/queue-6.6/drm-amd-display-fix-hw-rotated-modes-when-psr-su-is-.patch new file mode 100644 index 00000000000..b77efebddcf --- /dev/null +++ b/queue-6.6/drm-amd-display-fix-hw-rotated-modes-when-psr-su-is-.patch @@ -0,0 +1,96 @@ +From e26334f293d7c46fcc3490424cce820b48da0776 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Dec 2023 14:55:04 -0500 +Subject: drm/amd/display: fix hw rotated modes when PSR-SU is enabled + +From: Hamza Mahfooz + +[ Upstream commit f528ee145bd0076cd0ed7e7b2d435893e6329e98 ] + +We currently don't support dirty rectangles on hardware rotated modes. +So, if a user is using hardware rotated modes with PSR-SU enabled, +use PSR-SU FFU for all rotated planes (including cursor planes). 
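+
+The fix therefore has two halves, sketched from the hunks below: the
+damage-clip computation simply bails out to the full-frame-update
+label for any rotated plane,
+
+	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+		goto ffu;
+
+and the cursor-position hook reports a cursor rectangle spanning the
+whole addressable timing when PSR-SU is active on a rotated stream, so
+cursor movement is likewise treated as a full-frame update.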
+ +Cc: stable@vger.kernel.org +Fixes: 30ebe41582d1 ("drm/amd/display: add FB_DAMAGE_CLIPS support") +Reported-by: Kai-Heng Feng +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/2952 +Tested-by: Kai-Heng Feng +Tested-by: Bin Li +Reviewed-by: Mario Limonciello +Signed-off-by: Hamza Mahfooz +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ + drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 + + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++- + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 12 ++++++++++-- + 4 files changed, 16 insertions(+), 3 deletions(-) + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index f5fdb61c821d0..d63360127834b 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -5170,6 +5170,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return; + ++ if (new_plane_state->rotation != DRM_MODE_ROTATE_0) ++ goto ffu; ++ + num_clips = drm_plane_get_damage_clips_count(new_plane_state); + clips = drm_plane_get_damage_clips(new_plane_state); + +diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +index 100d62162b717..99880b08cda0c 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +@@ -465,6 +465,7 @@ struct dc_cursor_mi_param { + struct fixed31_32 v_scale_ratio; + enum dc_rotation_angle rotation; + bool mirror; ++ struct dc_stream_state *stream; + }; + + /* IPP related types */ +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +index 79befa17bb037..13ccb57379c7a 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +@@ -3407,7 +3407,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) + .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, + .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, + .rotation = pipe_ctx->plane_state->rotation, +- .mirror = pipe_ctx->plane_state->horizontal_mirror ++ .mirror = pipe_ctx->plane_state->horizontal_mirror, ++ .stream = pipe_ctx->stream, + }; + bool pipe_split_on = false; + bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +index 4566bc7abf17e..aa252dc263267 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +@@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position( + if (src_y_offset < 0) + src_y_offset = 0; + /* Save necessary cursor info x, y position. w, h is saved in attribute func. 
*/
+-	hubp->cur_rect.x = src_x_offset + param->viewport.x;
+-	hubp->cur_rect.y = src_y_offset + param->viewport.y;
++	if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
++	    param->rotation != ROTATION_ANGLE_0) {
++		hubp->cur_rect.x = 0;
++		hubp->cur_rect.y = 0;
++		hubp->cur_rect.w = param->stream->timing.h_addressable;
++		hubp->cur_rect.h = param->stream->timing.v_addressable;
++	} else {
++		hubp->cur_rect.x = src_x_offset + param->viewport.x;
++		hubp->cur_rect.y = src_y_offset + param->viewport.y;
++	}
+ }
+ 
+ void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+-- 
+2.43.0
+
diff --git a/queue-6.6/drm-fix-fd-ownership-check-in-drm_master_check_perm.patch b/queue-6.6/drm-fix-fd-ownership-check-in-drm_master_check_perm.patch
new file mode 100644
index 00000000000..71b7f29b7ee
--- /dev/null
+++ b/queue-6.6/drm-fix-fd-ownership-check-in-drm_master_check_perm.patch
@@ -0,0 +1,49 @@
+From 11903bfbdb14c1b0a0b280bc630c758a3c2385d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 6 Dec 2023 13:51:58 +0000
+Subject: drm: Fix FD ownership check in drm_master_check_perm()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lingkai Dong
+
+[ Upstream commit 5a6c9a05e55cb2972396cc991af9d74c8c15029a ]
+
+The DRM subsystem keeps a record of the owner of a DRM device file
+descriptor using the thread group ID (TGID) instead of the process ID
+(PID), to ensure that all threads within the same userspace process are
+considered the owner. However, the DRM master ownership check compares
+the current thread's PID against the record, so a thread is incorrectly
+considered not to be the FD owner whenever its PID differs from the
+TGID. This causes DRM ioctls to be denied master privileges, even if the
+same thread that opened the FD performs an ioctl. Fix this by checking
+the TGID.
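+
+As a reminder of the kernel's naming: every thread has its own PID
+(what userspace calls a TID), while all threads of a process share the
+TGID (what userspace calls the PID). A tiny standalone illustration of
+the distinction (a hypothetical demo, not part of this patch; gettid()
+needs glibc 2.30 or newer, build with -pthread):
+
+	#define _GNU_SOURCE
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <pthread.h>
+
+	static void *worker(void *arg)
+	{
+		(void)arg;
+		/* Same getpid() (TGID) as main, different gettid() (PID). */
+		printf("worker: pid=%d tid=%d\n", (int)getpid(), (int)gettid());
+		return NULL;
+	}
+
+	int main(void)
+	{
+		pthread_t t;
+
+		printf("main:   pid=%d tid=%d\n", (int)getpid(), (int)gettid());
+		pthread_create(&t, NULL, worker, NULL);
+		pthread_join(t, NULL);
+		return 0;
+	}
+
+A secondary thread issuing a DRM ioctl hits exactly this mismatch: its
+task_pid() differs from the recorded TGID, so the old check failed.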
+ +Fixes: 4230cea89cafb ("drm: Track clients by tgid and not tid") +Signed-off-by: Lingkai Dong +Reviewed-by: Christian König +Reviewed-by: Tvrtko Ursulin +Cc: # v6.4+ +Link: https://patchwork.freedesktop.org/patch/msgid/PA6PR08MB107665920BE9A96658CDA04CE8884A@PA6PR08MB10766.eurprd08.prod.outlook.com +Signed-off-by: Christian König +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/drm_auth.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c +index 2ed2585ded378..6899b3dc1f12a 100644 +--- a/drivers/gpu/drm/drm_auth.c ++++ b/drivers/gpu/drm/drm_auth.c +@@ -236,7 +236,7 @@ static int + drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv) + { + if (file_priv->was_master && +- rcu_access_pointer(file_priv->pid) == task_pid(current)) ++ rcu_access_pointer(file_priv->pid) == task_tgid(current)) + return 0; + + if (!capable(CAP_SYS_ADMIN)) +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-edp-don-t-write-to-dp_link_bw_set-when-usin.patch b/queue-6.6/drm-i915-edp-don-t-write-to-dp_link_bw_set-when-usin.patch new file mode 100644 index 00000000000..1421f175e43 --- /dev/null +++ b/queue-6.6/drm-i915-edp-don-t-write-to-dp_link_bw_set-when-usin.patch @@ -0,0 +1,88 @@ +From 15ecf3cb748ba93a3ce41e41820edfaddb2580ee Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 5 Dec 2023 20:05:51 +0200 +Subject: drm/i915/edp: don't write to DP_LINK_BW_SET when using rate select +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jani Nikula + +[ Upstream commit e6861d8264cd43c5eb20196e53df36fd71ec5698 ] + +The eDP 1.5 spec adds a clarification for eDP 1.4x: + +> For eDP v1.4x, if the Source device chooses the Main-Link rate by way +> of DPCD 00100h, the Sink device shall ignore DPCD 00115h[2:0]. + +We write 0 to DP_LINK_BW_SET (DPCD 100h) even when using +DP_LINK_RATE_SET (DPCD 114h). Stop doing that, as it can cause the panel +to ignore the rate set method. + +Moreover, 0 is a reserved value for DP_LINK_BW_SET, and should not be +used. 
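+
+For reference, the two rate-selection models side by side (register
+addresses per the DP/eDP specifications):
+
+	/*
+	 * DPCD 100h (DP_LINK_BW_SET):   write a bandwidth code, e.g.
+	 *                               0x0a = 2.7 Gbps per lane.
+	 * DPCD 115h (DP_LINK_RATE_SET): write an index into the rate
+	 *                               table the sink exposes at
+	 *                               DPCD 010h-01Fh.
+	 *
+	 * Per the eDP 1.5 wording quoted above, an eDP 1.4x sink that
+	 * sees a write to 100h ignores 115h, so 100h must be left
+	 * untouched when the rate-select method is used.
+	 */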
+ +v2: Improve the comments (Ville) + +Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/9081 +Tested-by: Animesh Manna +Reviewed-by: Uma Shankar +Cc: Ville Syrjälä +Signed-off-by: Jani Nikula +Link: https://patchwork.freedesktop.org/patch/msgid/20231205180551.2476228-1-jani.nikula@intel.com +(cherry picked from commit 23b392b94acb0499f69706c5808c099f590ebcf4) +Cc: stable@vger.kernel.org +Signed-off-by: Jani Nikula +Signed-off-by: Sasha Levin +--- + .../drm/i915/display/intel_dp_link_training.c | 31 +++++++++++++------ + 1 file changed, 21 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c +index d09e43a38fa61..a62bca622b0a1 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c +@@ -650,19 +650,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + u8 link_bw, u8 rate_select) + { +- u8 link_config[2]; ++ u8 lane_count = crtc_state->lane_count; + +- /* Write the link configuration data */ +- link_config[0] = link_bw; +- link_config[1] = crtc_state->lane_count; + if (crtc_state->enhanced_framing) +- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; +- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); ++ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN; ++ ++ if (link_bw) { ++ /* DP and eDP v1.3 and earlier link bw set method. */ ++ u8 link_config[] = { link_bw, lane_count }; + +- /* eDP 1.4 rate select method. */ +- if (!link_bw) +- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, +- &rate_select, 1); ++ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, ++ ARRAY_SIZE(link_config)); ++ } else { ++ /* ++ * eDP v1.4 and later link rate set method. ++ * ++ * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if ++ * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET. ++ * ++ * eDP v1.5 sinks allow choosing either, and the last choice ++ * shall be active. ++ */ ++ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count); ++ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select); ++ } + } + + /* +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-fix-fec-state-dump.patch b/queue-6.6/drm-i915-fix-fec-state-dump.patch new file mode 100644 index 00000000000..fed509a918c --- /dev/null +++ b/queue-6.6/drm-i915-fix-fec-state-dump.patch @@ -0,0 +1,66 @@ +From 0793d9b02889e35480859b58639b6e8d6d1ddc44 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 2 May 2023 17:39:01 +0300 +Subject: drm/i915: Fix FEC state dump +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Ville Syrjälä + +[ Upstream commit 3dfeb80b308882cc6e1f5f6c36fd9a7f4cae5fc6 ] + +Stop dumping state while reading it out. We have a proper +place for that stuff. 
+ +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20230502143906.2401-7-ville.syrjala@linux.intel.com +Reviewed-by: Luca Coelho +Stable-dep-of: e6861d8264cd ("drm/i915/edp: don't write to DP_LINK_BW_SET when using rate select") +Signed-off-by: Sasha Levin +--- + .../gpu/drm/i915/display/intel_crtc_state_dump.c | 2 ++ + drivers/gpu/drm/i915/display/intel_ddi.c | 13 +++---------- + 2 files changed, 5 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +index 8d4640d0fd346..8b34fa55fa1bd 100644 +--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c ++++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +@@ -258,6 +258,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); ++ drm_dbg_kms(&i915->drm, "fec: %s\n", ++ str_enabled_disabled(pipe_config->fec_enable)); + } + + drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n", +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 84bbf854337aa..85e2263e688de 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -3724,17 +3724,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, + intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, + &pipe_config->dp_m2_n2); + +- if (DISPLAY_VER(dev_priv) >= 11) { +- i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); +- ++ if (DISPLAY_VER(dev_priv) >= 11) + pipe_config->fec_enable = +- intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; +- +- drm_dbg_kms(&dev_priv->drm, +- "[ENCODER:%d:%s] Fec status: %u\n", +- encoder->base.base.id, encoder->base.name, +- pipe_config->fec_enable); +- } ++ intel_de_read(dev_priv, ++ dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE; + + if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp)) + pipe_config->infoframes.enable |= +-- +2.43.0 + diff --git a/queue-6.6/drm-i915-introduce-crtc_state-enhanced_framing.patch b/queue-6.6/drm-i915-introduce-crtc_state-enhanced_framing.patch new file mode 100644 index 00000000000..4fd74bc001f --- /dev/null +++ b/queue-6.6/drm-i915-introduce-crtc_state-enhanced_framing.patch @@ -0,0 +1,203 @@ +From effccad7e49f1ac9e62a6105be83ba410e26db99 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 3 May 2023 14:36:59 +0300 +Subject: drm/i915: Introduce crtc_state->enhanced_framing +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Ville Syrjälä + +[ Upstream commit 3072a24c778a7102d70692af5556e47363114c67 ] + +Track DP enhanced framing properly in the crtc state instead +of relying just on the cached DPCD everywhere, and hook it +up into the state check and dump. 
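+
+The flow is the usual one for a piece of negotiated link configuration
+tracked in the crtc state, condensed from the hunks below:
+
+	/* compute_config(): decide based on sink capability */
+	pipe_config->enhanced_framing =
+		drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+
+	/* get_config(): read the decision back from the hardware */
+	pipe_config->enhanced_framing =
+		intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
+		DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+
+	/* state checker: computed and read-back values must agree */
+	PIPE_CONF_CHECK_BOOL(enhanced_framing);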
+ +v2: Actually set enhanced_framing in .compute_config() + +Signed-off-by: Ville Syrjälä +Link: https://patchwork.freedesktop.org/patch/msgid/20230503113659.16305-1-ville.syrjala@linux.intel.com +Reviewed-by: Luca Coelho +Stable-dep-of: e6861d8264cd ("drm/i915/edp: don't write to DP_LINK_BW_SET when using rate select") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/i915/display/g4x_dp.c | 10 ++++++++-- + drivers/gpu/drm/i915/display/intel_crt.c | 2 ++ + drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 5 +++-- + drivers/gpu/drm/i915/display/intel_ddi.c | 11 +++++++++-- + drivers/gpu/drm/i915/display/intel_display.c | 1 + + drivers/gpu/drm/i915/display/intel_display_types.h | 2 ++ + drivers/gpu/drm/i915/display/intel_dp.c | 3 +++ + drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2 +- + 8 files changed, 29 insertions(+), 7 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c +index 4c7187f7913ea..e8ee0a08947e8 100644 +--- a/drivers/gpu/drm/i915/display/g4x_dp.c ++++ b/drivers/gpu/drm/i915/display/g4x_dp.c +@@ -141,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, + + intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), + TRANS_DP_ENH_FRAMING, +- drm_dp_enhanced_frame_cap(intel_dp->dpcd) ? ++ pipe_config->enhanced_framing ? + TRANS_DP_ENH_FRAMING : 0); + } else { + if (IS_G4X(dev_priv) && pipe_config->limited_color_range) +@@ -153,7 +153,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF; + +- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) ++ if (pipe_config->enhanced_framing) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + if (IS_CHERRYVIEW(dev_priv)) +@@ -351,6 +351,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, + u32 trans_dp = intel_de_read(dev_priv, + TRANS_DP_CTL(crtc->pipe)); + ++ if (trans_dp & TRANS_DP_ENH_FRAMING) ++ pipe_config->enhanced_framing = true; ++ + if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) + flags |= DRM_MODE_FLAG_PHSYNC; + else +@@ -361,6 +364,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, + else + flags |= DRM_MODE_FLAG_NVSYNC; + } else { ++ if (tmp & DP_ENHANCED_FRAMING) ++ pipe_config->enhanced_framing = true; ++ + if (tmp & DP_SYNC_HS_HIGH) + flags |= DRM_MODE_FLAG_PHSYNC; + else +diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c +index d23020eb87f46..4352f90177615 100644 +--- a/drivers/gpu/drm/i915/display/intel_crt.c ++++ b/drivers/gpu/drm/i915/display/intel_crt.c +@@ -456,6 +456,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, + /* FDI must always be 2.7 GHz */ + pipe_config->port_clock = 135000 * 2; + ++ pipe_config->enhanced_framing = true; ++ + adjusted_mode->crtc_clock = lpt_iclkip(pipe_config); + + return 0; +diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +index 8b34fa55fa1bd..66fe880af8f3f 100644 +--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c ++++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +@@ -258,8 +258,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); +- drm_dbg_kms(&i915->drm, "fec: %s\n", +- str_enabled_disabled(pipe_config->fec_enable)); ++ drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n", ++ str_enabled_disabled(pipe_config->fec_enable), ++ 
str_enabled_disabled(pipe_config->enhanced_framing)); + } + + drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n", +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 85e2263e688de..c7e00f57cb7ab 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -3432,7 +3432,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp, + dp_tp_ctl |= DP_TP_CTL_MODE_MST; + } else { + dp_tp_ctl |= DP_TP_CTL_MODE_SST; +- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) ++ if (crtc_state->enhanced_framing) + dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); +@@ -3489,7 +3489,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, + dp_tp_ctl |= DP_TP_CTL_MODE_MST; + } else { + dp_tp_ctl |= DP_TP_CTL_MODE_SST; +- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) ++ if (crtc_state->enhanced_framing) + dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); +@@ -3724,6 +3724,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, + intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, + &pipe_config->dp_m2_n2); + ++ pipe_config->enhanced_framing = ++ intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) & ++ DP_TP_CTL_ENHANCED_FRAME_ENABLE; ++ + if (DISPLAY_VER(dev_priv) >= 11) + pipe_config->fec_enable = + intel_de_read(dev_priv, +@@ -3740,6 +3744,9 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, + if (!HAS_DP20(dev_priv)) { + /* FDI */ + pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); ++ pipe_config->enhanced_framing = ++ intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) & ++ DP_TP_CTL_ENHANCED_FRAME_ENABLE; + break; + } + fallthrough; /* 128b/132b */ +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 1e2b09ae09b9c..2d9d96ecbb251 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -5255,6 +5255,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, + PIPE_CONF_CHECK_BOOL(hdmi_scrambling); + PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); + PIPE_CONF_CHECK_BOOL(has_infoframe); ++ PIPE_CONF_CHECK_BOOL(enhanced_framing); + PIPE_CONF_CHECK_BOOL(fec_enable); + + PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); +diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h +index 731f2ec04d5cd..7fc92b1474cc4 100644 +--- a/drivers/gpu/drm/i915/display/intel_display_types.h ++++ b/drivers/gpu/drm/i915/display/intel_display_types.h +@@ -1362,6 +1362,8 @@ struct intel_crtc_state { + u16 linetime; + u16 ips_linetime; + ++ bool enhanced_framing; ++ + /* Forward Error correction State */ + bool fec_enable; + +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c +index 66e35f8443e1a..b4fb7ce39d06f 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp.c ++++ b/drivers/gpu/drm/i915/display/intel_dp.c +@@ -2312,6 +2312,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); + ++ pipe_config->enhanced_framing = ++ drm_dp_enhanced_frame_cap(intel_dp->dpcd); ++ + if (pipe_config->dsc.compression_enable) + output_bpp = 
pipe_config->dsc.compressed_bpp; + else +diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c +index a263773f4d68a..d09e43a38fa61 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c +@@ -655,7 +655,7 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp, + /* Write the link configuration data */ + link_config[0] = link_bw; + link_config[1] = crtc_state->lane_count; +- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) ++ if (crtc_state->enhanced_framing) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + +-- +2.43.0 + diff --git a/queue-6.6/drm-update-file-owner-during-use.patch b/queue-6.6/drm-update-file-owner-during-use.patch new file mode 100644 index 00000000000..455c8873b9d --- /dev/null +++ b/queue-6.6/drm-update-file-owner-during-use.patch @@ -0,0 +1,316 @@ +From cad72a1f3cf8fe25975a226103935861ed2afb56 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Jun 2023 10:48:24 +0100 +Subject: drm: Update file owner during use +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Tvrtko Ursulin + +[ Upstream commit 1c7a387ffef894b1ab3942f0482dac7a6e0a909c ] + +With the typical model where the display server opens the file descriptor +and then hands it over to the client(*), we were showing stale data in +debugfs. + +Fix it by updating the drm_file->pid on ioctl access from a different +process. + +The field is also made RCU protected to allow for lockless readers. Update +side is protected with dev->filelist_mutex. + +Before: + +$ cat /sys/kernel/debug/dri/0/clients + command pid dev master a uid magic + Xorg 2344 0 y y 0 0 + Xorg 2344 0 n y 0 2 + Xorg 2344 0 n y 0 3 + Xorg 2344 0 n y 0 4 + +After: + +$ cat /sys/kernel/debug/dri/0/clients + command tgid dev master a uid magic + Xorg 830 0 y y 0 0 + xfce4-session 880 0 n y 0 1 + xfwm4 943 0 n y 0 2 + neverball 1095 0 n y 0 3 + +*) +More detailed and historically accurate description of various handover +implementation kindly provided by Emil Velikov: + +""" +The traditional model, the server was the orchestrator managing the +primary device node. From the fd, to the master status and +authentication. But looking at the fd alone, this has varied across +the years. + +IIRC in the DRI1 days, Xorg (libdrm really) would have a list of open +fd(s) and reuse those whenever needed, DRI2 the client was responsible +for open() themselves and with DRI3 the fd was passed to the client. + +Around the inception of DRI3 and systemd-logind, the latter became +another possible orchestrator. Whereby Xorg and Wayland compositors +could ask it for the fd. For various reasons (hysterical and genuine +ones) Xorg has a fallback path going the open(), whereas Wayland +compositors are moving to solely relying on logind... some never had +fallback even. + +Over the past few years, more projects have emerged which provide +functionality similar (be that on API level, Dbus, or otherwise) to +systemd-logind. +""" + +v2: + * Fixed typo in commit text and added a fine historical explanation + from Emil. 
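+
+The update side in drm_file_update_pid() below is the standard RCU
+replace-then-retire sequence, roughly:
+
+	mutex_lock(&dev->filelist_mutex);	/* serializes updaters */
+	old = rcu_replace_pointer(filp->pid, pid, 1);
+	mutex_unlock(&dev->filelist_mutex);
+
+	if (pid != old) {
+		get_pid(pid);		/* reference for the new owner */
+		synchronize_rcu();	/* wait out lockless readers */
+		put_pid(old);		/* old pid is now unreachable */
+	}
+
+Readers only ever dereference the pointer under rcu_read_lock(), as in
+the debugfs and fdinfo hunks below, so they see either the old pid or
+the new one, never a freed one.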
+ +Signed-off-by: Tvrtko Ursulin +Cc: "Christian König" +Cc: Daniel Vetter +Acked-by: Christian König +Reviewed-by: Emil Velikov +Reviewed-by: Rob Clark +Tested-by: Rob Clark +Link: https://patchwork.freedesktop.org/patch/msgid/20230621094824.2348732-1-tvrtko.ursulin@linux.intel.com +Signed-off-by: Christian König +Stable-dep-of: 5a6c9a05e55c ("drm: Fix FD ownership check in drm_master_check_perm()") +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ++-- + drivers/gpu/drm/drm_auth.c | 3 +- + drivers/gpu/drm/drm_debugfs.c | 10 ++++--- + drivers/gpu/drm/drm_file.c | 40 +++++++++++++++++++++++-- + drivers/gpu/drm/drm_ioctl.c | 3 ++ + drivers/gpu/drm/nouveau/nouveau_drm.c | 5 +++- + drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 6 ++-- + include/drm/drm_file.h | 13 ++++++-- + 8 files changed, 71 insertions(+), 15 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +index ca4d2d430e28c..a1b15d0d6c489 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +@@ -962,6 +962,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused) + list_for_each_entry(file, &dev->filelist, lhead) { + struct task_struct *task; + struct drm_gem_object *gobj; ++ struct pid *pid; + int id; + + /* +@@ -971,8 +972,9 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused) + * Therefore, we need to protect this ->comm access using RCU. + */ + rcu_read_lock(); +- task = pid_task(file->pid, PIDTYPE_TGID); +- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), ++ pid = rcu_dereference(file->pid); ++ task = pid_task(pid, PIDTYPE_TGID); ++ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid), + task ? task->comm : ""); + rcu_read_unlock(); + +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c +index cf92a9ae8034c..2ed2585ded378 100644 +--- a/drivers/gpu/drm/drm_auth.c ++++ b/drivers/gpu/drm/drm_auth.c +@@ -235,7 +235,8 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) + static int + drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv) + { +- if (file_priv->pid == task_pid(current) && file_priv->was_master) ++ if (file_priv->was_master && ++ rcu_access_pointer(file_priv->pid) == task_pid(current)) + return 0; + + if (!capable(CAP_SYS_ADMIN)) +diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c +index 2de43ff3ce0a4..41b0682c638ef 100644 +--- a/drivers/gpu/drm/drm_debugfs.c ++++ b/drivers/gpu/drm/drm_debugfs.c +@@ -92,15 +92,17 @@ static int drm_clients_info(struct seq_file *m, void *data) + */ + mutex_lock(&dev->filelist_mutex); + list_for_each_entry_reverse(priv, &dev->filelist, lhead) { +- struct task_struct *task; + bool is_current_master = drm_is_current_master(priv); ++ struct task_struct *task; ++ struct pid *pid; + +- rcu_read_lock(); /* locks pid_task()->comm */ +- task = pid_task(priv->pid, PIDTYPE_TGID); ++ rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */ ++ pid = rcu_dereference(priv->pid); ++ task = pid_task(pid, PIDTYPE_TGID); + uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID; + seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n", + task ? task->comm : "", +- pid_vnr(priv->pid), ++ pid_vnr(pid), + priv->minor->index, + is_current_master ? 'y' : 'n', + priv->authenticated ? 
'y' : 'n', +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c +index 883d83bc0e3d5..e692770ef6d3c 100644 +--- a/drivers/gpu/drm/drm_file.c ++++ b/drivers/gpu/drm/drm_file.c +@@ -160,7 +160,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) + + /* Get a unique identifier for fdinfo: */ + file->client_id = atomic64_inc_return(&ident); +- file->pid = get_pid(task_tgid(current)); ++ rcu_assign_pointer(file->pid, get_pid(task_tgid(current))); + file->minor = minor; + + /* for compatibility root is always authenticated */ +@@ -200,7 +200,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) + drm_syncobj_release(file); + if (drm_core_check_feature(dev, DRIVER_GEM)) + drm_gem_release(dev, file); +- put_pid(file->pid); ++ put_pid(rcu_access_pointer(file->pid)); + kfree(file); + + return ERR_PTR(ret); +@@ -291,7 +291,7 @@ void drm_file_free(struct drm_file *file) + + WARN_ON(!list_empty(&file->event_list)); + +- put_pid(file->pid); ++ put_pid(rcu_access_pointer(file->pid)); + kfree(file); + } + +@@ -505,6 +505,40 @@ int drm_release(struct inode *inode, struct file *filp) + } + EXPORT_SYMBOL(drm_release); + ++void drm_file_update_pid(struct drm_file *filp) ++{ ++ struct drm_device *dev; ++ struct pid *pid, *old; ++ ++ /* ++ * Master nodes need to keep the original ownership in order for ++ * drm_master_check_perm to keep working correctly. (See comment in ++ * drm_auth.c.) ++ */ ++ if (filp->was_master) ++ return; ++ ++ pid = task_tgid(current); ++ ++ /* ++ * Quick unlocked check since the model is a single handover followed by ++ * exclusive repeated use. ++ */ ++ if (pid == rcu_access_pointer(filp->pid)) ++ return; ++ ++ dev = filp->minor->dev; ++ mutex_lock(&dev->filelist_mutex); ++ old = rcu_replace_pointer(filp->pid, pid, 1); ++ mutex_unlock(&dev->filelist_mutex); ++ ++ if (pid != old) { ++ get_pid(pid); ++ synchronize_rcu(); ++ put_pid(old); ++ } ++} ++ + /** + * drm_release_noglobal - release method for DRM file + * @inode: device inode +diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c +index f03ffbacfe9b4..77590b0f38fa3 100644 +--- a/drivers/gpu/drm/drm_ioctl.c ++++ b/drivers/gpu/drm/drm_ioctl.c +@@ -776,6 +776,9 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, + struct drm_device *dev = file_priv->minor->dev; + int retcode; + ++ /* Update drm_file owner if fd was passed along. 
*/ ++ drm_file_update_pid(file_priv); ++ + if (drm_dev_is_unplugged(dev)) + return -ENODEV; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index 4396f501b16a3..50589f982d1a4 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -1133,7 +1133,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) + } + + get_task_comm(tmpname, current); +- snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); ++ rcu_read_lock(); ++ snprintf(name, sizeof(name), "%s[%d]", ++ tmpname, pid_nr(rcu_dereference(fpriv->pid))); ++ rcu_read_unlock(); + + if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) { + ret = -ENOMEM; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +index 8b1eb0061610c..12787bb9c111d 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +@@ -244,6 +244,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused) + list_for_each_entry(file, &dev->filelist, lhead) { + struct task_struct *task; + struct drm_gem_object *gobj; ++ struct pid *pid; + int id; + + /* +@@ -253,8 +254,9 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused) + * Therefore, we need to protect this ->comm access using RCU. + */ + rcu_read_lock(); +- task = pid_task(file->pid, PIDTYPE_TGID); +- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), ++ pid = rcu_dereference(file->pid); ++ task = pid_task(pid, PIDTYPE_TGID); ++ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid), + task ? task->comm : ""); + rcu_read_unlock(); + +diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h +index 010239392adfb..9c47a43f42a62 100644 +--- a/include/drm/drm_file.h ++++ b/include/drm/drm_file.h +@@ -256,8 +256,15 @@ struct drm_file { + /** @master_lookup_lock: Serializes @master. */ + spinlock_t master_lookup_lock; + +- /** @pid: Process that opened this file. */ +- struct pid *pid; ++ /** ++ * @pid: Process that is using this file. ++ * ++ * Must only be dereferenced under a rcu_read_lock or equivalent. ++ * ++ * Updates are guarded with dev->filelist_mutex and reference must be ++ * dropped after a RCU grace period to accommodate lockless readers. 
++	 */
++	struct pid __rcu *pid;
+ 
+ 	/** @client_id: A unique id for fdinfo */
+ 	u64 client_id;
+@@ -420,6 +427,8 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
+ 	return file_priv->minor->type == DRM_MINOR_ACCEL;
+ }
+ 
++void drm_file_update_pid(struct drm_file *);
++
+ int drm_open(struct inode *inode, struct file *filp);
+ int drm_open_helper(struct file *filp, struct drm_minor *minor);
+ ssize_t drm_read(struct file *filp, char __user *buffer,
+-- 
+2.43.0
+
diff --git a/queue-6.6/mm-damon-core-make-damon_start-waits-until-kdamond_f.patch b/queue-6.6/mm-damon-core-make-damon_start-waits-until-kdamond_f.patch
new file mode 100644
index 00000000000..ccb7e8cb67b
--- /dev/null
+++ b/queue-6.6/mm-damon-core-make-damon_start-waits-until-kdamond_f.patch
@@ -0,0 +1,89 @@
+From 3e849a5b8acdcdfc99f1925220b31ab299747d5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Fri, 8 Dec 2023 17:50:18 +0000
+Subject: mm/damon/core: make damon_start() waits until kdamond_fn() starts
+
+From: SeongJae Park
+
+[ Upstream commit 6376a824595607e99d032a39ba3394988b4fce96 ]
+
+The cleanup tasks of kdamond threads, including the reset of the
+corresponding DAMON context's ->kdamond field and the decrease of the
+global nr_running_ctxs counter, are supposed to be executed by
+kdamond_fn(). However, commit 0f91d13366a4 ("mm/damon: simplify stop
+mechanism") made neither damon_start() nor damon_stop() ensure that the
+corresponding kdamond has started the execution of kdamond_fn().
+
+As a result, the cleanup can be skipped if damon_stop() is called fast
+enough after the previous damon_start(). In particular, the skipped
+reset of ->kdamond could cause a use-after-free.
+
+Fix it by waiting for the start of kdamond_fn() execution from
+damon_start().
+
+Link: https://lkml.kernel.org/r/20231208175018.63880-1-sj@kernel.org
+Fixes: 0f91d13366a4 ("mm/damon: simplify stop mechanism")
+Signed-off-by: SeongJae Park
+Reported-by: Jakub Acs
+Cc: Changbin Du
+Cc: Jakub Acs
+Cc: # 5.15.x
+Signed-off-by: Andrew Morton
+Signed-off-by: Sasha Levin
+---
+ include/linux/damon.h | 2 ++
+ mm/damon/core.c       | 6 ++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index 506118916378b..a953d7083cd59 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -534,6 +534,8 @@ struct damon_ctx {
+ 	 * update
+ 	 */
+ 	unsigned long next_ops_update_sis;
++	/* for waiting until the execution of the kdamond_fn is started */
++	struct completion kdamond_started;
+ 
+ 	/* public: */
+ 	struct task_struct *kdamond;
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 30c93de59475f..aff611b6eafe1 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -423,6 +423,8 @@ struct damon_ctx *damon_new_ctx(void)
+ 	if (!ctx)
+ 		return NULL;
+ 
++	init_completion(&ctx->kdamond_started);
++
+ 	ctx->attrs.sample_interval = 5 * 1000;
+ 	ctx->attrs.aggr_interval = 100 * 1000;
+ 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
+@@ -636,11 +638,14 @@ static int __damon_start(struct damon_ctx *ctx)
+ 	mutex_lock(&ctx->kdamond_lock);
+ 	if (!ctx->kdamond) {
+ 		err = 0;
++		reinit_completion(&ctx->kdamond_started);
+ 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
+ 				nr_running_ctxs);
+ 		if (IS_ERR(ctx->kdamond)) {
+ 			err = PTR_ERR(ctx->kdamond);
+ 			ctx->kdamond = NULL;
++		} else {
++			wait_for_completion(&ctx->kdamond_started);
+ 		}
+ 	}
+ 	mutex_unlock(&ctx->kdamond_lock);
+@@ -1347,6 +1352,7 @@ static int kdamond_fn(void *data)
+ 
+ 	pr_debug("kdamond (%d) starts\n", current->pid);
+ 
++	complete(&ctx->kdamond_started);
+ 	kdamond_init_intervals_sis(ctx);
+ 
+ 	if (ctx->ops.init)
+-- 
+2.43.0
+
diff --git a/queue-6.6/mm-damon-core-use-number-of-passed-access-sampling-a.patch b/queue-6.6/mm-damon-core-use-number-of-passed-access-sampling-a.patch
new file mode 100644
index 00000000000..4cb9140c8df
--- /dev/null
+++ b/queue-6.6/mm-damon-core-use-number-of-passed-access-sampling-a.patch
@@ -0,0 +1,259 @@
+From dfda8d41e94ee98ebd2ad78c7cb49625a8c92474 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Thu, 14 Sep 2023 02:15:23 +0000
+Subject: mm/damon/core: use number of passed access sampling as a timer
+
+From: SeongJae Park
+
+[ Upstream commit 4472edf63d6630e6cf65e205b4fc8c3c94d0afe5 ]
+
+DAMON sleeps for the sampling interval after each sampling, and checks
+whether the aggregation interval and the ops update interval have
+passed using ktime_get_coarse_ts64() and baseline timestamps for the
+intervals. That design is meant to make the operations occur at
+deterministic times regardless of how long each piece of work takes.
+However, it turned out not to be that useful, and to produce
+non-intuitive results.
+
+After all, timer functions, and especially the sleep functions that
+DAMON uses to wait for specific timing, are not necessarily strictly
+accurate. That is legal behavior, so not a problem in itself. However,
+because of such inaccuracies, nr_accesses can become larger than the
+aggregation interval divided by the sampling interval. For example,
+with the default setting (5 ms sampling interval and 100 ms aggregation
+interval) we frequently see regions with nr_accesses larger than 20.
+Also, if the execution of a DAMOS scheme takes a long time, the next
+aggregation could happen before enough samples have been collected.
+This is not what users would intuitively expect.
+
+Since the access check sampling is the smallest unit of work in DAMON,
+using the number of passed sampling intervals as the DAMON-internal
+timer can easily avoid these problems. That is, convert the aggregation
+and ops update intervals to the number of sampling intervals that need
+to pass before those operations are executed, count the passed sampling
+intervals, and invoke the operations as soon as the required number of
+sampling intervals has passed. Make the change.
+
+Note that this could change behavior for settings whose intervals are
+not aligned to the sampling interval. For example, if the sampling
+interval is 5 ms and the aggregation interval is 12 ms, DAMON
+effectively used 15 ms as its aggregation interval, because it checked
+whether the aggregation interval had passed only after sleeping for a
+full sampling interval. With this change, DAMON effectively uses 10 ms,
+since it uses 'aggregation interval / sampling interval * sampling
+interval' as the effective aggregation interval, and we don't use
+floating point types. Usual users would have used aligned intervals, so
+this behavioral change is not expected to make any meaningful impact;
+just make this change.
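+
+A worked example with the default attrs (5 ms sampling, 100 ms
+aggregation), using the names from the hunks below
+(kdamond_aggregate() stands in here for the merge/reset steps):
+
+	/* at context start, and again after every aggregation: */
+	next_aggregation_sis = passed_sample_intervals
+			+ 100000 / 5000;	/* i.e. +20 samples */
+
+	/* main loop: one sleep is exactly one sampling interval */
+	kdamond_usleep(sample_interval);
+	passed_sample_intervals++;
+	if (passed_sample_intervals == next_aggregation_sis)
+		kdamond_aggregate();	/* nr_accesses is at most 20 here */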
+ +Link: https://lkml.kernel.org/r/20230914021523.60649-1-sj@kernel.org +Signed-off-by: SeongJae Park +Signed-off-by: Andrew Morton +Stable-dep-of: 6376a8245956 ("mm/damon/core: make damon_start() waits until kdamond_fn() starts") +Signed-off-by: Sasha Levin +--- + include/linux/damon.h | 14 ++++++- + mm/damon/core.c | 96 +++++++++++++++++++++---------------------- + 2 files changed, 59 insertions(+), 51 deletions(-) + +diff --git a/include/linux/damon.h b/include/linux/damon.h +index c70cca8a839f7..506118916378b 100644 +--- a/include/linux/damon.h ++++ b/include/linux/damon.h +@@ -522,8 +522,18 @@ struct damon_ctx { + struct damon_attrs attrs; + + /* private: internal use only */ +- struct timespec64 last_aggregation; +- struct timespec64 last_ops_update; ++ /* number of sample intervals that passed since this context started */ ++ unsigned long passed_sample_intervals; ++ /* ++ * number of sample intervals that should be passed before next ++ * aggregation ++ */ ++ unsigned long next_aggregation_sis; ++ /* ++ * number of sample intervals that should be passed before next ops ++ * update ++ */ ++ unsigned long next_ops_update_sis; + + /* public: */ + struct task_struct *kdamond; +diff --git a/mm/damon/core.c b/mm/damon/core.c +index fd5be73f699f4..30c93de59475f 100644 +--- a/mm/damon/core.c ++++ b/mm/damon/core.c +@@ -427,8 +427,10 @@ struct damon_ctx *damon_new_ctx(void) + ctx->attrs.aggr_interval = 100 * 1000; + ctx->attrs.ops_update_interval = 60 * 1000 * 1000; + +- ktime_get_coarse_ts64(&ctx->last_aggregation); +- ctx->last_ops_update = ctx->last_aggregation; ++ ctx->passed_sample_intervals = 0; ++ /* These will be set from kdamond_init_intervals_sis() */ ++ ctx->next_aggregation_sis = 0; ++ ctx->next_ops_update_sis = 0; + + mutex_init(&ctx->kdamond_lock); + +@@ -542,6 +544,9 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, + */ + int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) + { ++ unsigned long sample_interval = attrs->sample_interval ? ++ attrs->sample_interval : 1; ++ + if (attrs->min_nr_regions < 3) + return -EINVAL; + if (attrs->min_nr_regions > attrs->max_nr_regions) +@@ -549,6 +554,11 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) + if (attrs->sample_interval > attrs->aggr_interval) + return -EINVAL; + ++ ctx->next_aggregation_sis = ctx->passed_sample_intervals + ++ attrs->aggr_interval / sample_interval; ++ ctx->next_ops_update_sis = ctx->passed_sample_intervals + ++ attrs->ops_update_interval / sample_interval; ++ + damon_update_monitoring_results(ctx, attrs); + ctx->attrs = *attrs; + return 0; +@@ -722,38 +732,6 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) + return err; + } + +-/* +- * damon_check_reset_time_interval() - Check if a time interval is elapsed. +- * @baseline: the time to check whether the interval has elapsed since +- * @interval: the time interval (microseconds) +- * +- * See whether the given time interval has passed since the given baseline +- * time. If so, it also updates the baseline to current time for next check. +- * +- * Return: true if the time interval has passed, or false otherwise. 
+- */ +-static bool damon_check_reset_time_interval(struct timespec64 *baseline, +- unsigned long interval) +-{ +- struct timespec64 now; +- +- ktime_get_coarse_ts64(&now); +- if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) < +- interval * 1000) +- return false; +- *baseline = now; +- return true; +-} +- +-/* +- * Check whether it is time to flush the aggregated information +- */ +-static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx) +-{ +- return damon_check_reset_time_interval(&ctx->last_aggregation, +- ctx->attrs.aggr_interval); +-} +- + /* + * Reset the aggregated monitoring results ('nr_accesses' of each region). + */ +@@ -1234,18 +1212,6 @@ static void kdamond_split_regions(struct damon_ctx *ctx) + last_nr_regions = nr_regions; + } + +-/* +- * Check whether it is time to check and apply the operations-related data +- * structures. +- * +- * Returns true if it is. +- */ +-static bool kdamond_need_update_operations(struct damon_ctx *ctx) +-{ +- return damon_check_reset_time_interval(&ctx->last_ops_update, +- ctx->attrs.ops_update_interval); +-} +- + /* + * Check whether current monitoring should be stopped + * +@@ -1357,6 +1323,17 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) + return -EBUSY; + } + ++static void kdamond_init_intervals_sis(struct damon_ctx *ctx) ++{ ++ unsigned long sample_interval = ctx->attrs.sample_interval ? ++ ctx->attrs.sample_interval : 1; ++ ++ ctx->passed_sample_intervals = 0; ++ ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; ++ ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / ++ sample_interval; ++} ++ + /* + * The monitoring daemon that runs as a kernel thread + */ +@@ -1370,6 +1347,8 @@ static int kdamond_fn(void *data) + + pr_debug("kdamond (%d) starts\n", current->pid); + ++ kdamond_init_intervals_sis(ctx); ++ + if (ctx->ops.init) + ctx->ops.init(ctx); + if (ctx->callback.before_start && ctx->callback.before_start(ctx)) +@@ -1378,6 +1357,17 @@ static int kdamond_fn(void *data) + sz_limit = damon_region_sz_limit(ctx); + + while (!kdamond_need_stop(ctx)) { ++ /* ++ * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could ++ * be changed from after_wmarks_check() or after_aggregation() ++ * callbacks. Read the values here, and use those for this ++ * iteration. That is, damon_set_attrs() updated new values ++ * are respected from next iteration. ++ */ ++ unsigned long next_aggregation_sis = ctx->next_aggregation_sis; ++ unsigned long next_ops_update_sis = ctx->next_ops_update_sis; ++ unsigned long sample_interval = ctx->attrs.sample_interval; ++ + if (kdamond_wait_activation(ctx)) + break; + +@@ -1387,12 +1377,17 @@ static int kdamond_fn(void *data) + ctx->callback.after_sampling(ctx)) + break; + +- kdamond_usleep(ctx->attrs.sample_interval); ++ kdamond_usleep(sample_interval); ++ ctx->passed_sample_intervals++; + + if (ctx->ops.check_accesses) + max_nr_accesses = ctx->ops.check_accesses(ctx); + +- if (kdamond_aggregate_interval_passed(ctx)) { ++ sample_interval = ctx->attrs.sample_interval ? 
++ ctx->attrs.sample_interval : 1; ++ if (ctx->passed_sample_intervals == next_aggregation_sis) { ++ ctx->next_aggregation_sis = next_aggregation_sis + ++ ctx->attrs.aggr_interval / sample_interval; + kdamond_merge_regions(ctx, + max_nr_accesses / 10, + sz_limit); +@@ -1407,7 +1402,10 @@ static int kdamond_fn(void *data) + ctx->ops.reset_aggregated(ctx); + } + +- if (kdamond_need_update_operations(ctx)) { ++ if (ctx->passed_sample_intervals == next_ops_update_sis) { ++ ctx->next_ops_update_sis = next_ops_update_sis + ++ ctx->attrs.ops_update_interval / ++ sample_interval; + if (ctx->ops.update) + ctx->ops.update(ctx); + sz_limit = damon_region_sz_limit(ctx); +-- +2.43.0 + diff --git a/queue-6.6/series b/queue-6.6/series index 88e888be9c6..3d3cf4bffa4 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -1 +1,13 @@ bpf-fix-prog_array_map_poke_run-map-poke-update.patch +mm-damon-core-use-number-of-passed-access-sampling-a.patch +mm-damon-core-make-damon_start-waits-until-kdamond_f.patch +btrfs-qgroup-iterate-qgroups-without-memory-allocati.patch +btrfs-qgroup-use-qgroup_iterator-in-qgroup_convert_m.patch +btrfs-free-qgroup-pertrans-reserve-on-transaction-ab.patch +drm-amd-display-fix-hw-rotated-modes-when-psr-su-is-.patch +drm-i915-fix-fec-state-dump.patch +drm-i915-introduce-crtc_state-enhanced_framing.patch +drm-i915-edp-don-t-write-to-dp_link_bw_set-when-usin.patch +drm-update-file-owner-during-use.patch +drm-fix-fd-ownership-check-in-drm_master_check_perm.patch +spi-spi-imx-correctly-configure-burst-length-when-us.patch diff --git a/queue-6.6/spi-spi-imx-correctly-configure-burst-length-when-us.patch b/queue-6.6/spi-spi-imx-correctly-configure-burst-length-when-us.patch new file mode 100644 index 00000000000..78fdcb0f529 --- /dev/null +++ b/queue-6.6/spi-spi-imx-correctly-configure-burst-length-when-us.patch @@ -0,0 +1,58 @@ +From 62ef9f16be90f11753047bec8d5b53aeff5aa9e9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 9 Dec 2023 23:23:26 +0100 +Subject: spi: spi-imx: correctly configure burst length when using dma + +From: Benjamin Bigler + +[ Upstream commit e9b220aeacf109684cce36a94fc24ed37be92b05 ] + +If DMA is used, burst length should be set to the bus width of the DMA. +Otherwise, the SPI hardware will transmit/receive one word per DMA +request. +Since this issue affects both transmission and reception, it cannot be +detected with a loopback test. +Replace magic numbers 512 and 0xfff with MX51_ECSPI_CTRL_MAX_BURST. 
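+
+The arithmetic of the new DMA branch below, for the three possible word
+sizes (the BL field encodes the burst length in bits, minus one):
+
+	/*  8 bits_per_word:  8 * 1 - 1 =   7  ->   8-bit burst */
+	/* 16 bits_per_word: 16 * 2 - 1 =  31  ->  32-bit burst */
+	/* 32 bits_per_word: 32 * 4 - 1 = 127  -> 128-bit burst */
+	ctrl |= (spi_imx->bits_per_word *
+		 spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
+		<< MX51_ECSPI_CTRL_BL_OFFSET;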
+
+Reported-by: Stefan Bigler
+Signed-off-by: Benjamin Bigler
+Fixes: 15a6af94a277 ("spi: Increase imx51 ecspi burst length based on transfer length")
+Link: https://lore.kernel.org/r/8a415902c751cdbb4b20ce76569216ed@mail.infomaniak.com
+Link: https://lore.kernel.org/r/20231209222338.5564-1-benjamin@bigler.one
+Signed-off-by: Mark Brown
+Signed-off-by: Sasha Levin
+---
+ drivers/spi/spi-imx.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 498e35c8db2c1..272bc871a848b 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -659,11 +659,18 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ 		ctrl |= (spi_imx->target_burst * 8 - 1)
+ 			<< MX51_ECSPI_CTRL_BL_OFFSET;
+ 	else {
+-		if (spi_imx->count >= 512)
+-			ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
+-		else
+-			ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
++		if (spi_imx->usedma) {
++			ctrl |= (spi_imx->bits_per_word *
++				spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
+ 				<< MX51_ECSPI_CTRL_BL_OFFSET;
++		} else {
++			if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
++				ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
++					<< MX51_ECSPI_CTRL_BL_OFFSET;
++			else
++				ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
++					<< MX51_ECSPI_CTRL_BL_OFFSET;
++		}
+ 	}
+ 
+ 	/* set clock speed */
+-- 
+2.43.0
+
-- 
2.47.3