drm/xe/xe_query: Use separate iterator while filling GT list
author     Matt Roper <matthew.d.roper@intel.com>
Tue, 1 Jul 2025 20:13:28 +0000 (13:13 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 20 Aug 2025 16:30:34 +0000 (18:30 +0200)
[ Upstream commit d4eb4a010262ea7801e576d1033b355910f2f7d4 ]

The 'id' value updated by for_each_gt() is the uapi GT ID of the GTs
being iterated over, and may skip over values if a GT is not present on
the device.  Use a separate iterator for GT list array assignments to
ensure that the array will be filled properly on future platforms where
the index in the GT query list may not match the uapi ID.

v2:
 - Include the missing increment of the iterator.  (Jonathan)

Cc: Jonathan Cavitt <jonathan.cavitt@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://lore.kernel.org/r/20250701201320.2514369-16-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
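
A minimal userspace sketch of the behaviour described in the commit
message above: the fake_gt table, the 'present' flag, and the 3-GT
layout are hypothetical stand-ins for for_each_gt() skipping absent
GTs, not the actual xe driver code.

#include <stdio.h>

/* Hypothetical GT table: uapi ID 1 is absent, so valid IDs are 0 and 2. */
struct fake_gt {
	int present;
	int uapi_id;
};

int main(void)
{
	struct fake_gt gts[] = { { 1, 0 }, { 0, 1 }, { 1, 2 } };
	int out[3] = { -1, -1, -1 };	/* mimics gt_list->gt_list[] */
	int iter = 0;

	for (int id = 0; id < 3; id++) {
		if (!gts[id].present)
			continue;	/* for_each_gt() skips absent GTs */

		/*
		 * Indexing with the sparse 'id' (the old code) would leave
		 * out[1] untouched and place the last entry beyond the
		 * number of present GTs; a dense 'iter' packs the array.
		 */
		out[iter] = gts[id].uapi_id;
		iter++;
	}

	for (int i = 0; i < iter; i++)
		printf("array slot %d holds uapi GT id %d\n", i, out[i]);

	return 0;
}

Compiled as plain C, this prints two densely packed entries (slots 0
and 1) even though the surviving uapi IDs are 0 and 2, which is the
property the 'iter' change below guarantees for the query output.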
drivers/gpu/drm/xe/xe_query.c

diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 6fec5d1a1eb44b0b7298f982140872743f1cf2ef..6e7c940d7e2275159918169266a5ab74ce0d42cf 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -366,6 +366,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
        struct drm_xe_query_gt_list __user *query_ptr =
                u64_to_user_ptr(query->data);
        struct drm_xe_query_gt_list *gt_list;
+       int iter = 0;
        u8 id;
 
        if (query->size == 0) {
@@ -383,12 +384,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
 
        for_each_gt(gt, xe, id) {
                if (xe_gt_is_media_type(gt))
-                       gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+                       gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
                else
-                       gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
-               gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
-               gt_list->gt_list[id].gt_id = gt->info.id;
-               gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+                       gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+               gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
+               gt_list->gt_list[iter].gt_id = gt->info.id;
+               gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
                /*
                 * The mem_regions indexes in the mask below need to
                 * directly identify the struct
@@ -404,19 +405,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
                 * assumption.
                 */
                if (!IS_DGFX(xe))
-                       gt_list->gt_list[id].near_mem_regions = 0x1;
+                       gt_list->gt_list[iter].near_mem_regions = 0x1;
                else
-                       gt_list->gt_list[id].near_mem_regions =
+                       gt_list->gt_list[iter].near_mem_regions =
                                BIT(gt_to_tile(gt)->id) << 1;
-               gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
-                       gt_list->gt_list[id].near_mem_regions;
+               gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
+                       gt_list->gt_list[iter].near_mem_regions;
 
-               gt_list->gt_list[id].ip_ver_major =
+               gt_list->gt_list[iter].ip_ver_major =
                        REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
-               gt_list->gt_list[id].ip_ver_minor =
+               gt_list->gt_list[iter].ip_ver_minor =
                        REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
-               gt_list->gt_list[id].ip_ver_rev =
+               gt_list->gt_list[iter].ip_ver_rev =
                        REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
+
+               iter++;
        }
 
        if (copy_to_user(query_ptr, gt_list, size)) {