git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/vf: Use primary GT ordered work queue on media GT on PTL VF
author: Matthew Brost <matthew.brost@intel.com>
Wed, 8 Oct 2025 21:45:29 +0000 (14:45 -0700)
committer: Matthew Brost <matthew.brost@intel.com>
Thu, 9 Oct 2025 10:24:28 +0000 (03:24 -0700)
VF CCS restore is a primary GT operation on which the media GT depends.
Therefore, it doesn't make much sense to run these operations in
parallel. To address this, point the media GT's ordered work queue to
the primary GT's ordered work queue on platforms that require CCS
restore (PTL VFs) as part of VF post-migration recovery.

v7:
 - Remove bool from xe_gt_alloc (Lucas)
v9:
 - Fix typo (Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20251008214532.3442967-32-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_pci.c
drivers/gpu/drm/xe/xe_pci_types.h

index 54d4034659cb9f8cacb349aa5c4d9f0cdf6ec061..9e3666a226da8eb6c5dfa40685dac4af50e17727 100644 (file)
@@ -329,6 +329,8 @@ struct xe_device {
                u8 skip_mtcfg:1;
                /** @info.skip_pcode: skip access to PCODE uC */
                u8 skip_pcode:1;
+               /** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
+               u8 needs_shared_vf_gt_wq:1;
        } info;
 
        /** @wa_active: keep track of active workarounds */
index 6951fedd4350fca38bf45918c29180787254ac5c..d8e94fb8b9bd07898782ce4745806822e3fa2035 100644 (file)
 
 struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
 {
-       struct drm_device *drm = &tile_to_xe(tile)->drm;
+       struct xe_device *xe = tile_to_xe(tile);
+       struct drm_device *drm = &xe->drm;
+       bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
+               IS_SRIOV_VF(xe);
+       struct workqueue_struct *ordered_wq;
        struct xe_gt *gt;
 
        gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
@@ -75,9 +79,15 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
                return ERR_PTR(-ENOMEM);
 
        gt->tile = tile;
-       gt->ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq", WQ_MEM_RECLAIM);
-       if (IS_ERR(gt->ordered_wq))
-               return ERR_CAST(gt->ordered_wq);
+       if (shared_wq && tile->primary_gt->ordered_wq)
+               ordered_wq = tile->primary_gt->ordered_wq;
+       else
+               ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
+                                                         WQ_MEM_RECLAIM);
+       if (IS_ERR(ordered_wq))
+               return ERR_CAST(ordered_wq);
+
+       gt->ordered_wq = ordered_wq;
 
        return gt;
 }
index be91343829dd5d88ce51c6e1bf9eaaa22f5f74ec..687e919f8a8d09ad04ddca34462c7fb5e98f623d 100644 (file)
@@ -347,6 +347,7 @@ static const struct xe_device_desc ptl_desc = {
        .has_sriov = true,
        .max_gt_per_tile = 2,
        .needs_scratch = true,
+       .needs_shared_vf_gt_wq = true,
 };
 
 #undef PLATFORM
@@ -596,6 +597,7 @@ static int xe_info_init_early(struct xe_device *xe,
        xe->info.skip_mtcfg = desc->skip_mtcfg;
        xe->info.skip_pcode = desc->skip_pcode;
        xe->info.needs_scratch = desc->needs_scratch;
+       xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq;
 
        xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
                                 xe_modparam.probe_display &&
index 9b9766a3baa3466f5f1dc621dba8dfcac3e744de..b11bf6abda5b94f4a49ccb54b2748c898bd7f67f 100644 (file)
@@ -48,6 +48,7 @@ struct xe_device_desc {
        u8 skip_guc_pc:1;
        u8 skip_mtcfg:1;
        u8 skip_pcode:1;
+       u8 needs_shared_vf_gt_wq:1;
 };
 
 struct xe_graphics_desc {