git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/xe/lrc: Allow to add user commands mid context switch
author: Lucas De Marchi <lucas.demarchi@intel.com>
Tue, 16 Sep 2025 21:15:43 +0000 (14:15 -0700)
committer: Lucas De Marchi <lucas.demarchi@intel.com>
Thu, 18 Sep 2025 21:20:39 +0000 (14:20 -0700)
Like done for post-context-restore commands, allow to add commands from
configfs in the middle of context restore. Since currently the indirect
ctx hardcodes the offset to CTX_INDIRECT_CTX_OFFSET_DEFAULT, this is
executed in the very beginning of engine context restore.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://lore.kernel.org/r/20250916-wa-bb-cmds-v5-6-306bddbc15da@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_configfs.c
drivers/gpu/drm/xe/xe_configfs.h
drivers/gpu/drm/xe/xe_lrc.c

index 9a30dc958c35c0dbed950aff0d820e46f4e7ee24..90bc5a4b5da71f850771e7f2f00c41cece2d05a5 100644 (file)
@@ -897,6 +897,21 @@ bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
        return ret;
 }
 
+/**
+ * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
+ *
+ * Return: Number of dwords used in the mid_ctx_restore setting in configfs
+ */
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+                                      enum xe_engine_class class,
+                                      const u32 **cs)
+{
+       /*
+        * NOTE(review): always returns 0 here and never writes *cs —
+        * presumably the configfs attribute backing this is wired up by a
+        * later patch in the series; verify against the follow-up commits.
+        */
+       return 0;
+}
+
 /**
  * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
  * @pdev: pci device
index eff2645b5f59313c6ca31d1a3cf34bea40e408c0..c61e0e47ed94c1f70c39895bff08b83ff2185af1 100644 (file)
@@ -19,6 +19,8 @@ void xe_configfs_check_device(struct pci_dev *pdev);
 bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
 bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+                                      const u32 **cs);
 u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
                                        const u32 **cs);
 #else
@@ -28,6 +30,8 @@ static inline void xe_configfs_check_device(struct pci_dev *pdev) { }
 static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
 static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
 static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
+static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+                                                    const u32 **cs) { return 0; }
 static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
                                                      const u32 **cs) { return 0; }
 #endif
index 0ab99c210d8829adaf98c24a5efa6832cae89578..47e9df7750725a42dc532b584070aed09bf52b73 100644 (file)
@@ -77,11 +77,17 @@ lrc_to_xe(struct xe_lrc *lrc)
 static bool
 gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
 {
+       struct xe_device *xe = gt_to_xe(gt);
+
+       /* Workaround 16010904313 needs the indirect ctx on render/compute */
        if (XE_GT_WA(gt, 16010904313) &&
            (class == XE_ENGINE_CLASS_RENDER ||
             class == XE_ENGINE_CLASS_COMPUTE))
                return true;
 
+       /*
+        * User commands configured via configfs also require the indirect
+        * ctx. Only the dword count matters here, so pass cs == NULL.
+        */
+       if (xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+                                              class, NULL))
+               return true;
+
        return false;
 }
 
@@ -1133,6 +1139,35 @@ static ssize_t setup_configfs_post_ctx_restore_bb(struct xe_lrc *lrc,
        return cmd - batch;
 }
 
+/*
+ * Copy the user-supplied mid-context-restore commands from configfs into
+ * @batch. Returns the number of dwords written, 0 when nothing is
+ * configured, or -ENOSPC when the commands do not fit in @max_len dwords.
+ */
+static ssize_t setup_configfs_mid_ctx_restore_bb(struct xe_lrc *lrc,
+                                                struct xe_hw_engine *hwe,
+                                                u32 *batch, size_t max_len)
+{
+       struct xe_device *xe = gt_to_xe(lrc->gt);
+       const u32 *user_batch;
+       u32 *cmd = batch;
+       u32 count;
+
+       /* count is in dwords; user_batch is only valid during probe */
+       count = xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+                                                  hwe->class, &user_batch);
+       if (!count)
+               return 0;
+
+       if (count > max_len)
+               return -ENOSPC;
+
+       /*
+        * This should be used only for tests and validation. Taint the kernel
+        * as anything could be submitted directly in context switches
+        */
+       add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+       memcpy(cmd, user_batch, count * sizeof(u32));
+       cmd += count;
+
+       return cmd - batch;
+}
+
 static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
                                               struct xe_hw_engine *hwe,
                                               u32 *batch, size_t max_len)
@@ -1283,8 +1318,10 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
 {
        static const struct bo_setup rcs_funcs[] = {
                { .setup = setup_timestamp_wa },
+               { .setup = setup_configfs_mid_ctx_restore_bb },
        };
        static const struct bo_setup xcs_funcs[] = {
+               { .setup = setup_configfs_mid_ctx_restore_bb },
        };
        struct bo_setup_state state = {
                .lrc = lrc,