git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/xe/multi_queue: Support active group after primary is destroyed
author: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Thu, 11 Dec 2025 01:03:03 +0000 (17:03 -0800)
committer: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Fri, 12 Dec 2025 03:22:05 +0000 (19:22 -0800)
Add support for keeping the group active after the primary queue is
destroyed. Instead of killing the primary queue during the exec_queue
destroy ioctl, kill it only once all the secondary queues of the group
have been killed.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251211010249.1647839-34-niranjana.vishwanathapura@intel.com
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_exec_queue_types.h
include/uapi/drm/xe_drm.h

index 7a498c8db7b1f5805d625c88bc76a43a2936b73a..24efb6a3e0ea6ff174cd352f11b4c967e310bc1d 100644 (file)
@@ -177,7 +177,12 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
        xa_for_each(&xef->exec_queue.xa, idx, q) {
                if (q->vm && q->hwe->hw_engine_group)
                        xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
-               xe_exec_queue_kill(q);
+
+               if (xe_exec_queue_is_multi_queue_primary(q))
+                       xe_exec_queue_group_kill_put(q->multi_queue.group);
+               else
+                       xe_exec_queue_kill(q);
+
                xe_exec_queue_put(q);
        }
        xa_for_each(&xef->vm.xa, idx, vm)
index d337b7bc2b809eb49edb357033768110c23291d6..3f4840d135a066ec177ac7a1e4903c01b19d4f8d 100644 (file)
@@ -418,6 +418,26 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 }
 ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
 
+static void xe_exec_queue_group_kill(struct kref *ref)
+{
+       struct xe_exec_queue_group *group = container_of(ref, struct xe_exec_queue_group,
+                                                        kill_refcount);
+       xe_exec_queue_kill(group->primary);
+}
+
+static inline void xe_exec_queue_group_kill_get(struct xe_exec_queue_group *group)
+{
+       kref_get(&group->kill_refcount);
+}
+
+void xe_exec_queue_group_kill_put(struct xe_exec_queue_group *group)
+{
+       if (!group)
+               return;
+
+       kref_put(&group->kill_refcount, xe_exec_queue_group_kill);
+}
+
 void xe_exec_queue_destroy(struct kref *ref)
 {
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
@@ -650,6 +670,7 @@ static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *
        group->primary = q;
        group->cgp_bo = bo;
        INIT_LIST_HEAD(&group->list);
+       kref_init(&group->kill_refcount);
        xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
        mutex_init(&group->list_lock);
        q->multi_queue.group = group;
@@ -725,6 +746,11 @@ static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q
 
        q->multi_queue.pos = pos;
 
+       if (group->primary->multi_queue.keep_active) {
+               xe_exec_queue_group_kill_get(group);
+               q->multi_queue.keep_active = true;
+       }
+
        return 0;
 }
 
@@ -738,6 +764,11 @@ static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queu
        lrc = xa_erase(&group->xa, q->multi_queue.pos);
        xe_assert(xe, lrc);
        xe_lrc_put(lrc);
+
+       if (q->multi_queue.keep_active) {
+               xe_exec_queue_group_kill_put(group);
+               q->multi_queue.keep_active = false;
+       }
 }
 
 static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
@@ -759,12 +790,24 @@ static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue
                return -EINVAL;
 
        if (value & DRM_XE_MULTI_GROUP_CREATE) {
-               if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
+               if (XE_IOCTL_DBG(xe, value & ~(DRM_XE_MULTI_GROUP_CREATE |
+                                              DRM_XE_MULTI_GROUP_KEEP_ACTIVE)))
+                       return -EINVAL;
+
+               /*
+                * KEEP_ACTIVE is not supported in preempt fence mode as in that mode,
+                * VM_DESTROY ioctl expects all exec queues of that VM are already killed.
+                */
+               if (XE_IOCTL_DBG(xe, (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE) &&
+                                xe_vm_in_preempt_fence_mode(q->vm)))
                        return -EINVAL;
 
                q->multi_queue.valid = true;
                q->multi_queue.is_primary = true;
                q->multi_queue.pos = 0;
+               if (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE)
+                       q->multi_queue.keep_active = true;
+
                return 0;
        }
 
@@ -1312,6 +1355,11 @@ void xe_exec_queue_kill(struct xe_exec_queue *q)
 
        q->ops->kill(q);
        xe_vm_remove_compute_exec_queue(q->vm, q);
+
+       if (!xe_exec_queue_is_multi_queue_primary(q) && q->multi_queue.keep_active) {
+               xe_exec_queue_group_kill_put(q->multi_queue.group);
+               q->multi_queue.keep_active = false;
+       }
 }
 
 int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1338,7 +1386,10 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
        if (q->vm && q->hwe->hw_engine_group)
                xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
 
-       xe_exec_queue_kill(q);
+       if (xe_exec_queue_is_multi_queue_primary(q))
+               xe_exec_queue_group_kill_put(q->multi_queue.group);
+       else
+               xe_exec_queue_kill(q);
 
        trace_xe_exec_queue_close(q);
        xe_exec_queue_put(q);
index ffcc1feb879eccbb37077a29a5fb5ce6051c77ea..10abed98fb6b68a8a646d9ca14856ebf73e8e61d 100644 (file)
@@ -113,6 +113,8 @@ static inline struct xe_exec_queue *xe_exec_queue_multi_queue_primary(struct xe_
        return xe_exec_queue_is_multi_queue(q) ? q->multi_queue.group->primary : q;
 }
 
+void xe_exec_queue_group_kill_put(struct xe_exec_queue_group *group);
+
 bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
 
 bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
index 5fc516b0bb77f7706268e4be3918f7c01e350cd6..67ea5eebf70b97108b127dae1f962b0a46452bb9 100644 (file)
@@ -62,6 +62,8 @@ struct xe_exec_queue_group {
        struct list_head list;
        /** @list_lock: Secondary queue list lock */
        struct mutex list_lock;
+       /** @kill_refcount: ref count to kill primary queue */
+       struct kref kill_refcount;
        /** @sync_pending: CGP_SYNC_DONE g2h response pending */
        bool sync_pending;
        /** @banned: Group banned */
@@ -161,6 +163,8 @@ struct xe_exec_queue {
                u8 valid:1;
                /** @multi_queue.is_primary: Is primary queue (Q0) of the group */
                u8 is_primary:1;
+               /** @multi_queue.keep_active: Keep the group active after primary is destroyed */
+               u8 keep_active:1;
        } multi_queue;
 
        /** @sched_props: scheduling properties */
index 705081bf0d817771210732eb13add988b004f086..bd6154e3b728f9c8c8fad5e66e0c882ae9a378f9 100644 (file)
@@ -1280,6 +1280,9 @@ struct drm_xe_vm_bind {
  *    then a new multi-queue group is created with this queue as the primary queue
  *    (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
  *    queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
+ *    If the extension's 'value' field has %DRM_XE_MULTI_GROUP_KEEP_ACTIVE flag
+ *    set, then the multi-queue group is kept active after the primary queue is
+ *    destroyed.
  *    All the other non-relevant bits of extension's 'value' field while adding the
  *    primary or the secondary queues of the group must be set to 0.
  *  - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
@@ -1328,6 +1331,7 @@ struct drm_xe_exec_queue_create {
 #define   DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE              3
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP           4
 #define     DRM_XE_MULTI_GROUP_CREATE                          (1ull << 63)
+#define     DRM_XE_MULTI_GROUP_KEEP_ACTIVE                     (1ull << 62)
 #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY  5
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;