}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
+/*
+ * xe_exec_queue_group_kill() - Release callback for a group's kill refcount.
+ * @ref: embedded &kill_refcount of the owning struct xe_exec_queue_group
+ *
+ * Invoked via kref_put() (see xe_exec_queue_group_kill_put()) once the last
+ * keep-active reference on the group is dropped; it then kills the group's
+ * primary exec queue.
+ */
+static void xe_exec_queue_group_kill(struct kref *ref)
+{
+ struct xe_exec_queue_group *group = container_of(ref, struct xe_exec_queue_group,
+ kill_refcount);
+ xe_exec_queue_kill(group->primary);
+}
+
+/*
+ * xe_exec_queue_group_kill_get() - Take a reference on the group's kill
+ * refcount, deferring the kill of the primary queue until the matching
+ * xe_exec_queue_group_kill_put(). Caller must hold a valid (non-NULL) group.
+ */
+static inline void xe_exec_queue_group_kill_get(struct xe_exec_queue_group *group)
+{
+ kref_get(&group->kill_refcount);
+}
+
+/*
+ * xe_exec_queue_group_kill_put() - Drop a reference on the group's kill
+ * refcount. A NULL @group is a no-op. When the count reaches zero,
+ * xe_exec_queue_group_kill() runs and kills the group's primary queue.
+ */
+void xe_exec_queue_group_kill_put(struct xe_exec_queue_group *group)
+{
+ if (!group)
+ return;
+
+ kref_put(&group->kill_refcount, xe_exec_queue_group_kill);
+}
+
void xe_exec_queue_destroy(struct kref *ref)
{
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
group->primary = q;
group->cgp_bo = bo;
INIT_LIST_HEAD(&group->list);
+ kref_init(&group->kill_refcount);
xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
mutex_init(&group->list_lock);
q->multi_queue.group = group;
q->multi_queue.pos = pos;
+ if (group->primary->multi_queue.keep_active) {
+ xe_exec_queue_group_kill_get(group);
+ q->multi_queue.keep_active = true;
+ }
+
return 0;
}
lrc = xa_erase(&group->xa, q->multi_queue.pos);
xe_assert(xe, lrc);
xe_lrc_put(lrc);
+
+ if (q->multi_queue.keep_active) {
+ xe_exec_queue_group_kill_put(group);
+ q->multi_queue.keep_active = false;
+ }
}
static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
return -EINVAL;
if (value & DRM_XE_MULTI_GROUP_CREATE) {
- if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
+ if (XE_IOCTL_DBG(xe, value & ~(DRM_XE_MULTI_GROUP_CREATE |
+ DRM_XE_MULTI_GROUP_KEEP_ACTIVE)))
+ return -EINVAL;
+
+ /*
+ * KEEP_ACTIVE is not supported in preempt fence mode as in that mode,
+ * VM_DESTROY ioctl expects all exec queues of that VM to have been killed.
+ */
+ if (XE_IOCTL_DBG(xe, (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE) &&
+ xe_vm_in_preempt_fence_mode(q->vm)))
return -EINVAL;
q->multi_queue.valid = true;
q->multi_queue.is_primary = true;
q->multi_queue.pos = 0;
+ if (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE)
+ q->multi_queue.keep_active = true;
+
return 0;
}
q->ops->kill(q);
xe_vm_remove_compute_exec_queue(q->vm, q);
+
+ if (!xe_exec_queue_is_multi_queue_primary(q) && q->multi_queue.keep_active) {
+ xe_exec_queue_group_kill_put(q->multi_queue.group);
+ q->multi_queue.keep_active = false;
+ }
}
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (q->vm && q->hwe->hw_engine_group)
xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
- xe_exec_queue_kill(q);
+ if (xe_exec_queue_is_multi_queue_primary(q))
+ xe_exec_queue_group_kill_put(q->multi_queue.group);
+ else
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
* then a new multi-queue group is created with this queue as the primary queue
* (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
* queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
+ * If the extension's 'value' field has the %DRM_XE_MULTI_GROUP_KEEP_ACTIVE flag
+ * set, then the multi-queue group is kept active after the primary queue is
+ * destroyed.
* All the other non-relevant bits of extension's 'value' field while adding the
* primary or the secondary queues of the group must be set to 0.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
+#define DRM_XE_MULTI_GROUP_KEEP_ACTIVE (1ull << 62)
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;