	return xe_gt_recovery_pending(guc_to_gt(guc));
}
+static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
+{
+	struct xe_guc *guc = exec_queue_to_guc(q);
+	struct xe_device *xe = guc_to_xe(guc);
+
+	/* Wake up the xe_wait_user_fence ioctl if the exec queue is reset */
+	wake_up_all(&xe->ufence_wq);
+
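+	/*
+	 * LR queues have no scheduler jobs to time out, so run their
+	 * dedicated TDR worker; everything else kicks the scheduler
+	 * TDR immediately.
+	 */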
+	if (xe_exec_queue_is_lr(q))
+		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
+	else
+		xe_sched_tdr_queue_imm(&q->guc->sched);
+}
+
+static void xe_guc_exec_queue_reset_trigger_cleanup(struct xe_exec_queue *q)
+{
+	if (xe_exec_queue_is_multi_queue(q)) {
+		struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
+		struct xe_exec_queue_group *group = q->multi_queue.group;
+		struct xe_exec_queue *eq;
+
+		set_exec_queue_reset(primary);
+		if (!exec_queue_banned(primary) && !exec_queue_check_timeout(primary))
+			xe_guc_exec_queue_trigger_cleanup(primary);
+
+		mutex_lock(&group->list_lock);
+		list_for_each_entry(eq, &group->list, multi_queue.link) {
+			set_exec_queue_reset(eq);
+			if (!exec_queue_banned(eq) && !exec_queue_check_timeout(eq))
+				xe_guc_exec_queue_trigger_cleanup(eq);
+		}
+		mutex_unlock(&group->list_lock);
+	} else {
+		set_exec_queue_reset(q);
+		if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
+			xe_guc_exec_queue_trigger_cleanup(q);
+	}
+}
+
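Note for review: the group bookkeeping used above (group->list_lock, group->list,
multi_queue.link) is only referenced in this hunk, not defined. A minimal sketch of
the assumed shape, with names taken from the accessors used here (the real
definitions live in the exec-queue type headers and may differ):

struct xe_exec_queue_group {
	struct mutex list_lock;		/* protects @list */
	struct list_head list;		/* secondary queues, linked via multi_queue.link */
};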
#define parallel_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
			field_)
G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
}
-static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
-{
-	struct xe_guc *guc = exec_queue_to_guc(q);
-	struct xe_device *xe = guc_to_xe(guc);
-
-	/** to wakeup xe_wait_user_fence ioctl if exec queue is reset */
-	wake_up_all(&xe->ufence_wq);
-
-	if (xe_exec_queue_is_lr(q))
-		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
-	else
-		xe_sched_tdr_queue_imm(&q->guc->sched);
-}
-
/**
* xe_guc_submit_wedge() - Wedge GuC submission
* @guc: the GuC object
	guard(xe_pm_runtime)(guc_to_xe(guc));
	trace_xe_exec_queue_destroy(q);
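+	/* Unlink this secondary from its group; pairs with the list_add_tail() at init */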
+	if (xe_exec_queue_is_multi_queue_secondary(q)) {
+		struct xe_exec_queue_group *group = q->multi_queue.group;
+
+		mutex_lock(&group->list_lock);
+		list_del(&q->multi_queue.link);
+		mutex_unlock(&group->list_lock);
+	}
+
	if (xe_exec_queue_is_lr(q))
		cancel_work_sync(&ge->lr_tdr);
	/* Confirm no work left behind accessing device structures */
	xe_exec_queue_assign_name(q, q->guc->id);
+	/*
+	 * Maintain the secondary queues of a multi-queue group in a list
+	 * so that dependencies across the queues in the group can be
+	 * handled.
+	 */
+	if (xe_exec_queue_is_multi_queue_secondary(q)) {
+		struct xe_exec_queue_group *group = q->multi_queue.group;
+
+		INIT_LIST_HEAD(&q->multi_queue.link);
+		mutex_lock(&group->list_lock);
+		list_add_tail(&q->multi_queue.link, &group->list);
+		mutex_unlock(&group->list_lock);
+	}
+
	trace_xe_exec_queue_create(q);
	return 0;
static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
{
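+	/*
+	 * A secondary queue reports a reset whenever its group's primary
+	 * has been reset. This relies on a primary never being itself a
+	 * secondary, so the recursion below is at most one level deep.
+	 */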
+	if (xe_exec_queue_is_multi_queue_secondary(q) &&
+	    guc_exec_queue_reset_status(xe_exec_queue_multi_queue_primary(q)))
+		return true;
+
	return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
}
	 * jobs by setting timeout of the job to the minimum value kicking
	 * guc_exec_queue_timedout_job.
	 */
-	set_exec_queue_reset(q);
-	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
-		xe_guc_exec_queue_trigger_cleanup(q);
+	xe_guc_exec_queue_reset_trigger_cleanup(q);
	return 0;
}
	trace_xe_exec_queue_memory_cat_error(q);
	/* Treat the same as engine reset */
-	set_exec_queue_reset(q);
-	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
-		xe_guc_exec_queue_trigger_cleanup(q);
+	xe_guc_exec_queue_reset_trigger_cleanup(q);
	return 0;
}