u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
- u32 i, num_syncs, num_ufence = 0;
+ u32 i, num_syncs, num_in_sync = 0, num_ufence = 0;
struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
if (xe_sync_is_ufence(&syncs[num_syncs]))
num_ufence++;
+
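+ /* Used as a flag: does any in-sync still need a wait? */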
+ if (!num_in_sync && xe_sync_needs_wait(&syncs[num_syncs]))
+ num_in_sync++;
}
if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
mode = xe_hw_engine_group_find_exec_mode(q);
if (mode == EXEC_MODE_DMA_FENCE) {
- err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
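+ /*
+ * Treat the job as having dependencies only when at least one
+ * in-sync still needs a wait.
+ */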
+ err = xe_hw_engine_group_get_mode(group, mode, &previous_mode,
+ syncs, num_in_sync ?
+ num_syncs : 0);
if (err)
goto err_syncs;
}
#include "xe_gt.h"
#include "xe_gt_stats.h"
#include "xe_hw_engine_group.h"
+#include "xe_sync.h"
#include "xe_vm.h"
static void
int err;
enum xe_hw_engine_group_execution_mode previous_mode;
- err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
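+ /* This LR resume path carries no exec syncs, so nothing to wait on */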
+ err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode,
+ NULL, 0);
if (err)
return;
/**
* xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
* @group: The hw engine group
+ * @has_deps: Whether the dma-fence job triggering the suspend has dependencies
*
- * Return: 0 on success, negative error code on error.
+ * Return: 0 on success, -EAGAIN if a busy faulting queue must be suspended
+ * while the triggering job still has dependencies, negative error code on
+ * error.
*/
-static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
+static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group,
+ bool has_deps)
{
int err;
struct xe_exec_queue *q;
lockdep_assert_held_write(&group->mode_sem);
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ bool idle_skip_suspend;
+
if (!xe_vm_in_fault_mode(q->vm))
continue;
+ idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
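+ /*
+ * A busy faulting queue needs a real suspend; refuse with
+ * -EAGAIN while the triggering job still has dependencies so
+ * the caller can wait on its syncs and retry.
+ */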
+ if (!idle_skip_suspend && has_deps)
+ return -EAGAIN;
+
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
- need_resume |= !xe_exec_queue_idle_skip_suspend(q);
+
+ need_resume |= !idle_skip_suspend;
q->ops->suspend(q);
}
return 0;
}
-static int switch_mode(struct xe_hw_engine_group *group)
+static int switch_mode(struct xe_hw_engine_group *group, bool has_deps)
{
int err = 0;
enum xe_hw_engine_group_execution_mode new_mode;
switch (group->cur_mode) {
case EXEC_MODE_LR:
new_mode = EXEC_MODE_DMA_FENCE;
- err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
+ err = xe_hw_engine_group_suspend_faulting_lr_jobs(group,
+ has_deps);
break;
case EXEC_MODE_DMA_FENCE:
new_mode = EXEC_MODE_LR;
return 0;
}
+static int wait_syncs(struct xe_sync_entry *syncs, int num_syncs)
+{
+ int err, i;
+
+ for (i = 0; i < num_syncs; ++i) {
+ err = xe_sync_entry_wait(syncs + i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* xe_hw_engine_group_get_mode() - Get the group to execute in the new mode
* @group: The hw engine group
* @new_mode: The new execution mode
* @previous_mode: Pointer to the previous mode provided for use by caller
+ * @syncs: Syncs from exec IOCTL
+ * @num_syncs: Number of syncs from exec IOCTL
*
- * Return: 0 if successful, -EINTR if locking failed.
+ * Return: 0 if successful, -EINTR if locking failed, -ERESTARTSYS if
+ * interrupted while waiting on in-syncs.
*/
int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
enum xe_hw_engine_group_execution_mode new_mode,
- enum xe_hw_engine_group_execution_mode *previous_mode)
+ enum xe_hw_engine_group_execution_mode *previous_mode,
+ struct xe_sync_entry *syncs, int num_syncs)
__acquires(&group->mode_sem)
{
+ bool has_deps = !!num_syncs;
int err = down_read_interruptible(&group->mode_sem);
if (err)
if (new_mode != group->cur_mode) {
up_read(&group->mode_sem);
+retry:
err = down_write_killable(&group->mode_sem);
if (err)
return err;
if (new_mode != group->cur_mode) {
- err = switch_mode(group);
+ err = switch_mode(group, has_deps);
if (err) {
up_write(&group->mode_sem);
- return err;
+
+ if (err != -EAGAIN)
+ return err;
+
+ err = wait_syncs(syncs, num_syncs);
+ if (err)
+ return err;
+
+ has_deps = false;
+ goto retry;
}
}
downgrade_write(&group->mode_sem);
return 0;
}
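Taken together, the retry loop gives callers a simple contract: pass the
job's syncs, and the function either switches modes immediately or waits on
the in-syncs and retries the switch dependency-free. A minimal caller-side
sketch, assuming the exec-path lookup above and that xe_hw_engine_group_put()
releases the read side taken here:

	enum xe_hw_engine_group_execution_mode prev;
	int err;

	/* May sleep on in-syncs if faulting LR queues must be suspended */
	err = xe_hw_engine_group_get_mode(group, EXEC_MODE_DMA_FENCE, &prev,
					  syncs, num_syncs);
	if (err)
		return err;

	/* ... create and submit the dma-fence job ... */

	xe_hw_engine_group_put(group);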
+/**
+ * xe_sync_entry_wait() - Wait on in-sync
+ * @sync: Sync object
+ *
+ * If the sync entry is an in-sync (input fence), wait for it to signal.
+ *
+ * Return: 0 on success, -ERESTARTSYS if the wait was interrupted
+ */
+int xe_sync_entry_wait(struct xe_sync_entry *sync)
+{
+ if (sync->flags & DRM_XE_SYNC_FLAG_SIGNAL)
+ return 0;
+
+ return dma_fence_wait(sync->fence, true);
+}
+
+/**
+ * xe_sync_needs_wait() - Sync needs a wait (input dma-fence not signaled)
+ * @sync: Sync object
+ *
+ * Return: True if sync needs a wait, False otherwise
+ */
+bool xe_sync_needs_wait(struct xe_sync_entry *sync)
+{
+ return !(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &sync->fence->flags);
+}
+
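The two sync helpers above are meant to pair up: xe_sync_needs_wait() is the
cheap, non-blocking test used at IOCTL parse time, while xe_sync_entry_wait()
performs the blocking wait in the retry path. A short sketch of the pattern,
assuming an already-parsed syncs array:

	bool has_deps = false;
	int i, err;

	/* Non-blocking scan: does any in-sync still need a wait? */
	for (i = 0; i < num_syncs; ++i)
		has_deps |= xe_sync_needs_wait(&syncs[i]);

	/* Blocking fallback, interruptible via -ERESTARTSYS */
	if (has_deps) {
		for (i = 0; i < num_syncs; ++i) {
			err = xe_sync_entry_wait(&syncs[i]);
			if (err)
				return err;
		}
	}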
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))