	cmd_abo = job->cmd_bo;
	if (unlikely(job->job_timeout)) {
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+		amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_TIMEOUT);
		ret = -EINVAL;
		goto out;
	}
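+	/* The response payload is a single status word */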
	if (unlikely(!data) || unlikely(size != sizeof(u32))) {
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+		amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}
	if (status == AIE2_STATUS_SUCCESS)
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
	else
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+		amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ERROR);
out:
	aie2_sched_notify(job);
	cmd_abo = job->cmd_bo;
	if (unlikely(job->job_timeout)) {
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+		amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_TIMEOUT);
		ret = -EINVAL;
		goto out;
	}
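+	/* A chain response carries status, fail_cmd_idx and fail_cmd_status */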
	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+		amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}
		 fail_cmd_idx, fail_cmd_status);
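+	/*
+	 * The chain failed, but the command the firmware points at
+	 * reports success: the response is inconsistent, abort.
+	 */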
	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
-		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+		amdxdna_cmd_set_error(cmd_abo, job, fail_cmd_idx, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
-		goto out;
+	} else {
+		amdxdna_cmd_set_error(cmd_abo, job, fail_cmd_idx, ERT_CMD_STATE_ERROR);
	}
-	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
-	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
-		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
-
-		cc->error_index = fail_cmd_idx;
-		if (cc->error_index >= cc->command_count)
-			cc->error_index = 0;
-	}
out:
	aie2_sched_notify(job);
	return ret;
	return INVALID_CU_IDX;
}
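+
+/**
+ * amdxdna_cmd_set_error - Record an error state on a command BO
+ * @abo: command BO whose header state is updated
+ * @job: scheduled job the command belongs to
+ * @cmd_idx: index of the failing sub-command when @abo is a command chain
+ * @error_state: ERT error state to record
+ *
+ * For a command chain, the failing sub-command index is stored in the
+ * chain payload and the payload of that sub-command is poisoned as well.
+ *
+ * Return: 0 on success, -EINVAL if the failing sub-command BO cannot be
+ * looked up.
+ */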
+int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
+			  struct amdxdna_sched_job *job, u32 cmd_idx,
+			  enum ert_cmd_state error_state)
+{
+	struct amdxdna_client *client = job->hwctx->client;
+	struct amdxdna_cmd *cmd = abo->mem.kva;
+	struct amdxdna_cmd_chain *cc = NULL;
+
+	amdxdna_cmd_set_state(abo, error_state);
+
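+	/*
+	 * For a command chain, record which sub-command failed and look
+	 * up that sub-command's BO so its payload can be poisoned too.
+	 */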
+	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) {
+		cc = amdxdna_cmd_get_payload(abo, NULL);
+		cc->error_index = (cmd_idx < cc->command_count) ? cmd_idx : 0;
+		abo = amdxdna_gem_get_obj(client, cc->data[cc->error_index],
+					  AMDXDNA_BO_CMD);
+		if (!abo)
+			return -EINVAL;
+		cmd = abo->mem.kva;
+	}
+
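+	/* Poison the payload of the failed command with 0xff */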
+	memset(cmd->data, 0xff, abo->mem.size - sizeof(*cmd));
+	if (cc)
+		amdxdna_gem_put_obj(abo);
+
+	return 0;
+}
+
/*
 * This should be called in close() and remove(). DO NOT call in other syscalls.
 * This guarantees that hwctx and resources will be released, even if user
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
+			  struct amdxdna_sched_job *job, u32 cmd_idx,
+			  enum ert_cmd_state error_state);
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);