int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
- r = userq_funcs->preempt(uq_mgr, queue);
+ r = userq_funcs->preempt(queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
found_hung_queue = true;
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
- r = userq_funcs->restore(uq_mgr, queue);
+ r = userq_funcs->restore(queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
} else {
if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
(queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
- r = userq_funcs->unmap(uq_mgr, queue);
+ r = userq_funcs->unmap(queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
found_hung_queue = true;
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
- r = userq_funcs->map(uq_mgr, queue);
+ r = userq_funcs->map(queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Drop the userq reference. */
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
- uq_funcs->mqd_destroy(uq_mgr, queue);
+ uq_funcs->mqd_destroy(queue);
amdgpu_userq_fence_driver_free(queue);
/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
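For context on the interrupt-safe variant above: a fence interrupt handler can look the queue up in the same XArray, so the process-context erase has to disable interrupts while it holds the lock. A minimal sketch of such a reader, assuming a hypothetical handler name and leaving what it does with the queue as a placeholder:

/* Hypothetical IRQ-context reader of uq_mgr->userq_mgr_xa (illustration
 * only, not part of this patch). In hard-IRQ context interrupts are
 * already off, so the plain xa_lock()/xa_unlock() pair is enough; the
 * eraser above must use xa_erase_irq() so it cannot be interrupted by
 * this path while holding the same lock.
 */
static void example_userq_irq_lookup(struct amdgpu_userq_mgr *uq_mgr,
				     unsigned long queue_id)
{
	struct amdgpu_usermode_queue *queue;

	xa_lock(&uq_mgr->userq_mgr_xa);
	queue = xa_load(&uq_mgr->userq_mgr_xa, queue_id);
	if (queue) {
		/* e.g. signal this queue's user fences */
	}
	xa_unlock(&uq_mgr->userq_mgr_xa);
}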
db_info.db_obj = &queue->db_obj;
db_info.doorbell_offset = args->in.doorbell_offset;
+ queue->userq_mgr = uq_mgr;
	/* Validate the userq virtual addresses. */
	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
		r = -EINVAL;
		goto unlock;
	}
- r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ r = uq_funcs->mqd_create(queue, &args->in);
if (r) {
drm_file_err(uq_mgr->file, "Failed to create Queue\n");
amdgpu_userq_fence_driver_free(queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(uq_mgr, queue);
+ uq_funcs->mqd_destroy(queue);
kfree(queue);
r = -ENOMEM;
up_read(&adev->reset_domain->sem);
goto unlock;
}
up_read(&adev->reset_domain->sem);
- queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
xa_erase(&uq_mgr->userq_mgr_xa, qid);
amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(uq_mgr, queue);
+ uq_funcs->mqd_destroy(queue);
kfree(queue);
goto unlock;
}
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
amdgpu_userq_wait_for_last_fence(uqm, queue);
userq_funcs = adev->userq_funcs[queue->queue_type];
- userq_funcs->unmap(uqm, queue);
+ userq_funcs->unmap(queue);
			/* just mark all queues as hung at this point.
			 * if unmap succeeds, we should be able to map them
			 * again in amdgpu_userq_post_reset() and continue,
			 * provided vram is not lost.
			 */
- struct amdgpu_userq_mgr *uqm;
struct amdgpu_usermode_queue *queue;
const struct amdgpu_userq_funcs *userq_funcs;
unsigned long queue_id;
int r = 0;
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
- uqm = queue->userq_mgr;
if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
userq_funcs = adev->userq_funcs[queue->queue_type];
/* Re-map queue */
- r = userq_funcs->map(uqm, queue);
+ r = userq_funcs->map(queue);
if (r) {
dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
continue;
		}
struct amdgpu_userq_funcs {
- int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
- struct drm_amdgpu_userq_in *args,
- struct amdgpu_usermode_queue *queue);
- void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *uq);
- int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue);
- int (*map)(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue);
- int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue);
- int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue);
+ int (*mqd_create)(struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *args);
+ void (*mqd_destroy)(struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_usermode_queue *queue);
int (*detect_and_reset)(struct amdgpu_device *adev,
int queue_type);
};
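Dropping the uq_mgr argument relies on every queue carrying a back-pointer to its manager, which the create path now sets before mqd_create runs (queue->userq_mgr = uq_mgr above). A minimal sketch of the assumed queue layout follows; only fields referenced in these hunks are listed, and the exact types and ordering are illustrative rather than a copy of the driver header:

struct amdgpu_usermode_queue {
	int				queue_type;
	enum amdgpu_userq_state		state;
	struct amdgpu_userq_obj		mqd;
	struct amdgpu_userq_obj		fw_obj;
	struct amdgpu_userq_obj		db_obj;
	struct amdgpu_mqd_prop		*userq_prop;
	/* back-pointer the refactored callbacks use in place of the
	 * old uq_mgr parameter */
	struct amdgpu_userq_mgr		*userq_mgr;
};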
}
}
-static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue)
+static int mes_userq_map(struct amdgpu_usermode_queue *queue)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;
struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
return 0;
}
-static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue)
+static int mes_userq_unmap(struct amdgpu_usermode_queue *queue)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
struct mes_remove_queue_input queue_input;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;
return r;
}
-static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
- struct drm_amdgpu_userq_in *args_in,
- struct amdgpu_usermode_queue *queue)
+static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *args_in)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
return r;
}
-static void
-mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue)
+static void mes_userq_mqd_destroy(struct amdgpu_usermode_queue *queue)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
+
amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
kfree(queue->userq_prop);
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
-static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue)
+static int mes_userq_preempt(struct amdgpu_usermode_queue *queue)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
struct mes_suspend_gang_input queue_input;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;
return r;
}
-static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_usermode_queue *queue)
+static int mes_userq_restore(struct amdgpu_usermode_queue *queue)
{
+ struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
struct mes_resume_gang_input queue_input;
struct amdgpu_userq_obj *ctx = &queue->fw_obj;