drm/amdgpu: Rename userq_mgr_xa to userq_xa
author    Lijo Lazar <lijo.lazar@amd.com>
          Mon, 24 Nov 2025 08:51:43 +0000 (14:21 +0530)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 8 Dec 2025 18:56:39 +0000 (13:56 -0500)
Rename userq_mgr_xa to userq_xa since it is an xarray of userq pointers.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index ad16db05863fb3c31ba226d69eef7beb8e1cfe16..545a238ba1256fc551df9db2ebb56c7573233189 100644
@@ -398,7 +398,7 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue,
        uq_funcs->mqd_destroy(queue);
        amdgpu_userq_fence_driver_free(queue);
        /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
-       xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+       xa_erase_irq(&uq_mgr->userq_xa, (unsigned long)queue_id);
        xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
        queue->userq_mgr = NULL;
        list_del(&queue->userq_va_list);
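For context, the interrupt-safe variants matter in this cleanup path because the fence IRQ handlers can touch the same XArrays. A minimal sketch of the pattern, using hypothetical names (example_xa and example_remove are not part of the patch):

	#include <linux/printk.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY(example_xa);

	/* Erase an entry that an IRQ handler may look up concurrently.
	 * xa_erase_irq() wraps the erase in xa_lock_irq()/xa_unlock_irq(),
	 * so the XArray's spinlock is taken with interrupts disabled. */
	static void example_remove(unsigned long id)
	{
		void *entry = xa_erase_irq(&example_xa, id);

		if (entry)
			pr_debug("erased entry at index %lu\n", id);
	}
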
@@ -410,7 +410,7 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue,
 static struct amdgpu_usermode_queue *
 amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
 {
-       return xa_load(&uq_mgr->userq_mgr_xa, qid);
+       return xa_load(&uq_mgr->userq_xa, qid);
 }
 
 void
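As a reminder of the lookup semantics relied on here, xa_load() returns the stored pointer or NULL when the index is empty; keeping the returned object alive is the caller's job (the driver holds uq_mgr->userq_mutex for that). A minimal sketch with hypothetical names:

	#include <linux/errno.h>
	#include <linux/xarray.h>

	/* Return 0 if an object is registered under @id, -ENOENT otherwise. */
	static int example_lookup(struct xarray *xa, unsigned long id)
	{
		void *entry = xa_load(xa, id);

		if (!entry)
			return -ENOENT;	/* nothing stored at this index */
		return 0;
	}
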
@@ -768,7 +768,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                goto unlock;
        }
 
-       r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+       r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
+                    XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
        if (r) {
                drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
                amdgpu_userq_fence_driver_free(queue);
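Note that xa_alloc() only works on an XArray initialised with XA_FLAGS_ALLOC (see the xa_init_flags() hunk further down), and XA_LIMIT(1, ...) keeps ID 0 out of circulation so it can serve as an invalid-ID sentinel. A minimal allocation sketch, with hypothetical names:

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(example_xa);	/* implies XA_FLAGS_ALLOC */

	/* Store @object and receive a free ID in [1, 127] via @out_id. */
	static int example_add(void *object, u32 *out_id)
	{
		return xa_alloc(&example_xa, out_id, object,
				XA_LIMIT(1, 127), GFP_KERNEL);
	}
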
@@ -791,7 +792,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                r = amdgpu_userq_map_helper(queue);
                if (r) {
                        drm_file_err(uq_mgr->file, "Failed to map Queue\n");
-                       xa_erase(&uq_mgr->userq_mgr_xa, qid);
+                       xa_erase(&uq_mgr->userq_xa, qid);
                        amdgpu_userq_fence_driver_free(queue);
                        uq_funcs->mqd_destroy(queue);
                        kfree(queue);
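The unwind order in this failure path is deliberate: the ID is erased before the queue is freed, so a concurrent lookup can never return a half-destroyed queue. A condensed sketch of the pattern, with hypothetical helpers (example_activate() stands in for the map step and is not part of the patch):

	#include <linux/xarray.h>

	static int example_activate(void *obj);	/* assumed driver-specific step */

	/* Register an object, undoing the xa_alloc() if activation fails. */
	static int example_register(struct xarray *xa, void *obj, u32 *id)
	{
		int r;

		r = xa_alloc(xa, id, obj, XA_LIMIT(1, 127), GFP_KERNEL);
		if (r)
			return r;

		r = example_activate(obj);
		if (r)
			xa_erase(xa, *id);	/* make the ID unreachable before teardown */
		return r;
	}
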
@@ -918,8 +919,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
        int ret = 0, r;
 
        /* Resume all the queues for this process */
-       xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
-
+       xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
                if (!amdgpu_userq_buffer_vas_mapped(queue)) {
                        drm_file_err(uq_mgr->file,
                                     "trying restore queue without va mapping\n");
@@ -1162,7 +1162,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
 
        amdgpu_userq_detect_and_reset_queues(uq_mgr);
        /* Try to unmap all the queues in this process ctx */
-       xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+       xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
                r = amdgpu_userq_preempt_helper(queue);
                if (r)
                        ret = r;
@@ -1197,7 +1197,7 @@ amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
        unsigned long queue_id;
        int ret;
 
-       xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+       xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
                struct dma_fence *f = queue->last_fence;
 
                if (!f || dma_fence_is_signaled(f))
@@ -1247,7 +1247,7 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
                          struct amdgpu_device *adev)
 {
        mutex_init(&userq_mgr->userq_mutex);
-       xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
+       xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
        userq_mgr->adev = adev;
        userq_mgr->file = file_priv;
 
@@ -1264,13 +1264,13 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
 
        mutex_lock(&userq_mgr->userq_mutex);
        amdgpu_userq_detect_and_reset_queues(userq_mgr);
-       xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
+       xa_for_each(&userq_mgr->userq_xa, queue_id, queue) {
                amdgpu_userq_wait_for_last_fence(queue);
                amdgpu_userq_unmap_helper(queue);
                amdgpu_userq_cleanup(queue, queue_id);
        }
 
-       xa_destroy(&userq_mgr->userq_mgr_xa);
+       xa_destroy(&userq_mgr->userq_xa);
        mutex_unlock(&userq_mgr->userq_mutex);
        mutex_destroy(&userq_mgr->userq_mutex);
 }
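The init/fini pairing above follows the usual XArray lifecycle: xa_init_flags(..., XA_FLAGS_ALLOC) is what permits xa_alloc(), and xa_destroy() only releases the tree's internal nodes, so the stored entries must be torn down first, exactly as the xa_for_each() loop in amdgpu_userq_mgr_fini() does. A condensed sketch with hypothetical names:

	#include <linux/slab.h>
	#include <linux/xarray.h>

	struct example_mgr {
		struct xarray obj_xa;
	};

	static void example_mgr_init(struct example_mgr *mgr)
	{
		xa_init_flags(&mgr->obj_xa, XA_FLAGS_ALLOC);	/* enables xa_alloc() */
	}

	static void example_mgr_fini(struct example_mgr *mgr)
	{
		unsigned long id;
		void *obj;

		/* Free the entries first; xa_destroy() does not free them. */
		xa_for_each(&mgr->obj_xa, id, obj)
			kfree(obj);
		xa_destroy(&mgr->obj_xa);
	}
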
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 7c6dfc0e27a856b9763fc8a382d6dfc3ff29b389..1eaa94f8a2913f1ed038d8f9d79996cbf9dd76a3 100644
@@ -91,11 +91,11 @@ struct amdgpu_userq_funcs {
 /* Usermode queues for gfx */
 struct amdgpu_userq_mgr {
        /**
-        * @userq_mgr_xa: Per-process user queue map (queue ID → queue)
+        * @userq_xa: Per-process user queue map (queue ID → queue)
         * Key: queue_id (unique ID within the process's userq manager)
         * Value: struct amdgpu_usermode_queue
         */
-       struct xarray                   userq_mgr_xa;
+       struct xarray                   userq_xa;
        struct mutex                    userq_mutex;
        struct amdgpu_device            *adev;
        struct delayed_work             resume_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index f44cb17b1e2ff2b1fbfb7f778563c36055a2f8da..25f17853646959fa7fe83e78ae817b9bc3873a1b 100644
@@ -542,7 +542,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
        }
 
        /* Retrieve the user queue */
-       queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
+       queue = xa_load(&userq_mgr->userq_xa, args->queue_id);
        if (!queue) {
                r = -ENOENT;
                goto put_gobj_write;
@@ -904,7 +904,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                 */
                num_fences = dma_fence_dedup_array(fences, num_fences);
 
-               waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
+               waitq = xa_load(&userq_mgr->userq_xa, wait_info->waitq_id);
                if (!waitq) {
                        r = -EINVAL;
                        goto free_fences;