drm/amdkfd: Rename queue_count to active_queue_count
author     Yong Zhao <Yong.Zhao@amd.com>           Thu, 30 Jan 2020 23:25:50 +0000 (18:25 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>  Wed, 26 Feb 2020 19:19:38 +0000 (14:19 -0500)
The new name makes the code easier to understand.
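
For context, both the device queue manager and the per-process queue data
carried a counter named queue_count, with different meanings. A minimal,
hypothetical sketch of the two fields this patch disambiguates (struct
bodies abbreviated, names suffixed _sketch):

/*
 * qpd->queue_count counts every queue a process owns, while the dqm
 * counter is only incremented or decremented when a queue's
 * properties.is_active state changes. The shared name hid that
 * difference; active_queue_count makes it explicit at each use site.
 */
struct qcm_process_device_sketch {
	unsigned int queue_count;		/* all queues in the process */
};

struct device_queue_manager_sketch {
	unsigned int active_queue_count;	/* was: queue_count */
};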

Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e814c72a91949d7e94f5a11510fe5c..7ef9b89f5c70c59c3758c1ffe14e493497eb119b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -359,7 +359,7 @@ add_queue_to_list:
        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
        if (q->properties.is_active)
-               dqm->queue_count++;
+               dqm->active_queue_count++;
 
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                dqm->sdma_queue_count++;
@@ -494,7 +494,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
        }
        qpd->queue_count--;
        if (q->properties.is_active)
-               dqm->queue_count--;
+               dqm->active_queue_count--;
 
        return retval;
 }
@@ -563,13 +563,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        /*
         * check active state vs. the previous state and modify
         * counter accordingly. map_queues_cpsch uses the
-        * dqm->queue_count to determine whether a new runlist must be
+        * dqm->active_queue_count to determine whether a new runlist must be
         * uploaded.
         */
        if (q->properties.is_active && !prev_active)
-               dqm->queue_count++;
+               dqm->active_queue_count++;
        else if (!q->properties.is_active && prev_active)
-               dqm->queue_count--;
+               dqm->active_queue_count--;
 
        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
                retval = map_queues_cpsch(dqm);
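
The comment above names the invariant behind the rename: the counter moves
only on an activation edge, and map_queues_cpsch() later reads it to decide
whether a new runlist is needed. A standalone sketch of that edge-triggered
accounting, with a hypothetical helper name:

#include <stdbool.h>

/*
 * Sketch of the accounting done in update_queue(): the counter changes
 * only when the active state actually flips, so repeated updates with
 * an unchanged state leave active_queue_count untouched.
 */
static void account_active_edge(unsigned int *active_queue_count,
				bool was_active, bool now_active)
{
	if (now_active && !was_active)
		(*active_queue_count)++;
	else if (!now_active && was_active)
		(*active_queue_count)--;
}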
@@ -618,7 +618,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                q->properties.type)];
                q->properties.is_active = false;
-               dqm->queue_count--;
+               dqm->active_queue_count--;
 
                if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
                        continue;
@@ -662,7 +662,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                        continue;
 
                q->properties.is_active = false;
-               dqm->queue_count--;
+               dqm->active_queue_count--;
        }
        retval = execute_queues_cpsch(dqm,
                                qpd->is_debug ?
@@ -731,7 +731,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                q->properties.type)];
                q->properties.is_active = true;
-               dqm->queue_count++;
+               dqm->active_queue_count++;
 
                if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
                        continue;
@@ -786,7 +786,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
                        continue;
 
                q->properties.is_active = true;
-               dqm->queue_count++;
+               dqm->active_queue_count++;
        }
        retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,7 +899,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
-       dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+       dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->sdma_queue_count = 0;
        dqm->xgmi_sdma_queue_count = 0;
 
@@ -924,7 +924,7 @@ static void uninitialize(struct device_queue_manager *dqm)
 {
        int i;
 
-       WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+       WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
 
        kfree(dqm->allocated_queues);
        for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -1064,7 +1064,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
-       dqm->queue_count = dqm->processes_count = 0;
+       dqm->active_queue_count = dqm->processes_count = 0;
        dqm->sdma_queue_count = 0;
        dqm->xgmi_sdma_queue_count = 0;
        dqm->active_runlist = false;
@@ -1158,7 +1158,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
                        dqm->total_queue_count);
 
        list_add(&kq->list, &qpd->priv_queue_list);
-       dqm->queue_count++;
+       dqm->active_queue_count++;
        qpd->is_debug = true;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        dqm_unlock(dqm);
@@ -1172,7 +1172,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
        dqm_lock(dqm);
        list_del(&kq->list);
-       dqm->queue_count--;
+       dqm->active_queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        /*
@@ -1244,7 +1244,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                dqm->xgmi_sdma_queue_count++;
 
        if (q->properties.is_active) {
-               dqm->queue_count++;
+               dqm->active_queue_count++;
                retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        }
@@ -1319,7 +1319,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
 
        if (!dqm->sched_running)
                return 0;
-       if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+       if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
                return 0;
        if (dqm->active_runlist)
                return 0;
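
This is the consumer the update_queue() comment referred to: the runlist
upload is skipped while the scheduler is stopped, while no process has an
active queue, or while a runlist is already live. A self-contained sketch of
that guard structure (the struct and upload helper are hypothetical
stand-ins, not the driver's types):

#include <stdbool.h>

struct dqm_sketch {
	bool sched_running;
	bool active_runlist;
	int active_queue_count;
	int processes_count;
};

/* Hypothetical stand-in for the real runlist construction and upload. */
static int upload_runlist_sketch(struct dqm_sketch *dqm)
{
	dqm->active_runlist = true;
	return 0;
}

/* Early-outs as in the hunk above; only then upload a new runlist. */
static int map_queues_sketch(struct dqm_sketch *dqm)
{
	if (!dqm->sched_running)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;
	return upload_runlist_sketch(dqm);
}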
@@ -1438,7 +1438,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
        list_del(&q->list);
        qpd->queue_count--;
        if (q->properties.is_active) {
-               dqm->queue_count--;
+               dqm->active_queue_count--;
                retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval == -ETIME)
@@ -1648,7 +1648,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
        /* Clean all kernel queues */
        list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
                list_del(&kq->list);
-               dqm->queue_count--;
+               dqm->active_queue_count--;
                qpd->is_debug = false;
                dqm->total_queue_count--;
                filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1665,7 +1665,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                }
 
                if (q->properties.is_active)
-                       dqm->queue_count--;
+                       dqm->active_queue_count--;
 
                dqm->total_queue_count--;
        }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2dd85b6f6a227b5b1c51d6cad06c53..ee3400e92c3049c3afeb956af3e9907bc5c20435 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,7 @@ struct device_queue_manager {
        struct list_head        queues;
        unsigned int            saved_flags;
        unsigned int            processes_count;
-       unsigned int            queue_count;
+       unsigned int            active_queue_count;
        unsigned int            sdma_queue_count;
        unsigned int            xgmi_sdma_queue_count;
        unsigned int            total_queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee234bae33dc4d202d317b05d2db7ca9..393c218734fd236748b23638dfe8b17473a4c56f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,7 +47,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
        struct kfd_dev *dev = pm->dqm->dev;
 
        process_count = pm->dqm->processes_count;
-       queue_count = pm->dqm->queue_count;
+       queue_count = pm->dqm->active_queue_count;
        compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
                                pm->dqm->xgmi_sdma_queue_count;
 
@@ -141,7 +141,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
        pm->ib_size_bytes = alloc_size_bytes;
 
        pr_debug("Building runlist ib process count: %d queues count %d\n",
-               pm->dqm->processes_count, pm->dqm->queue_count);
+               pm->dqm->processes_count, pm->dqm->active_queue_count);
 
        /* build the run list ib packet */
        list_for_each_entry(cur, queues, list) {
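
In pm_calc_rlib_size() above, the renamed counter feeds the runlist sizing
arithmetic. A worked example with made-up numbers: if active_queue_count is
10 and those include 2 SDMA queues and 1 XGMI SDMA queue, then
compute_queue_count = 10 - 2 - 1 = 7, and the runlist indirect buffer is
sized for 7 compute-queue map packets plus the per-process packets implied
by processes_count.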
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 0fb04a2185c4ea95e1e11c15ad98befbd4de4314..9e2aec5fad86b6a0dffc6d84a63e32aba84467d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -266,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                if ((dev->dqm->sched_policy ==
                     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
                ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
-               (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+               (dev->dqm->active_queue_count >= get_queues_num(dev->dqm)))) {
                        pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
                        retval = -EPERM;
                        goto err_create_queue;
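
The hunk above is the admission check for the HWS no-oversubscription
policy: queue creation is refused once every KFD VMID is already claimed by
a process or every hardware queue slot is already taken by an active queue.
A self-contained sketch of the predicate (the struct is hypothetical and
hw_queue_slots stands in for get_queues_num()):

#include <stdbool.h>

/*
 * Sketch of the check in pqm_create_queue() under
 * KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: reject a new queue when
 * processes exhaust the VMIDs or active queues exhaust the HW slots.
 */
struct dqm_limits_sketch {
	unsigned int processes_count;
	unsigned int active_queue_count;
	unsigned int vmid_num_kfd;	/* dev->vm_info.vmid_num_kfd in the hunk */
	unsigned int hw_queue_slots;	/* stand-in for get_queues_num() */
};

static bool would_oversubscribe(const struct dqm_limits_sketch *s)
{
	return s->processes_count >= s->vmid_num_kfd ||
	       s->active_queue_count >= s->hw_queue_slots;
}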