/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);
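/*
 * Lookups walk the table under SRCU and must take a reference before the
 * read side ends if the process is used afterwards. A minimal sketch of
 * the reader pattern (see kfd_lookup_process_by_mm() later in this file):
 *
 *	idx = srcu_read_lock(&kfd_processes_srcu);
 *	p = find_process_by_mm(mm);
 *	if (p)
 *		kref_get(&p->ref);
 *	srcu_read_unlock(&kfd_processes_srcu, idx);
 */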
/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;
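/*
 * kfd_process_create_wq() below allocates this with
 * alloc_ordered_workqueue(), which is what provides the one-at-a-time
 * restore guarantee described above.
 */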
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;
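/*
 * kfd_procfs_init() registers this kobject as "proc" under the KFD
 * device's kobject; kfd_create_process() then adds a <pid> directory
 * with per-process attributes (pasid, vram_<gpuid>, sdma_<gpuid>) and
 * a "queues" subdirectory underneath it.
 */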
/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};
static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);
	if (!workarea)
		return;

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * Past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    It's possible that during this step a few SDMA queue nodes get deleted
	 *    from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the sdma
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */

	INIT_LIST_HEAD(&sdma_q_list.list);
	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}
	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);
	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);
	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counter.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);
	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}
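/*
 * kfd_procfs_show() is the sysfs ->show() callback for the per-process
 * attributes created in this file. Note the "sdma_" case below: the
 * counter is read via schedule_work() + flush_work(), so the get_user()
 * reads in kfd_sdma_activity_worker() run in a worker thread that can
 * temporarily adopt the target process's mm with kthread_use_mm(),
 * rather than in the context of the reading task.
 */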
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};
void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}
static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};
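/*
 * With the attributes above, each queue registered through
 * kfd_procfs_add_queue() exposes three read-only files,
 * proc/<pid>/queues/<queue id>/{size,type,gpuid}, all answered by
 * kfd_procfs_queue_show().
 */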
int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}
static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
				 char *name)
{
	int ret = 0;

	if (!p || !attr || !name)
		return -EINVAL;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(p->kobj, attr);

	return ret;
}
static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int ret = 0;
	struct kfd_process_device *pdd;

	if (!p)
		return -EINVAL;

	if (!p->kobj)
		return -EFAULT;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
		if (ret)
			pr_warn("Creating vram usage for gpu id %d failed",
				(int)pdd->dev->id);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
		if (ret)
			pr_warn("Creating sdma usage for gpu id %d failed",
				(int)pdd->dev->id);
	}

	return ret;
}
void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should be only called right after the process
 *	is created and when kfd_processes_mutex is still being held
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
	*kptr = NULL;
	return err;
}
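/*
 * Both in-file users follow the same pattern: pass a dGPU aperture base
 * as the fixed GPU VA and receive the kernel mapping back through kptr,
 * e.g. in kfd_process_device_reserve_ib_mem() below:
 *
 *	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
 *				      &kaddr);
 */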
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IB to AMDGPU from kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret) {
			process = ERR_PTR(ret);
			goto out;
		}

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		process->attr_pasid.name = "pasid";
		process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&process->attr_pasid);
		ret = sysfs_create_file(process->kobj, &process->attr_pasid);
		if (ret)
			pr_warn("Creating pasid for pid %d failed",
				(int)process->lead_thread->pid);

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		ret = kfd_procfs_add_sysfs_files(process);
		if (ret)
			pr_warn("Creating sysfs usage file for pid %d failed",
				(int)process->lead_thread->pid);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}
static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
					pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend.
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
	}
}
/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd;

	/* Remove the procfs files */
	if (p->kobj) {
		sysfs_remove_file(p->kobj, &p->attr_pasid);
		kobject_del(p->kobj_queues);
		kobject_put(p->kobj_queues);
		p->kobj_queues = NULL;

		list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
			sysfs_remove_file(p->kobj, &pdd->attr_vram);
			sysfs_remove_file(p->kobj, &pdd->attr_sdma);
		}

		kobject_del(p->kobj);
		kobject_put(p->kobj);
		p->kobj = NULL;
	}

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}
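/*
 * Teardown ordering: kfd_process_notifier_release() below runs when the
 * process's mm goes away; it unhashes the process and destroys its
 * queues, and the final mmu_notifier_put() leads to
 * kfd_process_free_notifier(), which drops the reference that queues
 * kfd_process_wq_release() above.
 */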
static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.free_notifier = kfd_process_free_notifier,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}
/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	INIT_LIST_HEAD(&process->per_device_data);
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* Init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Must be last, have to use release destruction after this */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_register_notifier;

	get_task_struct(process->lead_thread);
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	return process;

err_register_notifier:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
		}
	}

	return 0;
}
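/*
 * Hypothetical example of the masking above: if the reserved non-CP
 * range were 0x060-0x07f, the loop would mark doorbells 0x060-0x07f as
 * well as the mirrored range 0x060+KFD_QUEUE_DOORBELL_MIRROR_OFFSET
 * through 0x07f+KFD_QUEUE_DOORBELL_MIRROR_OFFSET as unavailable for
 * user queue allocation.
 */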
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}
/*
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device. Unbinding occurs when the process dies or the device
 * is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once device pdd is created until
	 * pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime_usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}
bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}
/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}
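/*
 * Because eviction is reference-counted per process-device (see the
 * comment above), nested callers compose: if two independent sources
 * evict the same process, its queues stay off the hardware until both
 * matching restores have run.
 */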
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again. But
	 * restore has a few more steps to finish. So let's wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions).
	 */
	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}
int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
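/*
 * The CWSR mapping flow, as set up in this file: on APUs,
 * kfd_process_init_cwsr_apu() vm_mmap()s /dev/kfd with a
 * KFD_MMAP_TYPE_RESERVED_MEM offset, which is presumably routed by the
 * character device's mmap handler to kfd_reserved_mem_mmap() above; the
 * handler backs the VMA with zeroed pages, and the trap handler code is
 * then copied into qpd->cwsr_kaddr.
 */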
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
						  pdd->process->pasid);
	}
}
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif