/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"

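/*
 * Size reserved per MQD (memory queue descriptor) in the device's GTT
 * buffer; see the queue-size calculation in kgd2kfd_device_init() below.
 */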
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the KFD driver during suspend or reset.
 * Once locked, the driver stops any further GPU execution and process
 * creation (open) returns -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

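/*
 * Per-ASIC capability tables. Each kfd_device_info entry describes one GPU
 * family: interrupt ring entry size, event interrupt class, number of watch
 * points, MQD alignment, CWSR support, whether an IOMMUv2 device is required,
 * whether PCIe atomics are needed, and the SDMA engine/queue layout.
 */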
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_sdma_queues_per_engine = 2,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega20_device_info = {
	.asic_family = CHIP_VEGA20,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_sdma_queues_per_engine = 8,
};

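/* Maps a PCI device ID to the kfd_device_info entry for that ASIC. */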
struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
	{ 0x15DD, &raven_device_info },		/* Raven */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf */
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf */
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
	{ 0x6860, &vega10_device_info },	/* Vega10 */
	{ 0x6861, &vega10_device_info },	/* Vega10 */
	{ 0x6862, &vega10_device_info },	/* Vega10 */
	{ 0x6863, &vega10_device_info },	/* Vega10 */
	{ 0x6864, &vega10_device_info },	/* Vega10 */
	{ 0x6867, &vega10_device_info },	/* Vega10 */
	{ 0x6868, &vega10_device_info },	/* Vega10 */
	{ 0x6869, &vega10_device_info },	/* Vega10 */
	{ 0x686A, &vega10_device_info },	/* Vega10 */
	{ 0x686B, &vega10_device_info },	/* Vega10 */
	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf */
	{ 0x686D, &vega10_device_info },	/* Vega10 */
	{ 0x686E, &vega10_device_info },	/* Vega10 */
	{ 0x686F, &vega10_device_info },	/* Vega10 */
	{ 0x687F, &vega10_device_info },	/* Vega10 */
	{ 0x66a0, &vega20_device_info },	/* Vega20 */
	{ 0x66a1, &vega20_device_info },	/* Vega20 */
	{ 0x66a2, &vega20_device_info },	/* Vega20 */
	{ 0x66a3, &vega20_device_info },	/* Vega20 */
	{ 0x66a7, &vega20_device_info },	/* Vega20 */
	{ 0x66af, &vega20_device_info }		/* Vega20 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

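/*
 * Return the kfd_device_info entry matching a PCI device ID, or NULL (with
 * a warning) when the device is not listed in supported_devices[].
 */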
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;
	int ret;

	const struct kfd_device_info *device_info =
					lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (device_info->needs_pci_atomics && ret < 0) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		kfree(kfd);
		return NULL;
	} else if (!ret)
		kfd->pci_atomic_requested = true;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
			sizeof(kfd->doorbell_available_index));

	return kfd;
}

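/*
 * CWSR (compute wave save/restore) allows compute waves to be preempted in
 * the middle of execution. Select the trap handler binary for the ASIC
 * generation; the BUILD_BUG_ON checks ensure each handler fits in one page.
 */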
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

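/*
 * Second-stage initialization, called once the KGD driver has set up the
 * shared resources: size and allocate the GTT buffer that holds MQDs and
 * runlist packets, then bring up doorbells, topology, interrupts, the device
 * queue manager and (where needed) IOMMUv2 before resuming the device.
 */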
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC1);
	kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

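/*
 * GPU reset handling: kgd2kfd_pre_reset() suspends KFD and takes the DQM
 * lock so no further work is submitted while the GPU resets, and signals the
 * reset event to user space; kgd2kfd_post_reset() releases the lock, resumes
 * the device and drops the kfd_locked count taken during suspend.
 */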
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kgd2kfd_suspend(kfd);

	/* hold dqm->lock to prevent further execution */
	dqm_lock(kfd->dqm);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD will not be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and wait for
 * them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	dqm_unlock(kfd->dqm);

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count != 0, "KFD reset ref. error");

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

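/*
 * Suspend/resume use kfd_locked as a reference count: the first device to
 * suspend evicts all KFD processes, and the last device to resume restores
 * them.
 */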
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For first KFD device suspend all the KFD processes */
	if (atomic_inc_return(&kfd_locked) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

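/*
 * kgd2kfd_quiesce_mm()/kgd2kfd_resume_mm() evict and restore the queues of
 * the KFD process that owns the given mm_struct, taking a process reference
 * for the duration of the call.
 */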
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

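/*
 * GTT sub-allocator: the single GTT buffer allocated at device init is
 * carved into fixed-size chunks, with a bitmap tracking which chunks are in
 * use. kfd_gtt_sa_init() sizes the bitmap; allocate/free operate on chunk
 * ranges under gtt_sa_lock.
 */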
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

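/*
 * First-fit search over the chunk bitmap: find a free chunk, then extend the
 * range with contiguous free chunks until the request is covered, restarting
 * the search whenever a gap is hit. The bits are only marked allocated once
 * a large enough contiguous range has been found.
 */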
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
				   kfd->gtt_sa_num_of_chunks,
				   start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If next free chunk is not contiguous than we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
			found <= (*mem_obj)->range_end;
			found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS, which will
 * trigger a GPU reset and bring the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif