/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS        2000
const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGAM",
        "VEGA10",
        "VEGA12",
        "VEGA20",
        "RAVEN",
        "ARCTURUS",
        "RENOIR",
        "NAVI10",
        "NAVI14",
        "NAVI12",
        "LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and the NAKs
 * received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

        return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
                amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        return amdgpu_asic_supports_baco(adev);
}

/**
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write)
{
        unsigned long flags;
        uint32_t hi = ~0;
        uint64_t last;

#ifdef CONFIG_64BIT
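        /* On 64-bit kernels, service as much of the request as possible
         * through the CPU-visible VRAM aperture and fall back to the
         * indirect MM_INDEX/MM_DATA window below for any remainder.
         */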
        last = min(pos + size, adev->gmc.visible_vram_size);
        if (last > pos) {
                void __iomem *addr = adev->mman.aper_base_kaddr + pos;
                size_t count = last - pos;

                if (write) {
                        memcpy_toio(addr, buf, count);
                        mb();
                        amdgpu_asic_flush_hdp(adev, NULL);
                } else {
                        amdgpu_asic_invalidate_hdp(adev, NULL);
                        mb();
                        memcpy_fromio(buf, addr, count);
                }

                if (count == size)
                        return;

                pos += count;
                buf += count / 4;
                size -= count;
        }
#endif

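        /* Indirect access: the low 31 bits of the byte address go into
         * MM_INDEX with bit 31 set to select the VRAM aperture, the upper
         * bits go into MM_INDEX_HI, and each MM_DATA access then moves one
         * dword of VRAM.
         */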
        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        for (last = pos + size; pos < last; pos += 4) {
                uint32_t tmp = pos >> 31;

                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
                if (tmp != hi) {
                        WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
                        hi = tmp;
                }
                if (write)
                        WREG32_NO_KIQ(mmMM_DATA, *buf++);
                else
                        *buf++ = RREG32_NO_KIQ(mmMM_DATA);
        }
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags)
{
        uint32_t ret;

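        /* When running on top of SR-IOV at runtime, register access is
         * routed through the KIQ ring unless the caller explicitly asks to
         * bypass it.
         */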
        if ((acc_flags & AMDGPU_REGS_KIQ) ||
            (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
                return amdgpu_kiq_rreg(adev, reg);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (offset < adev->rmmio_size)
                return readb(adev->rmmio + offset);
        BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
                                       uint32_t reg, uint32_t v,
                                       uint32_t acc_flags)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }

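        /* A write through MM_DATA (register offset 1) that targets indirect
         * register 0x5702C gets a 500us delay on VEGA10 and later; the
         * matching MM_INDEX value is remembered in amdgpu_mm_wreg() and
         * amdgpu_io_wreg().
         */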
        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
{
        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if ((acc_flags & AMDGPU_REGS_KIQ) ||
            (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
                return amdgpu_kiq_wreg(adev, reg, v);

        amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}

/*
 * amdgpu_mm_wreg_mmio_rlc - write a register through MMIO or through the RLC
 * path if it is in the RLC access range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                             uint32_t acc_flags)
{
        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {

                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
        }

        amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
        DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

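        /* Each entry in @registers is a (register, AND mask, OR mask)
         * triple.  On AMDGPU_FAMILY_AI and newer, only the bits covered by
         * the AND mask may be OR-ed back in.
         */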
        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        if (adev->family >= AMDGPU_FAMILY_AI)
                                tmp |= (or_mask & and_mask);
                        else
                                tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        amdgpu_asic_init_doorbell_index(adev);

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             adev->doorbell_index.max_assignment + 1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        /* For Vega, reserve and map two pages on the doorbell BAR, since the
         * SDMA paging queue doorbell uses the second page. The
         * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
         * doorbells are in the first page, so with the paging queue enabled
         * num_doorbells needs one extra page (0x400 dwords).
         */
        if (adev->asic_type >= CHIP_VEGA10)
                adev->doorbell.num_doorbells += 0x400;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
        u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
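        /* rbar_size above is the size encoding pci_resize_resource()
         * expects: effectively log2 of the BAR size in MB, i.e. the next
         * power of two that covers the real VRAM size.
         */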
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization
                 * case, after a VM reboot some old SMC firmware still needs
                 * the driver to perform vPost, otherwise the GPU hangs. SMC
                 * firmware versions above 22.15 don't have this flaw, so we
                 * force vPost for versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

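                        /* the SMC firmware version is stored at dword
                         * offset 69 of the firmware image
                         */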
                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory
         */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8);
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
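        /* amdgpu_smu_memory_pool_size is given in units of 256MB,
         * hence the shift by 28 bits to convert it to bytes
         */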
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater than or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

        return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;

        if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("amdgpu: switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
                r = pci_enable_device(dev->pdev);
                if (r)
                        DRM_WARN("pci_enable_device failed (%d)\n", r);
                amdgpu_device_resume(dev, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                pr_info("amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true);
                pci_save_state(dev->pdev);
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3cold);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_powergating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
        }
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                                   enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                              enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
        }
        return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                              enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than the
 * requested version, and 1 if it is smaller or the IP block doesn't exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                                       enum amd_ip_block_type type,
                                       u32 major, u32 minor)
{
        struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

        if (ip_block && ((ip_block->version->major > major) ||
                         ((ip_block->version->major == major) &&
                          (ip_block->version->minor >= minor))))
                return 0;

        return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                               const struct amdgpu_ip_block_version *ip_block_version)
{
        if (!ip_block_version)
                return -EINVAL;

        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                 ip_block_version->funcs->name);

        adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

        return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
        adev->enable_virtual_display = false;

        if (amdgpu_virtual_display) {
                struct drm_device *ddev = adev->ddev;
                const char *pci_address_name = pci_name(ddev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

                pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
                pciaddstr_tmp = pciaddstr;
                while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
                        pciaddname = strsep(&pciaddname_tmp, ",");
                        if (!strcmp("all", pciaddname)
                            || !strcmp(pci_address_name, pciaddname)) {
                                long num_crtc;
                                int res = -1;

                                adev->enable_virtual_display = true;

                                if (pciaddname_tmp)
                                        res = kstrtol(pciaddname_tmp, 10,
                                                      &num_crtc);

                                if (!res) {
                                        if (num_crtc < 1)
                                                num_crtc = 1;
                                        if (num_crtc > 6)
                                                num_crtc = 6;
                                        adev->mode_info.num_crtc = num_crtc;
                                } else {
                                        adev->mode_info.num_crtc = 1;
                                }
                                break;
                        }
                }

                DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                         amdgpu_virtual_display, pci_address_name,
                         adev->enable_virtual_display, adev->mode_info.num_crtc);

                kfree(pciaddstr);
        }
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        const struct gpu_info_firmware_header_v1_0 *hdr;

        adev->firmware.gpu_info_fw = NULL;

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_OLAND:
        case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
#endif
        case CHIP_VEGA20:
        default:
                return 0;
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
                if (adev->rev_id >= 8)
                        chip_name = "raven2";
                else if (adev->pdev->device == 0x15d8)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
        case CHIP_RENOIR:
                chip_name = "renoir";
                break;
        case CHIP_NAVI10:
                chip_name = "navi10";
                break;
        case CHIP_NAVI14:
                chip_name = "navi14";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
        err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
        if (err) {
                dev_err(adev->dev,
                        "Failed to load gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }
        err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
        if (err) {
                dev_err(adev->dev,
                        "Failed to validate gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }

        hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
        amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

        switch (hdr->version_major) {
        case 1:
        {
                const struct gpu_info_firmware_v1_0 *gpu_info_fw =
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
                        goto parse_soc_bounding_box;

                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
                adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
                adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
                adev->gfx.config.max_texture_channel_caches =
                        le32_to_cpu(gpu_info_fw->gc_num_tccs);
                adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
                adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
                adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
                adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
                adev->gfx.config.double_offchip_lds_buf =
                        le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
                adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
                adev->gfx.cu_info.max_waves_per_simd =
                        le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
                if (hdr->version_minor >= 1) {
                        const struct gpu_info_firmware_v1_1 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
                                                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->gfx.config.num_sc_per_sh =
                                le32_to_cpu(gpu_info_fw->num_sc_per_sh);
                        adev->gfx.config.num_packer_per_sc =
                                le32_to_cpu(gpu_info_fw->num_packer_per_sc);
                }

parse_soc_bounding_box:
                /*
                 * The soc bounding box info is not integrated into the
                 * discovery table, so we always need to parse it from the
                 * gpu info firmware.
                 */
                if (hdr->version_minor == 2) {
                        const struct gpu_info_firmware_v1_2 *gpu_info_fw =
                                (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
                                                                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
                }
                break;
        }
        default:
                dev_err(adev->dev,
                        "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
                err = -EINVAL;
                goto out;
        }
out:
        return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run. This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
        int i, r;

        amdgpu_device_enable_virtual_display(adev);

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
                        adev->family = AMDGPU_FAMILY_CZ;
                else
                        adev->family = AMDGPU_FAMILY_VI;

                r = vi_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_OLAND:
        case CHIP_HAINAN:
                adev->family = AMDGPU_FAMILY_SI;
                r = si_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
                if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
                        adev->family = AMDGPU_FAMILY_CI;
                else
                        adev->family = AMDGPU_FAMILY_KV;

                r = cik_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
#endif
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
        case CHIP_ARCTURUS:
        case CHIP_RENOIR:
                if (adev->asic_type == CHIP_RAVEN ||
                    adev->asic_type == CHIP_RENOIR)
                        adev->family = AMDGPU_FAMILY_RV;
                else
                        adev->family = AMDGPU_FAMILY_AI;

                r = soc15_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->family = AMDGPU_FAMILY_NV;

                r = nv_set_ip_blocks(adev);
                if (r)
                        return r;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        r = amdgpu_device_parse_gpu_info_fw(adev);
        if (r)
                return r;

        if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
                amdgpu_discovery_get_gfx_info(adev);

        amdgpu_amdkfd_device_probe(adev);

        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return -EAGAIN;
        }

        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d <%s>\n",
                                  i, adev->ip_blocks[i].version->funcs->name);
                        adev->ip_blocks[i].status.valid = false;
                } else {
                        if (adev->ip_blocks[i].version->funcs->early_init) {
                                r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
                                if (r == -ENOENT) {
                                        adev->ip_blocks[i].status.valid = false;
                                } else if (r) {
                                        DRM_ERROR("early_init of IP block <%s> failed %d\n",
                                                  adev->ip_blocks[i].version->funcs->name, r);
                                        return r;
                                } else {
                                        adev->ip_blocks[i].status.valid = true;
                                }
                        } else {
                                adev->ip_blocks[i].status.valid = true;
                        }
                }
                /* get the vbios after the asic_funcs are set up */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
                        /* Read BIOS */
                        if (!amdgpu_get_bios(adev))
                                return -EINVAL;

                        r = amdgpu_atombios_init(adev);
                        if (r) {
                                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
                                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
                                return r;
                        }
                }
        }

        adev->cg_flags &= amdgpu_cg_mask;
        adev->pg_flags &= amdgpu_pg_mask;

        return 0;
}

static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
                if (adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
                        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                        if (r) {
                                DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                          adev->ip_blocks[i].version->funcs->name, r);
                                return r;
                        }
                        adev->ip_blocks[i].status.hw = true;
                }
        }

        return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
                if (adev->ip_blocks[i].status.hw)
                        continue;
                r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                if (r) {
                        DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
                        return r;
                }
                adev->ip_blocks[i].status.hw = true;
        }

        return 0;
}

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
        int r = 0;
        int i;
        uint32_t smu_version;

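        /* On VEGA10 and newer asics firmware loading is handled by the PSP
         * block, so bring the PSP up (or resume it after a reset/suspend)
         * before everything else.
         */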
        if (adev->asic_type >= CHIP_VEGA10) {
                for (i = 0; i < adev->num_ip_blocks; i++) {
                        if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
                                continue;

                        /* no need to do the fw loading again if already done */
                        if (adev->ip_blocks[i].status.hw)
                                break;

                        if (adev->in_gpu_reset || adev->in_suspend) {
                                r = adev->ip_blocks[i].version->funcs->resume(adev);
                                if (r) {
                                        DRM_ERROR("resume of IP block <%s> failed %d\n",
                                                  adev->ip_blocks[i].version->funcs->name, r);
                                        return r;
                                }
                        } else {
                                r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                                if (r) {
                                        DRM_ERROR("hw_init of IP block <%s> failed %d\n",
                                                  adev->ip_blocks[i].version->funcs->name, r);
                                        return r;
                                }
                        }

                        adev->ip_blocks[i].status.hw = true;
                        break;
                }
        }

        if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
                r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

        return r;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
1884 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1885 {
1886 int i, r;
1887
1888 r = amdgpu_ras_init(adev);
1889 if (r)
1890 return r;
1891
1892 for (i = 0; i < adev->num_ip_blocks; i++) {
1893 if (!adev->ip_blocks[i].status.valid)
1894 continue;
1895 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1896 if (r) {
1897 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1898 adev->ip_blocks[i].version->funcs->name, r);
1899 goto init_failed;
1900 }
1901 adev->ip_blocks[i].status.sw = true;
1902
1903 /* need to do gmc hw init early so we can allocate gpu mem */
1904 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1905 r = amdgpu_device_vram_scratch_init(adev);
1906 if (r) {
1907 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1908 goto init_failed;
1909 }
1910 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1911 if (r) {
1912 DRM_ERROR("hw_init %d failed %d\n", i, r);
1913 goto init_failed;
1914 }
1915 r = amdgpu_device_wb_init(adev);
1916 if (r) {
1917 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1918 goto init_failed;
1919 }
1920 adev->ip_blocks[i].status.hw = true;
1921
1922 /* right after GMC hw init, we create CSA */
1923 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1924 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
1925 AMDGPU_GEM_DOMAIN_VRAM,
1926 AMDGPU_CSA_SIZE);
1927 if (r) {
1928 DRM_ERROR("allocate CSA failed %d\n", r);
1929 goto init_failed;
1930 }
1931 }
1932 }
1933 }
1934
1935 if (amdgpu_sriov_vf(adev))
1936 amdgpu_virt_init_data_exchange(adev);
1937
1938 r = amdgpu_ib_pool_init(adev);
1939 if (r) {
1940 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1941 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
1942 goto init_failed;
1943 }
1944
1945 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
1946 if (r)
1947 goto init_failed;
1948
1949 r = amdgpu_device_ip_hw_init_phase1(adev);
1950 if (r)
1951 goto init_failed;
1952
1953 r = amdgpu_device_fw_loading(adev);
1954 if (r)
1955 goto init_failed;
1956
1957 r = amdgpu_device_ip_hw_init_phase2(adev);
1958 if (r)
1959 goto init_failed;
1960
1961 /*
1962 * Retired pages will be loaded from eeprom and reserved here.
1963 * This must be called after amdgpu_device_ip_hw_init_phase2, since
1964 * on some ASICs the RAS EEPROM code relies on the SMU being fully
1965 * functional for I2C communication, which is only true at this point.
1966 * recovery_init may fail, but it can free all resources allocated by
1967 * itself and its failure should not stop the amdgpu init process.
1968 *
1969 * Note: theoretically, this should be called before all vram allocations
1970 * to protect retired pages from being abused.
1971 */
1972 amdgpu_ras_recovery_init(adev);
1973
1974 if (adev->gmc.xgmi.num_physical_nodes > 1)
1975 amdgpu_xgmi_add_device(adev);
1976 amdgpu_amdkfd_device_init(adev);
1977
1978 init_failed:
1979 if (amdgpu_sriov_vf(adev))
1980 amdgpu_virt_release_full_gpu(adev, true);
1981
1982 return r;
1983 }
1984
1985 /**
1986 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1987 *
1988 * @adev: amdgpu_device pointer
1989 *
1990 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
1991 * this function before a GPU reset. If the value is retained after a
1992 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1993 */
1994 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1995 {
1996 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1997 }
1998
1999 /**
2000 * amdgpu_device_check_vram_lost - check if vram is valid
2001 *
2002 * @adev: amdgpu_device pointer
2003 *
2004 * Checks the reset magic value written to the gart pointer in VRAM.
2005 * The driver calls this after a GPU reset to see if the contents of
2006 * VRAM are lost or not.
2007 * Returns true if VRAM is lost, false if not.
2008 */
2009 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2010 {
2011 return !!memcmp(adev->gart.ptr, adev->reset_magic,
2012 AMDGPU_RESET_MAGIC_NUM);
2013 }
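/*
 * A minimal sketch of how the reset-magic pair above is used around a
 * reset. hypothetical_reset_flow() is illustrative only, not a driver
 * function; the calls inside it are real ones from this file.
 */
#if 0
static int hypothetical_reset_flow(struct amdgpu_device *adev)
{
	int r;

	amdgpu_device_fill_reset_magic(adev);	/* snapshot the magic before reset */
	r = amdgpu_asic_reset(adev);		/* the magic survives iff VRAM does */
	if (r)
		return r;
	if (amdgpu_device_check_vram_lost(adev))
		return amdgpu_device_recover_vram(adev);
	return 0;
}
#endif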
2014
2015 /**
2016 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2017 *
2018 * @adev: amdgpu_device pointer
2019 * @state: clockgating state (gate or ungate)
2020 *
2021 * The list of all the hardware IPs that make up the asic is walked and the
2022 * set_clockgating_state callbacks are run.
2023 * During late init this enables clockgating for the hardware IPs;
2024 * during fini or suspend it disables clockgating for them.
2025 * Returns 0 on success, negative error code on failure.
2026 */
2028 static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2029 enum amd_clockgating_state state)
2030 {
2031 int i, j, r;
2032
2033 if (amdgpu_emu_mode == 1)
2034 return 0;
2035
2036 for (j = 0; j < adev->num_ip_blocks; j++) {
2037 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2038 if (!adev->ip_blocks[i].status.late_initialized)
2039 continue;
2040 /* skip CG for VCE/UVD, it's handled specially */
2041 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2042 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2043 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2044 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2045 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2046 /* enable clockgating to save power */
2047 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2048 state);
2049 if (r) {
2050 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2051 adev->ip_blocks[i].version->funcs->name, r);
2052 return r;
2053 }
2054 }
2055 }
2056
2057 return 0;
2058 }
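/*
 * Worked example of the gate/ungate walk order above, assuming
 * num_ip_blocks == 4 (illustrative):
 *
 *   gate   (AMD_CG_STATE_GATE):   i = 0, 1, 2, 3  (front to back)
 *   ungate (AMD_CG_STATE_UNGATE): i = 3, 2, 1, 0  (back to front)
 *
 * so clockgating is torn down in the reverse of the order it was set up.
 * amdgpu_device_set_pg_state() below follows the same pattern.
 */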
2059
2060 static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
2061 {
2062 int i, j, r;
2063
2064 if (amdgpu_emu_mode == 1)
2065 return 0;
2066
2067 for (j = 0; j < adev->num_ip_blocks; j++) {
2068 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2069 if (!adev->ip_blocks[i].status.late_initialized)
2070 continue;
2071 /* skip PG for VCE/UVD, it's handled specially */
2072 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2073 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2074 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2075 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2076 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2077 /* enable powergating to save power */
2078 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2079 state);
2080 if (r) {
2081 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2082 adev->ip_blocks[i].version->funcs->name, r);
2083 return r;
2084 }
2085 }
2086 }
2087 return 0;
2088 }
2089
2090 static int amdgpu_device_enable_mgpu_fan_boost(void)
2091 {
2092 struct amdgpu_gpu_instance *gpu_ins;
2093 struct amdgpu_device *adev;
2094 int i, ret = 0;
2095
2096 mutex_lock(&mgpu_info.mutex);
2097
2098 /*
2099 * MGPU fan boost feature should be enabled
2100 * only when there are two or more dGPUs in
2101 * the system
2102 */
2103 if (mgpu_info.num_dgpu < 2)
2104 goto out;
2105
2106 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2107 gpu_ins = &(mgpu_info.gpu_ins[i]);
2108 adev = gpu_ins->adev;
2109 if (!(adev->flags & AMD_IS_APU) &&
2110 !gpu_ins->mgpu_fan_enabled &&
2111 adev->powerplay.pp_funcs &&
2112 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
2113 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2114 if (ret)
2115 break;
2116
2117 gpu_ins->mgpu_fan_enabled = 1;
2118 }
2119 }
2120
2121 out:
2122 mutex_unlock(&mgpu_info.mutex);
2123
2124 return ret;
2125 }
2126
2127 /**
2128 * amdgpu_device_ip_late_init - run late init for hardware IPs
2129 *
2130 * @adev: amdgpu_device pointer
2131 *
2132 * Late initialization pass for hardware IPs. The list of all the hardware
2133 * IPs that make up the asic is walked and the late_init callbacks are run.
2134 * late_init covers any special initialization that an IP requires
2135 * after all of the IPs have been initialized or something that needs to happen
2136 * late in the init process.
2137 * Returns 0 on success, negative error code on failure.
2138 */
2139 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2140 {
2141 struct amdgpu_gpu_instance *gpu_instance;
2142 int i = 0, r;
2143
2144 for (i = 0; i < adev->num_ip_blocks; i++) {
2145 if (!adev->ip_blocks[i].status.hw)
2146 continue;
2147 if (adev->ip_blocks[i].version->funcs->late_init) {
2148 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2149 if (r) {
2150 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2151 adev->ip_blocks[i].version->funcs->name, r);
2152 return r;
2153 }
2154 }
2155 adev->ip_blocks[i].status.late_initialized = true;
2156 }
2157
2158 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2159 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2160
2161 amdgpu_device_fill_reset_magic(adev);
2162
2163 r = amdgpu_device_enable_mgpu_fan_boost();
2164 if (r)
2165 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2166
2168 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2169 mutex_lock(&mgpu_info.mutex);
2170
2171 /*
2172 * Reset device p-state to low as this was booted with high.
2173 *
2174 * This should be performed only after all devices from the same
2175 * hive get initialized.
2176 *
2177 * However, the number of devices in a hive is not known in advance;
2178 * it is counted one by one as the devices are initialized.
2179 *
2180 * So, we wait for all XGMI interlinked devices initialized.
2181 * This may bring some delays as those devices may come from
2182 * different hives. But that should be OK.
2183 */
2184 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2185 for (i = 0; i < mgpu_info.num_gpu; i++) {
2186 gpu_instance = &(mgpu_info.gpu_ins[i]);
2187 if (gpu_instance->adev->flags & AMD_IS_APU)
2188 continue;
2189
2190 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
2191 if (r) {
2192 DRM_ERROR("pstate setting failed (%d).\n", r);
2193 break;
2194 }
2195 }
2196 }
2197
2198 mutex_unlock(&mgpu_info.mutex);
2199 }
2200
2201 return 0;
2202 }
2203
2204 /**
2205 * amdgpu_device_ip_fini - run fini for hardware IPs
2206 *
2207 * @adev: amdgpu_device pointer
2208 *
2209 * Main teardown pass for hardware IPs. The list of all the hardware
2210 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2211 * are run. hw_fini tears down the hardware associated with each IP
2212 * and sw_fini tears down any software state associated with each IP.
2213 * Returns 0 on success, negative error code on failure.
2214 */
2215 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2216 {
2217 int i, r;
2218
2219 amdgpu_ras_pre_fini(adev);
2220
2221 if (adev->gmc.xgmi.num_physical_nodes > 1)
2222 amdgpu_xgmi_remove_device(adev);
2223
2224 amdgpu_amdkfd_device_fini(adev);
2225
2226 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2227 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2228
2229 /* need to disable SMC first */
2230 for (i = 0; i < adev->num_ip_blocks; i++) {
2231 if (!adev->ip_blocks[i].status.hw)
2232 continue;
2233 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2234 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2235 /* XXX handle errors */
2236 if (r) {
2237 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2238 adev->ip_blocks[i].version->funcs->name, r);
2239 }
2240 adev->ip_blocks[i].status.hw = false;
2241 break;
2242 }
2243 }
2244
2245 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2246 if (!adev->ip_blocks[i].status.hw)
2247 continue;
2248
2249 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2250 /* XXX handle errors */
2251 if (r) {
2252 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2253 adev->ip_blocks[i].version->funcs->name, r);
2254 }
2255
2256 adev->ip_blocks[i].status.hw = false;
2257 }
2258
2260 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2261 if (!adev->ip_blocks[i].status.sw)
2262 continue;
2263
2264 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2265 amdgpu_ucode_free_bo(adev);
2266 amdgpu_free_static_csa(&adev->virt.csa_obj);
2267 amdgpu_device_wb_fini(adev);
2268 amdgpu_device_vram_scratch_fini(adev);
2269 amdgpu_ib_pool_fini(adev);
2270 }
2271
2272 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2273 /* XXX handle errors */
2274 if (r) {
2275 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2276 adev->ip_blocks[i].version->funcs->name, r);
2277 }
2278 adev->ip_blocks[i].status.sw = false;
2279 adev->ip_blocks[i].status.valid = false;
2280 }
2281
2282 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2283 if (!adev->ip_blocks[i].status.late_initialized)
2284 continue;
2285 if (adev->ip_blocks[i].version->funcs->late_fini)
2286 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2287 adev->ip_blocks[i].status.late_initialized = false;
2288 }
2289
2290 amdgpu_ras_fini(adev);
2291
2292 if (amdgpu_sriov_vf(adev))
2293 if (amdgpu_virt_release_full_gpu(adev, false))
2294 DRM_ERROR("failed to release exclusive mode on fini\n");
2295
2296 return 0;
2297 }
2298
2299 /**
2300 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2301 *
2302 * @work: work_struct.
2303 */
2304 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2305 {
2306 struct amdgpu_device *adev =
2307 container_of(work, struct amdgpu_device, delayed_init_work.work);
2308 int r;
2309
2310 r = amdgpu_ib_ring_tests(adev);
2311 if (r)
2312 DRM_ERROR("ib ring test failed (%d).\n", r);
2313 }
2314
2315 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2316 {
2317 struct amdgpu_device *adev =
2318 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2319
2320 mutex_lock(&adev->gfx.gfx_off_mutex);
2321 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2322 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2323 adev->gfx.gfx_off_state = true;
2324 }
2325 mutex_unlock(&adev->gfx.gfx_off_mutex);
2326 }
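/*
 * A minimal sketch of the caller side of the delayed work above. It
 * assumes the usual semantics of amdgpu_gfx_off_ctrl() from
 * amdgpu_gfx.c; the register-access placeholder is hypothetical.
 */
#if 0
	amdgpu_gfx_off_ctrl(adev, false); /* gfx_off_req_count++, GFXOFF blocked */
	/* ... safely access GFX registers here ... */
	amdgpu_gfx_off_ctrl(adev, true);  /* gfx_off_req_count--; once it reaches 0,
					   * the delayed work above re-enables
					   * GFXOFF through the SMU */
#endif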
2327
2328 /**
2329 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2330 *
2331 * @adev: amdgpu_device pointer
2332 *
2333 * Main suspend function for hardware IPs. Phase 1 walks the list of all
2334 * the hardware IPs that make up the asic and suspends only the display
2335 * (DCE) blocks; everything else is deferred to phase 2. suspend puts
2336 * the hardware and software state of each IP into a state suitable for suspend.
2337 * Returns 0 on success, negative error code on failure.
2338 */
2339 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2340 {
2341 int i, r;
2342
2344 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2345 if (!adev->ip_blocks[i].status.valid)
2346 continue;
2347 /* displays are handled separately */
2348 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
2350 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2351 /* XXX handle errors */
2352 if (r) {
2353 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2354 adev->ip_blocks[i].version->funcs->name, r);
2355 return r;
2356 }
2357 adev->ip_blocks[i].status.hw = false;
2358 }
2359 }
2360
2361 return 0;
2362 }
2363
2364 /**
2365 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2366 *
2367 * @adev: amdgpu_device pointer
2368 *
2369 * Main suspend function for hardware IPs. Phase 2 walks the list of all
2370 * the hardware IPs that make up the asic and runs the suspend callbacks
2371 * for everything except the display (DCE) blocks, which phase 1 already
2372 * handled. suspend puts each IP into a state suitable for suspend.
2373 * Returns 0 on success, negative error code on failure.
2374 */
2375 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2376 {
2377 int i, r;
2378
2379 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2380 if (!adev->ip_blocks[i].status.valid)
2381 continue;
2382 /* displays are handled in phase1 */
2383 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2384 continue;
2385 /* PSP lost connection when err_event_athub occurs */
2386 if (amdgpu_ras_intr_triggered() &&
2387 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2388 adev->ip_blocks[i].status.hw = false;
2389 continue;
2390 }
2392 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2393 /* XXX handle errors */
2394 if (r) {
2395 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2396 adev->ip_blocks[i].version->funcs->name, r);
2397 }
2398 adev->ip_blocks[i].status.hw = false;
2399 /* handle putting the SMC in the appropriate state */
2400 if (!amdgpu_sriov_vf(adev)) {
2401 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2402 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2403 if (r) {
2404 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2405 adev->mp1_state, r);
2406 return r;
2407 }
2408 }
2409 }
2411 }
2412
2413 return 0;
2414 }
2415
2416 /**
2417 * amdgpu_device_ip_suspend - run suspend for hardware IPs
2418 *
2419 * @adev: amdgpu_device pointer
2420 *
2421 * Main suspend function for hardware IPs. The list of all the hardware
2422 * IPs that make up the asic is walked and the
2423 * suspend callbacks are run. suspend puts the hardware and software state
2424 * in each IP into a state suitable for suspend.
2425 * Returns 0 on success, negative error code on failure.
2426 */
2427 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2428 {
2429 int r;
2430
2431 if (amdgpu_sriov_vf(adev))
2432 amdgpu_virt_request_full_gpu(adev, false);
2433
2434 r = amdgpu_device_ip_suspend_phase1(adev);
2435 if (r)
2436 return r;
2437 r = amdgpu_device_ip_suspend_phase2(adev);
2438
2439 if (amdgpu_sriov_vf(adev))
2440 amdgpu_virt_release_full_gpu(adev, false);
2441
2442 return r;
2443 }
2444
2445 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2446 {
2447 int i, r;
2448
2449 static enum amd_ip_block_type ip_order[] = {
2450 AMD_IP_BLOCK_TYPE_GMC,
2451 AMD_IP_BLOCK_TYPE_COMMON,
2452 AMD_IP_BLOCK_TYPE_PSP,
2453 AMD_IP_BLOCK_TYPE_IH,
2454 };
2455
2456 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2457 int j;
2458 struct amdgpu_ip_block *block;
2459
2460 for (j = 0; j < adev->num_ip_blocks; j++) {
2461 block = &adev->ip_blocks[j];
2462
2463 block->status.hw = false;
2464 if (block->version->type != ip_order[i] ||
2465 !block->status.valid)
2466 continue;
2467
2468 r = block->version->funcs->hw_init(adev);
2469 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2470 if (r)
2471 return r;
2472 block->status.hw = true;
2473 }
2474 }
2475
2476 return 0;
2477 }
2478
2479 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2480 {
2481 int i, r;
2482
2483 static enum amd_ip_block_type ip_order[] = {
2484 AMD_IP_BLOCK_TYPE_SMC,
2485 AMD_IP_BLOCK_TYPE_DCE,
2486 AMD_IP_BLOCK_TYPE_GFX,
2487 AMD_IP_BLOCK_TYPE_SDMA,
2488 AMD_IP_BLOCK_TYPE_UVD,
2489 AMD_IP_BLOCK_TYPE_VCE,
2490 AMD_IP_BLOCK_TYPE_VCN
2491 };
2492
2493 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2494 int j;
2495 struct amdgpu_ip_block *block;
2496
2497 for (j = 0; j < adev->num_ip_blocks; j++) {
2498 block = &adev->ip_blocks[j];
2499
2500 if (block->version->type != ip_order[i] ||
2501 !block->status.valid ||
2502 block->status.hw)
2503 continue;
2504
2505 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2506 r = block->version->funcs->resume(adev);
2507 else
2508 r = block->version->funcs->hw_init(adev);
2509
2510 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2511 if (r)
2512 return r;
2513 block->status.hw = true;
2514 }
2515 }
2516
2517 return 0;
2518 }
2519
2520 /**
2521 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2522 *
2523 * @adev: amdgpu_device pointer
2524 *
2525 * First resume function for hardware IPs. The list of all the hardware
2526 * IPs that make up the asic is walked and the resume callbacks are run for
2527 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2528 * after a suspend and updates the software state as necessary. This
2529 * function is also used for restoring the GPU after a GPU reset.
2530 * Returns 0 on success, negative error code on failure.
2531 */
2532 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2533 {
2534 int i, r;
2535
2536 for (i = 0; i < adev->num_ip_blocks; i++) {
2537 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2538 continue;
2539 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2540 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2541 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2542
2543 r = adev->ip_blocks[i].version->funcs->resume(adev);
2544 if (r) {
2545 DRM_ERROR("resume of IP block <%s> failed %d\n",
2546 adev->ip_blocks[i].version->funcs->name, r);
2547 return r;
2548 }
2549 adev->ip_blocks[i].status.hw = true;
2550 }
2551 }
2552
2553 return 0;
2554 }
2555
2556 /**
2557 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2558 *
2559 * @adev: amdgpu_device pointer
2560 *
2561 * Second resume function for hardware IPs. The list of all the hardware
2562 * IPs that make up the asic is walked and the resume callbacks are run for
2563 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2564 * functional state after a suspend and updates the software state as
2565 * necessary. This function is also used for restoring the GPU after a GPU
2566 * reset.
2567 * Returns 0 on success, negative error code on failure.
2568 */
2569 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2570 {
2571 int i, r;
2572
2573 for (i = 0; i < adev->num_ip_blocks; i++) {
2574 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2575 continue;
2576 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2577 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2578 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2579 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2580 continue;
2581 r = adev->ip_blocks[i].version->funcs->resume(adev);
2582 if (r) {
2583 DRM_ERROR("resume of IP block <%s> failed %d\n",
2584 adev->ip_blocks[i].version->funcs->name, r);
2585 return r;
2586 }
2587 adev->ip_blocks[i].status.hw = true;
2588 }
2589
2590 return 0;
2591 }
2592
2593 /**
2594 * amdgpu_device_ip_resume - run resume for hardware IPs
2595 *
2596 * @adev: amdgpu_device pointer
2597 *
2598 * Main resume function for hardware IPs. The hardware IPs
2599 * are split into two resume functions because they are
2600 * also used in recovering from a GPU reset and some additional
2601 * steps need to be taken between them. In this case (S3/S4) they are
2602 * run sequentially.
2603 * Returns 0 on success, negative error code on failure.
2604 */
2605 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2606 {
2607 int r;
2608
2609 r = amdgpu_device_ip_resume_phase1(adev);
2610 if (r)
2611 return r;
2612
2613 r = amdgpu_device_fw_loading(adev);
2614 if (r)
2615 return r;
2616
2617 r = amdgpu_device_ip_resume_phase2(adev);
2618
2619 return r;
2620 }
2621
2622 /**
2623 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2624 *
2625 * @adev: amdgpu_device pointer
2626 *
2627 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2628 */
2629 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2630 {
2631 if (amdgpu_sriov_vf(adev)) {
2632 if (adev->is_atom_fw) {
2633 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2634 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2635 } else {
2636 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2637 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2638 }
2639
2640 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2641 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2642 }
2643 }
2644
2645 /**
2646 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2647 *
2648 * @asic_type: AMD asic type
2649 *
2650 * Check if there is DC (new modesetting infrastructure) support for an asic.
2651 * Returns true if DC has support, false if not.
2652 */
2653 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2654 {
2655 switch (asic_type) {
2656 #if defined(CONFIG_DRM_AMD_DC)
2657 case CHIP_BONAIRE:
2658 case CHIP_KAVERI:
2659 case CHIP_KABINI:
2660 case CHIP_MULLINS:
2661 /*
2662 * We have systems in the wild with these ASICs that require
2663 * LVDS and VGA support which is not supported with DC.
2664 *
2665 * Fallback to the non-DC driver here by default so as not to
2666 * cause regressions.
2667 */
2668 return amdgpu_dc > 0;
2669 case CHIP_HAWAII:
2670 case CHIP_CARRIZO:
2671 case CHIP_STONEY:
2672 case CHIP_POLARIS10:
2673 case CHIP_POLARIS11:
2674 case CHIP_POLARIS12:
2675 case CHIP_VEGAM:
2676 case CHIP_TONGA:
2677 case CHIP_FIJI:
2678 case CHIP_VEGA10:
2679 case CHIP_VEGA12:
2680 case CHIP_VEGA20:
2681 #if defined(CONFIG_DRM_AMD_DC_DCN)
2682 case CHIP_RAVEN:
2683 case CHIP_NAVI10:
2684 case CHIP_NAVI14:
2685 case CHIP_NAVI12:
2686 case CHIP_RENOIR:
2687 #endif
2688 return amdgpu_dc != 0;
2689 #endif
2690 default:
2691 if (amdgpu_dc > 0)
2692 DRM_INFO("Display Core has been requested via kernel parameter "
2693 "but isn't supported by ASIC, ignoring\n");
2694 return false;
2695 }
2696 }
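/*
 * Worked example of how the amdgpu.dc module parameter (default -1,
 * i.e. auto) is evaluated above:
 *
 *   dc = -1: DC is used on the ASICs listed above, except the DCE8 parts
 *            (Bonaire/Kaveri/Kabini/Mullins), which fall back to non-DC
 *   dc =  0: DC is disabled everywhere
 *   dc =  1: DC is forced on, including the DCE8 parts
 */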
2697
2698 /**
2699 * amdgpu_device_has_dc_support - check if dc is supported
2700 *
2701 * @adev: amdgpu_device pointer
2702 *
2703 * Returns true for supported, false for not supported
2704 */
2705 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2706 {
2707 if (amdgpu_sriov_vf(adev))
2708 return false;
2709
2710 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2711 }
2712
2713
2714 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
2715 {
2716 struct amdgpu_device *adev =
2717 container_of(__work, struct amdgpu_device, xgmi_reset_work);
2718 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
2719
2720 /* It's a bug to not have a hive within this function */
2721 if (WARN_ON(!hive))
2722 return;
2723
2724 /*
2725 * Use task barrier to synchronize all xgmi reset works across the
2726 * hive. task_barrier_enter and task_barrier_exit will block
2727 * until all the threads running the xgmi reset works reach
2728 * those points. task_barrier_full will do both blocks.
2729 */
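	/*
	 * Illustrative timeline for a two-GPU hive on the BACO path:
	 *
	 *   GPU0: task_barrier_enter() --+
	 *   GPU1: task_barrier_enter() --+-- both released, both enter BACO
	 *   GPU0: task_barrier_exit()  --+
	 *   GPU1: task_barrier_exit()  --+-- both released, both exit BACO
	 *
	 * task_barrier_full() behaves like an enter immediately followed
	 * by an exit.
	 */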
2730 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
2731
2732 task_barrier_enter(&hive->tb);
2733 adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
2734
2735 if (adev->asic_reset_res)
2736 goto fail;
2737
2738 task_barrier_exit(&hive->tb);
2739 adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
2740
2741 if (adev->asic_reset_res)
2742 goto fail;
2743
2744 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
2745 adev->mmhub.funcs->reset_ras_error_count(adev);
2746 } else {
2747
2748 task_barrier_full(&hive->tb);
2749 adev->asic_reset_res = amdgpu_asic_reset(adev);
2750 }
2751
2752 fail:
2753 if (adev->asic_reset_res)
2754 DRM_WARN("ASIC reset failed with error %d for drm dev %s",
2755 adev->asic_reset_res, adev->ddev->unique);
2756 }
2757
2758 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
2759 {
2760 char *input = amdgpu_lockup_timeout;
2761 char *timeout_setting = NULL;
2762 int index = 0;
2763 long timeout;
2764 int ret = 0;
2765
2766 /*
2767 * By default the timeout for non-compute jobs is 10000 ms and
2768 * there is no timeout enforced on compute jobs.
2769 * In SR-IOV or passthrough mode, the timeout for compute jobs
2770 * is 10000 ms by default as well.
2771 */
2772 adev->gfx_timeout = msecs_to_jiffies(10000);
2773 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2774 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2775 adev->compute_timeout = adev->gfx_timeout;
2776 else
2777 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
2778
2779 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2780 while ((timeout_setting = strsep(&input, ",")) &&
2781 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
2782 ret = kstrtol(timeout_setting, 0, &timeout);
2783 if (ret)
2784 return ret;
2785
2786 if (timeout == 0) {
2787 index++;
2788 continue;
2789 } else if (timeout < 0) {
2790 timeout = MAX_SCHEDULE_TIMEOUT;
2791 } else {
2792 timeout = msecs_to_jiffies(timeout);
2793 }
2794
2795 switch (index++) {
2796 case 0:
2797 adev->gfx_timeout = timeout;
2798 break;
2799 case 1:
2800 adev->compute_timeout = timeout;
2801 break;
2802 case 2:
2803 adev->sdma_timeout = timeout;
2804 break;
2805 case 3:
2806 adev->video_timeout = timeout;
2807 break;
2808 default:
2809 break;
2810 }
2811 }
2812 /*
2813 * There is only one value specified and
2814 * it should apply to all non-compute jobs.
2815 */
2816 if (index == 1) {
2817 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
2818 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
2819 adev->compute_timeout = adev->gfx_timeout;
2820 }
2821 }
2822
2823 return ret;
2824 }
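/*
 * Worked examples of the amdgpu.lockup_timeout parameter as parsed
 * above (values in ms; illustrative):
 *
 *   lockup_timeout=10000
 *     gfx = sdma = video = 10 s; compute is also 10 s under SR-IOV or
 *     passthrough, otherwise unlimited (single-value rule above)
 *
 *   lockup_timeout=10000,60000,10000,10000
 *     gfx = 10 s, compute = 60 s, sdma = 10 s, video = 10 s
 *
 * Per slot, 0 keeps the default and a negative value means
 * MAX_SCHEDULE_TIMEOUT (no timeout).
 */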
2825
2826 /**
2827 * amdgpu_device_init - initialize the driver
2828 *
2829 * @adev: amdgpu_device pointer
2830 * @ddev: drm dev pointer
2831 * @pdev: pci dev pointer
2832 * @flags: driver flags
2833 *
2834 * Initializes the driver info and hw (all asics).
2835 * Returns 0 for success or an error on failure.
2836 * Called at driver startup.
2837 */
2838 int amdgpu_device_init(struct amdgpu_device *adev,
2839 struct drm_device *ddev,
2840 struct pci_dev *pdev,
2841 uint32_t flags)
2842 {
2843 int r, i;
2844 bool boco = false;
2845 u32 max_MBps;
2846
2847 adev->shutdown = false;
2848 adev->dev = &pdev->dev;
2849 adev->ddev = ddev;
2850 adev->pdev = pdev;
2851 adev->flags = flags;
2852
2853 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
2854 adev->asic_type = amdgpu_force_asic_type;
2855 else
2856 adev->asic_type = flags & AMD_ASIC_MASK;
2857
2858 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2859 if (amdgpu_emu_mode == 1)
2860 adev->usec_timeout *= 10;
2861 adev->gmc.gart_size = 512 * 1024 * 1024;
2862 adev->accel_working = false;
2863 adev->num_rings = 0;
2864 adev->mman.buffer_funcs = NULL;
2865 adev->mman.buffer_funcs_ring = NULL;
2866 adev->vm_manager.vm_pte_funcs = NULL;
2867 adev->vm_manager.vm_pte_num_scheds = 0;
2868 adev->gmc.gmc_funcs = NULL;
2869 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2870 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2871
2872 adev->smc_rreg = &amdgpu_invalid_rreg;
2873 adev->smc_wreg = &amdgpu_invalid_wreg;
2874 adev->pcie_rreg = &amdgpu_invalid_rreg;
2875 adev->pcie_wreg = &amdgpu_invalid_wreg;
2876 adev->pciep_rreg = &amdgpu_invalid_rreg;
2877 adev->pciep_wreg = &amdgpu_invalid_wreg;
2878 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
2879 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
2880 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2881 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2882 adev->didt_rreg = &amdgpu_invalid_rreg;
2883 adev->didt_wreg = &amdgpu_invalid_wreg;
2884 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2885 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2886 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2887 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2888
2889 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2890 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2891 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2892
2893 /* mutex initializations are all done here so we
2894 * can call functions without running into locking issues */
2895 atomic_set(&adev->irq.ih.lock, 0);
2896 mutex_init(&adev->firmware.mutex);
2897 mutex_init(&adev->pm.mutex);
2898 mutex_init(&adev->gfx.gpu_clock_mutex);
2899 mutex_init(&adev->srbm_mutex);
2900 mutex_init(&adev->gfx.pipe_reserve_mutex);
2901 mutex_init(&adev->gfx.gfx_off_mutex);
2902 mutex_init(&adev->grbm_idx_mutex);
2903 mutex_init(&adev->mn_lock);
2904 mutex_init(&adev->virt.vf_errors.lock);
2905 hash_init(adev->mn_hash);
2906 mutex_init(&adev->lock_reset);
2907 mutex_init(&adev->psp.mutex);
2908 mutex_init(&adev->notifier_lock);
2909
2910 r = amdgpu_device_check_arguments(adev);
2911 if (r)
2912 return r;
2913
2914 spin_lock_init(&adev->mmio_idx_lock);
2915 spin_lock_init(&adev->smc_idx_lock);
2916 spin_lock_init(&adev->pcie_idx_lock);
2917 spin_lock_init(&adev->uvd_ctx_idx_lock);
2918 spin_lock_init(&adev->didt_idx_lock);
2919 spin_lock_init(&adev->gc_cac_idx_lock);
2920 spin_lock_init(&adev->se_cac_idx_lock);
2921 spin_lock_init(&adev->audio_endpt_idx_lock);
2922 spin_lock_init(&adev->mm_stats.lock);
2923
2924 INIT_LIST_HEAD(&adev->shadow_list);
2925 mutex_init(&adev->shadow_list_lock);
2926
2927 INIT_LIST_HEAD(&adev->ring_lru_list);
2928 spin_lock_init(&adev->ring_lru_list_lock);
2929
2930 INIT_DELAYED_WORK(&adev->delayed_init_work,
2931 amdgpu_device_delayed_init_work_handler);
2932 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
2933 amdgpu_device_delay_enable_gfx_off);
2934
2935 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
2936
2937 adev->gfx.gfx_off_req_count = 1;
2938 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2939
2940 /* Registers mapping */
2941 /* TODO: block userspace mapping of io register */
2942 if (adev->asic_type >= CHIP_BONAIRE) {
2943 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2944 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2945 } else {
2946 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2947 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2948 }
2949
2950 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2951 if (!adev->rmmio)
2952 return -ENOMEM;
2954 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2955 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2956
2957 /* io port mapping */
2958 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2959 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2960 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2961 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2962 break;
2963 }
2964 }
2965 if (!adev->rio_mem)
2966 DRM_INFO("PCI I/O BAR not found.\n");
2967
2968 /* enable PCIE atomic ops */
2969 r = pci_enable_atomic_ops_to_root(adev->pdev,
2970 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
2971 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
2972 if (r) {
2973 adev->have_atomics_support = false;
2974 DRM_INFO("PCIE atomic ops is not supported\n");
2975 } else {
2976 adev->have_atomics_support = true;
2977 }
2978
2979 amdgpu_device_get_pcie_info(adev);
2980
2981 if (amdgpu_mcbp)
2982 DRM_INFO("MCBP is enabled\n");
2983
2984 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
2985 adev->enable_mes = true;
2986
2987 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
2988 r = amdgpu_discovery_init(adev);
2989 if (r) {
2990 dev_err(adev->dev, "amdgpu_discovery_init failed\n");
2991 return r;
2992 }
2993 }
2994
2995 /* early init functions */
2996 r = amdgpu_device_ip_early_init(adev);
2997 if (r)
2998 return r;
2999
3000 r = amdgpu_device_get_job_timeout_settings(adev);
3001 if (r) {
3002 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3003 return r;
3004 }
3005
3006 /* doorbell bar mapping and doorbell index init */
3007 amdgpu_device_doorbell_init(adev);
3008
3009 /* if we have more than one VGA card, disable the amdgpu VGA resources */
3010 /* this will fail for cards that aren't VGA class devices, just
3011 * ignore it */
3012 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3013
3014 if (amdgpu_device_supports_boco(ddev))
3015 boco = true;
3016 if (amdgpu_has_atpx() &&
3017 (amdgpu_is_atpx_hybrid() ||
3018 amdgpu_has_atpx_dgpu_power_cntl()) &&
3019 !pci_is_thunderbolt_attached(adev->pdev))
3020 vga_switcheroo_register_client(adev->pdev,
3021 &amdgpu_switcheroo_ops, boco);
3022 if (boco)
3023 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3024
3025 if (amdgpu_emu_mode == 1) {
3026 /* post the asic on emulation mode */
3027 emu_soc_asic_init(adev);
3028 goto fence_driver_init;
3029 }
3030
3031 /* detect if we are with an SRIOV vbios */
3032 amdgpu_device_detect_sriov_bios(adev);
3033
3034 /* check if we need to reset the asic
3035 * E.g., driver was not cleanly unloaded previously, etc.
3036 */
3037 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3038 r = amdgpu_asic_reset(adev);
3039 if (r) {
3040 dev_err(adev->dev, "asic reset on init failed\n");
3041 goto failed;
3042 }
3043 }
3044
3045 /* Post card if necessary */
3046 if (amdgpu_device_need_post(adev)) {
3047 if (!adev->bios) {
3048 dev_err(adev->dev, "no vBIOS found\n");
3049 r = -EINVAL;
3050 goto failed;
3051 }
3052 DRM_INFO("GPU posting now...\n");
3053 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3054 if (r) {
3055 dev_err(adev->dev, "gpu post error!\n");
3056 goto failed;
3057 }
3058 }
3059
3060 if (adev->is_atom_fw) {
3061 /* Initialize clocks */
3062 r = amdgpu_atomfirmware_get_clock_info(adev);
3063 if (r) {
3064 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3065 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3066 goto failed;
3067 }
3068 } else {
3069 /* Initialize clocks */
3070 r = amdgpu_atombios_get_clock_info(adev);
3071 if (r) {
3072 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3073 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3074 goto failed;
3075 }
3076 /* init i2c buses */
3077 if (!amdgpu_device_has_dc_support(adev))
3078 amdgpu_atombios_i2c_init(adev);
3079 }
3080
3081 fence_driver_init:
3082 /* Fence driver */
3083 r = amdgpu_fence_driver_init(adev);
3084 if (r) {
3085 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3086 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3087 goto failed;
3088 }
3089
3090 /* init the mode config */
3091 drm_mode_config_init(adev->ddev);
3092
3093 r = amdgpu_device_ip_init(adev);
3094 if (r) {
3095 /* failed in exclusive mode due to timeout */
3096 if (amdgpu_sriov_vf(adev) &&
3097 !amdgpu_sriov_runtime(adev) &&
3098 amdgpu_virt_mmio_blocked(adev) &&
3099 !amdgpu_virt_wait_reset(adev)) {
3100 dev_err(adev->dev, "VF exclusive mode timeout\n");
3101 /* Don't send request since VF is inactive. */
3102 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3103 adev->virt.ops = NULL;
3104 r = -EAGAIN;
3105 goto failed;
3106 }
3107 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3108 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3109 goto failed;
3110 }
3111
3112 DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3113 adev->gfx.config.max_shader_engines,
3114 adev->gfx.config.max_sh_per_se,
3115 adev->gfx.config.max_cu_per_sh,
3116 adev->gfx.cu_info.number);
3117
3118 amdgpu_ctx_init_sched(adev);
3119
3120 adev->accel_working = true;
3121
3122 amdgpu_vm_check_compute_bug(adev);
3123
3124 /* Initialize the buffer migration limit. */
3125 if (amdgpu_moverate >= 0)
3126 max_MBps = amdgpu_moverate;
3127 else
3128 max_MBps = 8; /* Allow 8 MB/s. */
3129 /* Get a log2 for easy divisions. */
3130 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3131
3132 amdgpu_fbdev_init(adev);
3133
3134 r = amdgpu_pm_sysfs_init(adev);
3135 if (r) {
3136 adev->pm_sysfs_en = false;
3137 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3138 } else
3139 adev->pm_sysfs_en = true;
3140
3141 r = amdgpu_ucode_sysfs_init(adev);
3142 if (r) {
3143 adev->ucode_sysfs_en = false;
3144 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3145 } else
3146 adev->ucode_sysfs_en = true;
3147
3148 if (amdgpu_testing & 1) {
3149 if (adev->accel_working)
3150 amdgpu_test_moves(adev);
3151 else
3152 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3153 }
3154 if (amdgpu_benchmarking) {
3155 if (adev->accel_working)
3156 amdgpu_benchmark(adev, amdgpu_benchmarking);
3157 else
3158 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3159 }
3160
3161 /*
3162 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3163 * Otherwise the mgpu fan boost feature will be skipped because the
3164 * gpu instance count would be too low.
3165 */
3166 amdgpu_register_gpu_instance(adev);
3167
3168 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3169 * explicit gating rather than handling it automatically.
3170 */
3171 r = amdgpu_device_ip_late_init(adev);
3172 if (r) {
3173 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3174 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3175 goto failed;
3176 }
3177
3178 /* must succeed. */
3179 amdgpu_ras_resume(adev);
3180
3181 queue_delayed_work(system_wq, &adev->delayed_init_work,
3182 msecs_to_jiffies(AMDGPU_RESUME_MS));
3183
3184 r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
3185 if (r) {
3186 dev_err(adev->dev, "Could not create pcie_replay_count");
3187 return r;
3188 }
3189
3190 if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3191 r = amdgpu_pmu_init(adev);
3192 if (r) dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3193 }
3194
3195 return 0;
3196
3197 failed:
3198 amdgpu_vf_error_trans_all(adev);
3199 if (boco)
3200 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3201
3202 return r;
3203 }
3204
3205 /**
3206 * amdgpu_device_fini - tear down the driver
3207 *
3208 * @adev: amdgpu_device pointer
3209 *
3210 * Tear down the driver info (all asics).
3211 * Called at driver shutdown.
3212 */
3213 void amdgpu_device_fini(struct amdgpu_device *adev)
3214 {
3215 int r;
3216
3217 DRM_INFO("amdgpu: finishing device.\n");
3218 flush_delayed_work(&adev->delayed_init_work);
3219 adev->shutdown = true;
3220
3221 /* make sure IB tests have finished before entering exclusive mode
3222 * to avoid preemption on IB test
3223 */
3224 if (amdgpu_sriov_vf(adev))
3225 amdgpu_virt_request_full_gpu(adev, false);
3226
3227 /* disable all interrupts */
3228 amdgpu_irq_disable_all(adev);
3229 if (adev->mode_info.mode_config_initialized) {
3230 if (!amdgpu_device_has_dc_support(adev))
3231 drm_helper_force_disable_all(adev->ddev);
3232 else
3233 drm_atomic_helper_shutdown(adev->ddev);
3234 }
3235 amdgpu_fence_driver_fini(adev);
3236 if (adev->pm_sysfs_en)
3237 amdgpu_pm_sysfs_fini(adev);
3238 amdgpu_fbdev_fini(adev);
3239 r = amdgpu_device_ip_fini(adev);
3240 if (adev->firmware.gpu_info_fw) {
3241 release_firmware(adev->firmware.gpu_info_fw);
3242 adev->firmware.gpu_info_fw = NULL;
3243 }
3244 adev->accel_working = false;
3245 /* free i2c buses */
3246 if (!amdgpu_device_has_dc_support(adev))
3247 amdgpu_i2c_fini(adev);
3248
3249 if (amdgpu_emu_mode != 1)
3250 amdgpu_atombios_fini(adev);
3251
3252 kfree(adev->bios);
3253 adev->bios = NULL;
3254 if (amdgpu_has_atpx() &&
3255 (amdgpu_is_atpx_hybrid() ||
3256 amdgpu_has_atpx_dgpu_power_cntl()) &&
3257 !pci_is_thunderbolt_attached(adev->pdev))
3258 vga_switcheroo_unregister_client(adev->pdev);
3259 if (amdgpu_device_supports_boco(adev->ddev))
3260 vga_switcheroo_fini_domain_pm_ops(adev->dev);
3261 vga_client_register(adev->pdev, NULL, NULL, NULL);
3262 if (adev->rio_mem)
3263 pci_iounmap(adev->pdev, adev->rio_mem);
3264 adev->rio_mem = NULL;
3265 iounmap(adev->rmmio);
3266 adev->rmmio = NULL;
3267 amdgpu_device_doorbell_fini(adev);
3268
3269 device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
3270 if (adev->ucode_sysfs_en)
3271 amdgpu_ucode_sysfs_fini(adev);
3272 if (IS_ENABLED(CONFIG_PERF_EVENTS))
3273 amdgpu_pmu_fini(adev);
3274 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
3275 amdgpu_discovery_fini(adev);
3276 }
3277
3278
3279 /*
3280 * Suspend & resume.
3281 */
3282 /**
3283 * amdgpu_device_suspend - initiate device suspend
3284 *
3285 * @dev: drm dev pointer
3286 * @fbcon: notify the fbdev of suspend
3288 *
3289 * Puts the hw in the suspend state (all asics).
3290 * Returns 0 for success or an error on failure.
3291 * Called at driver suspend.
3292 */
3293 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3294 {
3295 struct amdgpu_device *adev;
3296 struct drm_crtc *crtc;
3297 struct drm_connector *connector;
3298 struct drm_connector_list_iter iter;
3299 int r;
3300
3301 if (!dev || !dev->dev_private)
3302 return -ENODEV;
3304
3305 adev = dev->dev_private;
3306
3307 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3308 return 0;
3309
3310 adev->in_suspend = true;
3311 drm_kms_helper_poll_disable(dev);
3312
3313 if (fbcon)
3314 amdgpu_fbdev_set_suspend(adev, 1);
3315
3316 cancel_delayed_work_sync(&adev->delayed_init_work);
3317
3318 if (!amdgpu_device_has_dc_support(adev)) {
3319 /* turn off display hw */
3320 drm_modeset_lock_all(dev);
3321 drm_connector_list_iter_begin(dev, &iter);
3322 drm_for_each_connector_iter(connector, &iter)
3323 drm_helper_connector_dpms(connector,
3324 DRM_MODE_DPMS_OFF);
3325 drm_connector_list_iter_end(&iter);
3326 drm_modeset_unlock_all(dev);
3327 /* unpin the front buffers and cursors */
3328 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3329 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3330 struct drm_framebuffer *fb = crtc->primary->fb;
3331 struct amdgpu_bo *robj;
3332
3333 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3334 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3335 r = amdgpu_bo_reserve(aobj, true);
3336 if (r == 0) {
3337 amdgpu_bo_unpin(aobj);
3338 amdgpu_bo_unreserve(aobj);
3339 }
3340 }
3341
3342 if (!fb || !fb->obj[0])
3343 continue;
3345 robj = gem_to_amdgpu_bo(fb->obj[0]);
3346 /* don't unpin kernel fb objects */
3347 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
3348 r = amdgpu_bo_reserve(robj, true);
3349 if (r == 0) {
3350 amdgpu_bo_unpin(robj);
3351 amdgpu_bo_unreserve(robj);
3352 }
3353 }
3354 }
3355 }
3356
3357 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3358 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3359
3360 amdgpu_amdkfd_suspend(adev, !fbcon);
3361
3362 amdgpu_ras_suspend(adev);
3363
3364 r = amdgpu_device_ip_suspend_phase1(adev);
3365
3366 /* evict vram memory */
3367 amdgpu_bo_evict_vram(adev);
3368
3369 amdgpu_fence_driver_suspend(adev);
3370
3371 r = amdgpu_device_ip_suspend_phase2(adev);
3372
3373 /* evict remaining vram memory
3374 * This second call to evict vram is to evict the gart page table
3375 * using the CPU.
3376 */
3377 amdgpu_bo_evict_vram(adev);
3378
3379 return 0;
3380 }
3381
3382 /**
3383 * amdgpu_device_resume - initiate device resume
3384 *
3385 * @dev: drm dev pointer
3386 * @fbcon: notify the fbdev of resume
3388 *
3389 * Bring the hw back to operating state (all asics).
3390 * Returns 0 for success or an error on failure.
3391 * Called at driver resume.
3392 */
3393 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3394 {
3395 struct drm_connector *connector;
3396 struct drm_connector_list_iter iter;
3397 struct amdgpu_device *adev = dev->dev_private;
3398 struct drm_crtc *crtc;
3399 int r = 0;
3400
3401 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3402 return 0;
3403
3404 /* post card */
3405 if (amdgpu_device_need_post(adev)) {
3406 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
3407 if (r)
3408 DRM_ERROR("amdgpu asic init failed\n");
3409 }
3410
3411 r = amdgpu_device_ip_resume(adev);
3412 if (r) {
3413 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
3414 return r;
3415 }
3416 amdgpu_fence_driver_resume(adev);
3417
3419 r = amdgpu_device_ip_late_init(adev);
3420 if (r)
3421 return r;
3422
3423 queue_delayed_work(system_wq, &adev->delayed_init_work,
3424 msecs_to_jiffies(AMDGPU_RESUME_MS));
3425
3426 if (!amdgpu_device_has_dc_support(adev)) {
3427 /* pin cursors */
3428 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3429 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3430
3431 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
3432 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
3433 r = amdgpu_bo_reserve(aobj, true);
3434 if (r == 0) {
3435 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
3436 if (r != 0)
3437 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
3438 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
3439 amdgpu_bo_unreserve(aobj);
3440 }
3441 }
3442 }
3443 }
3444 r = amdgpu_amdkfd_resume(adev, !fbcon);
3445 if (r)
3446 return r;
3447
3448 /* Make sure IB tests flushed */
3449 flush_delayed_work(&adev->delayed_init_work);
3450
3451 /* blat the mode back in */
3452 if (fbcon) {
3453 if (!amdgpu_device_has_dc_support(adev)) {
3454 /* pre DCE11 */
3455 drm_helper_resume_force_mode(dev);
3456
3457 /* turn on display hw */
3458 drm_modeset_lock_all(dev);
3459
3460 drm_connector_list_iter_begin(dev, &iter);
3461 drm_for_each_connector_iter(connector, &iter)
3462 drm_helper_connector_dpms(connector,
3463 DRM_MODE_DPMS_ON);
3464 drm_connector_list_iter_end(&iter);
3465
3466 drm_modeset_unlock_all(dev);
3467 }
3468 amdgpu_fbdev_set_suspend(adev, 0);
3469 }
3470
3471 drm_kms_helper_poll_enable(dev);
3472
3473 amdgpu_ras_resume(adev);
3474
3475 /*
3476 * Most of the connector probing functions try to acquire runtime pm
3477 * refs to ensure that the GPU is powered on when connector polling is
3478 * performed. Since we're calling this from a runtime PM callback,
3479 * trying to acquire rpm refs will cause us to deadlock.
3480 *
3481 * Since we're guaranteed to be holding the rpm lock, it's safe to
3482 * temporarily disable the rpm helpers so this doesn't deadlock us.
3483 */
3484 #ifdef CONFIG_PM
3485 dev->dev->power.disable_depth++;
3486 #endif
3487 if (!amdgpu_device_has_dc_support(adev))
3488 drm_helper_hpd_irq_event(dev);
3489 else
3490 drm_kms_helper_hotplug_event(dev);
3491 #ifdef CONFIG_PM
3492 dev->dev->power.disable_depth--;
3493 #endif
3494 adev->in_suspend = false;
3495
3496 return 0;
3497 }
3498
3499 /**
3500 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3501 *
3502 * @adev: amdgpu_device pointer
3503 *
3504 * The list of all the hardware IPs that make up the asic is walked and
3505 * the check_soft_reset callbacks are run. check_soft_reset determines
3506 * if the asic is still hung or not.
3507 * Returns true if any of the IPs are still in a hung state, false if not.
3508 */
3509 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3510 {
3511 int i;
3512 bool asic_hang = false;
3513
3514 if (amdgpu_sriov_vf(adev))
3515 return true;
3516
3517 if (amdgpu_asic_need_full_reset(adev))
3518 return true;
3519
3520 for (i = 0; i < adev->num_ip_blocks; i++) {
3521 if (!adev->ip_blocks[i].status.valid)
3522 continue;
3523 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3524 adev->ip_blocks[i].status.hang =
3525 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3526 if (adev->ip_blocks[i].status.hang) {
3527 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3528 asic_hang = true;
3529 }
3530 }
3531 return asic_hang;
3532 }
3533
3534 /**
3535 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3536 *
3537 * @adev: amdgpu_device pointer
3538 *
3539 * The list of all the hardware IPs that make up the asic is walked and the
3540 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
3541 * handles any IP specific hardware or software state changes that are
3542 * necessary for a soft reset to succeed.
3543 * Returns 0 on success, negative error code on failure.
3544 */
3545 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3546 {
3547 int i, r = 0;
3548
3549 for (i = 0; i < adev->num_ip_blocks; i++) {
3550 if (!adev->ip_blocks[i].status.valid)
3551 continue;
3552 if (adev->ip_blocks[i].status.hang &&
3553 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3554 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3555 if (r)
3556 return r;
3557 }
3558 }
3559
3560 return 0;
3561 }
3562
3563 /**
3564 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3565 *
3566 * @adev: amdgpu_device pointer
3567 *
3568 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
3569 * reset is necessary to recover.
3570 * Returns true if a full asic reset is required, false if not.
3571 */
3572 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3573 {
3574 int i;
3575
3576 if (amdgpu_asic_need_full_reset(adev))
3577 return true;
3578
3579 for (i = 0; i < adev->num_ip_blocks; i++) {
3580 if (!adev->ip_blocks[i].status.valid)
3581 continue;
3582 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3583 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3584 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3585 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3586 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3587 if (adev->ip_blocks[i].status.hang) {
3588 DRM_INFO("Some block need full reset!\n");
3589 return true;
3590 }
3591 }
3592 }
3593 return false;
3594 }
3595
3596 /**
3597 * amdgpu_device_ip_soft_reset - do a soft reset
3598 *
3599 * @adev: amdgpu_device pointer
3600 *
3601 * The list of all the hardware IPs that make up the asic is walked and the
3602 * soft_reset callbacks are run if the block is hung. soft_reset handles any
3603 * IP specific hardware or software state changes that are necessary to soft
3604 * reset the IP.
3605 * Returns 0 on success, negative error code on failure.
3606 */
3607 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3608 {
3609 int i, r = 0;
3610
3611 for (i = 0; i < adev->num_ip_blocks; i++) {
3612 if (!adev->ip_blocks[i].status.valid)
3613 continue;
3614 if (adev->ip_blocks[i].status.hang &&
3615 adev->ip_blocks[i].version->funcs->soft_reset) {
3616 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3617 if (r)
3618 return r;
3619 }
3620 }
3621
3622 return 0;
3623 }
3624
3625 /**
3626 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3627 *
3628 * @adev: amdgpu_device pointer
3629 *
3630 * The list of all the hardware IPs that make up the asic is walked and the
3631 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
3632 * handles any IP specific hardware or software state changes that are
3633 * necessary after the IP has been soft reset.
3634 * Returns 0 on success, negative error code on failure.
3635 */
3636 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3637 {
3638 int i, r = 0;
3639
3640 for (i = 0; i < adev->num_ip_blocks; i++) {
3641 if (!adev->ip_blocks[i].status.valid)
3642 continue;
3643 if (adev->ip_blocks[i].status.hang &&
3644 adev->ip_blocks[i].version->funcs->post_soft_reset)
3645 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3646 if (r)
3647 return r;
3648 }
3649
3650 return 0;
3651 }
3652
3653 /**
3654 * amdgpu_device_recover_vram - Recover some VRAM contents
3655 *
3656 * @adev: amdgpu_device pointer
3657 *
3658 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
3659 * restore things like GPUVM page tables after a GPU reset where
3660 * the contents of VRAM might be lost.
3661 *
3662 * Returns:
3663 * 0 on success, negative error code on failure.
3664 */
3665 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
3666 {
3667 struct dma_fence *fence = NULL, *next = NULL;
3668 struct amdgpu_bo *shadow;
3669 long r = 1, tmo;
3670
3671 if (amdgpu_sriov_runtime(adev))
3672 tmo = msecs_to_jiffies(8000);
3673 else
3674 tmo = msecs_to_jiffies(100);
3675
3676 DRM_INFO("recover vram bo from shadow start\n");
3677 mutex_lock(&adev->shadow_list_lock);
3678 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
3679
3680 /* No need to recover an evicted BO */
3681 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
3682 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
3683 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
3684 continue;
3685
3686 r = amdgpu_bo_restore_shadow(shadow, &next);
3687 if (r)
3688 break;
3689
3690 if (fence) {
3691 tmo = dma_fence_wait_timeout(fence, false, tmo);
3692 dma_fence_put(fence);
3693 fence = next;
3694 if (tmo == 0) {
3695 r = -ETIMEDOUT;
3696 break;
3697 } else if (tmo < 0) {
3698 r = tmo;
3699 break;
3700 }
3701 } else {
3702 fence = next;
3703 }
3704 }
3705 mutex_unlock(&adev->shadow_list_lock);
3706
3707 if (fence)
3708 tmo = dma_fence_wait_timeout(fence, false, tmo);
3709 dma_fence_put(fence);
3710
3711 if (r < 0 || tmo <= 0) {
3712 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
3713 return -EIO;
3714 }
3715
3716 DRM_INFO("recover vram bo from shadow done\n");
3717 return 0;
3718 }
3719
3720
3721 /**
3722 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3723 *
3724 * @adev: amdgpu device pointer
3725 * @from_hypervisor: request from hypervisor
3726 *
3727 * Do a VF FLR and reinitialize the ASIC.
3728 * Returns 0 on success, negative error code on failure.
3729 */
3730 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3731 bool from_hypervisor)
3732 {
3733 int r;
3734
3735 if (from_hypervisor)
3736 r = amdgpu_virt_request_full_gpu(adev, true);
3737 else
3738 r = amdgpu_virt_reset_gpu(adev);
3739 if (r)
3740 return r;
3741
3742 /* Resume IP prior to SMC */
3743 r = amdgpu_device_ip_reinit_early_sriov(adev);
3744 if (r)
3745 goto error;
3746
3747 amdgpu_virt_init_data_exchange(adev);
3748 /* we need to recover the GART prior to resuming SMC/CP/SDMA */
3749 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3750
3751 r = amdgpu_device_fw_loading(adev);
3752 if (r)
3753 goto error;
3754
3755 /* now we are okay to resume SMC/CP/SDMA */
3756 r = amdgpu_device_ip_reinit_late_sriov(adev);
3757 if (r)
3758 goto error;
3759
3760 amdgpu_irq_gpu_reset_resume_helper(adev);
3761 r = amdgpu_ib_ring_tests(adev);
3762 amdgpu_amdkfd_post_reset(adev);
3763
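/*
 * Both the success and the error paths fall through here: the full GPU
 * access requested above must be released in either case.
 */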
3764 error:
3765 amdgpu_virt_release_full_gpu(adev, true);
3766 if (!r && (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)) {
3767 amdgpu_inc_vram_lost(adev);
3768 r = amdgpu_device_recover_vram(adev);
3769 }
3770
3771 return r;
3772 }
3773
3774 /**
3775 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
3776 *
3777 * @adev: amdgpu device pointer
3778 *
3779 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
3780 * a hung GPU.
3781 */
3782 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
3783 {
3784 if (!amdgpu_device_ip_check_soft_reset(adev)) {
3785 DRM_INFO("Timeout, but no hardware hang detected.\n");
3786 return false;
3787 }
3788
3789 if (amdgpu_gpu_recovery == 0)
3790 goto disabled;
3791
3792 if (amdgpu_sriov_vf(adev))
3793 return true;
3794
3795 if (amdgpu_gpu_recovery == -1) {
3796 switch (adev->asic_type) {
3797 case CHIP_BONAIRE:
3798 case CHIP_HAWAII:
3799 case CHIP_TOPAZ:
3800 case CHIP_TONGA:
3801 case CHIP_FIJI:
3802 case CHIP_POLARIS10:
3803 case CHIP_POLARIS11:
3804 case CHIP_POLARIS12:
3805 case CHIP_VEGAM:
3806 case CHIP_VEGA20:
3807 case CHIP_VEGA10:
3808 case CHIP_VEGA12:
3809 case CHIP_RAVEN:
3810 case CHIP_ARCTURUS:
3811 case CHIP_RENOIR:
3812 case CHIP_NAVI10:
3813 case CHIP_NAVI14:
3814 case CHIP_NAVI12:
3815 break;
3816 default:
3817 goto disabled;
3818 }
3819 }
3820
3821 return true;
3822
3823 disabled:
3824 DRM_INFO("GPU recovery disabled.\n");
3825 return false;
3826 }
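
/*
 * Illustrative sketch (not part of this file): a caller such as the job
 * timeout handler in amdgpu_job.c is expected to gate recovery on this
 * check, roughly:
 *
 *	if (amdgpu_device_should_recover_gpu(ring->adev))
 *		amdgpu_device_gpu_recover(ring->adev, job);
 */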
3827
3828
3829 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
3830 struct amdgpu_job *job,
3831 bool *need_full_reset_arg)
3832 {
3833 int i, r = 0;
3834 bool need_full_reset = *need_full_reset_arg;
3835
3836 /* block all schedulers and reset given job's ring */
3837 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3838 struct amdgpu_ring *ring = adev->rings[i];
3839
3840 if (!ring || !ring->sched.thread)
3841 continue;
3842
3843 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3844 amdgpu_fence_driver_force_completion(ring);
3845 }
3846
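/*
 * Bump the guilty job's karma so the scheduler can mark its entity
 * guilty once the hang limit is exceeded.
 */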
3847 if (job)
3848 drm_sched_increase_karma(&job->base);
3849
3850 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
3851 if (!amdgpu_sriov_vf(adev)) {
3852
3853 if (!need_full_reset)
3854 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3855
3856 if (!need_full_reset) {
3857 amdgpu_device_ip_pre_soft_reset(adev);
3858 r = amdgpu_device_ip_soft_reset(adev);
3859 amdgpu_device_ip_post_soft_reset(adev);
3860 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3861 DRM_INFO("soft reset failed, will fallback to full reset!\n");
3862 need_full_reset = true;
3863 }
3864 }
3865
3866 if (need_full_reset)
3867 r = amdgpu_device_ip_suspend(adev);
3868
3869 *need_full_reset_arg = need_full_reset;
3870 }
3871
3872 return r;
3873 }
3874
3875 static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
3876 struct list_head *device_list_handle,
3877 bool *need_full_reset_arg)
3878 {
3879 struct amdgpu_device *tmp_adev = NULL;
3880 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
3881 int r = 0;
3882
3883 /*
3884 * ASIC reset has to be done on all XGMI hive nodes ASAP
3885 * to allow proper link negotiation in FW (within 1 sec)
3886 */
3887 if (need_full_reset) {
3888 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3889 /* For XGMI run all resets in parallel to speed up the process */
3890 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
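/*
 * queue_work() returns false if the work was already queued,
 * i.e. a reset for this node is already pending.
 */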
3891 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
3892 r = -EALREADY;
3893 } else
3894 r = amdgpu_asic_reset(tmp_adev);
3895
3896 if (r) {
3897 DRM_ERROR("ASIC reset failed with error %d for drm dev %s",
3898 r, tmp_adev->ddev->unique);
3899 break;
3900 }
3901 }
3902
3903 /* For XGMI, wait for all resets to complete before proceeding */
3904 if (!r) {
3905 list_for_each_entry(tmp_adev, device_list_handle,
3906 gmc.xgmi.head) {
3907 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
3908 flush_work(&tmp_adev->xgmi_reset_work);
3909 r = tmp_adev->asic_reset_res;
3910 if (r)
3911 break;
3912 }
3913 }
3914 }
3915 }
3916
3917 if (!r && amdgpu_ras_intr_triggered()) {
3918 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3919 if (tmp_adev->mmhub.funcs &&
3920 tmp_adev->mmhub.funcs->reset_ras_error_count)
3921 tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
3922 }
3923
3924 amdgpu_ras_intr_cleared();
3925 }
3926
3927 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
3928 if (need_full_reset) {
3929 /* post card */
3930 if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
3931 DRM_WARN("asic atom init failed!");
3932
3933 if (!r) {
3934 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
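/*
 * Resume happens in two phases: phase 1 brings back the blocks
 * needed before firmware loading (e.g. COMMON/GMC/IH), phase 2
 * resumes the rest below.
 */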
3935 r = amdgpu_device_ip_resume_phase1(tmp_adev);
3936 if (r)
3937 goto out;
3938
3939 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
3940 if (vram_lost) {
3941 DRM_INFO("VRAM is lost due to GPU reset!\n");
3942 amdgpu_inc_vram_lost(tmp_adev);
3943 }
3944
3945 r = amdgpu_gtt_mgr_recover(
3946 &tmp_adev->mman.bdev.man[TTM_PL_TT]);
3947 if (r)
3948 goto out;
3949
3950 r = amdgpu_device_fw_loading(tmp_adev);
3951 if (r)
3952 goto out;
3953
3954 r = amdgpu_device_ip_resume_phase2(tmp_adev);
3955 if (r)
3956 goto out;
3957
3958 if (vram_lost)
3959 amdgpu_device_fill_reset_magic(tmp_adev);
3960
3961 /*
3962 * Add this ASIC back as tracked, since the reset already
3963 * completed successfully.
3964 */
3965 amdgpu_register_gpu_instance(tmp_adev);
3966
3967 r = amdgpu_device_ip_late_init(tmp_adev);
3968 if (r)
3969 goto out;
3970
3971 amdgpu_fbdev_set_suspend(tmp_adev, 0);
3972
3973 /* must succeed. */
3974 amdgpu_ras_resume(tmp_adev);
3975
3976 /* Update PSP FW topology after reset */
3977 if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
3978 r = amdgpu_xgmi_update_topology(hive, tmp_adev);
3979 }
3980 }
3981
3982
3983 out:
3984 if (!r) {
3985 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
3986 r = amdgpu_ib_ring_tests(tmp_adev);
3987 if (r) {
3988 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
3989 r = amdgpu_device_ip_suspend(tmp_adev);
3990 need_full_reset = true;
3991 r = -EAGAIN;
3992 goto end;
3993 }
3994 }
3995
3996 if (!r)
3997 r = amdgpu_device_recover_vram(tmp_adev);
3998 else
3999 tmp_adev->asic_reset_res = r;
4000 }
4001
4002 end:
4003 *need_full_reset_arg = need_full_reset;
4004 return r;
4005 }
4006
4007 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
4008 {
4009 if (trylock) {
4010 if (!mutex_trylock(&adev->lock_reset))
4011 return false;
4012 } else
4013 mutex_lock(&adev->lock_reset);
4014
4015 atomic_inc(&adev->gpu_reset_counter);
4016 adev->in_gpu_reset = true;
4017 switch (amdgpu_asic_reset_method(adev)) {
4018 case AMD_RESET_METHOD_MODE1:
4019 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4020 break;
4021 case AMD_RESET_METHOD_MODE2:
4022 adev->mp1_state = PP_MP1_STATE_RESET;
4023 break;
4024 default:
4025 adev->mp1_state = PP_MP1_STATE_NONE;
4026 break;
4027 }
4028
4029 return true;
4030 }
4031
4032 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4033 {
4034 amdgpu_vf_error_trans_all(adev);
4035 adev->mp1_state = PP_MP1_STATE_NONE;
4036 adev->in_gpu_reset = false;
4037 mutex_unlock(&adev->lock_reset);
4038 }
4039
4040 /**
4041 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4042 *
4043 * @adev: amdgpu device pointer
4044 * @job: the job which triggered the hang
4045 *
4046 * Attempt to reset the GPU if it has hung (all ASICs):
4047 * do a soft reset or a full reset and reinitialize the ASIC.
4048 * Returns 0 for success or an error on failure.
4049 */
4051 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4052 struct amdgpu_job *job)
4053 {
4054 struct list_head device_list, *device_list_handle = NULL;
4055 bool need_full_reset, job_signaled;
4056 struct amdgpu_hive_info *hive = NULL;
4057 struct amdgpu_device *tmp_adev = NULL;
4058 int i, r = 0;
4059 bool in_ras_intr = amdgpu_ras_intr_triggered();
4060 bool use_baco =
4061 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO);
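
/*
 * A fatal RAS interrupt that cannot be handled via BACO
 * (in_ras_intr && !use_baco) is unrecoverable: below we only stop the
 * jobs and skip both the HW reset and the scheduler resume.
 */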
4063
4064 /*
4065 * Flush RAM to disk so that after reboot
4066 * the user can read log and see why the system rebooted.
4067 */
4068 if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
4069
4070 DRM_WARN("Emergency reboot.");
4071
4072 ksys_sync_helper();
4073 emergency_restart();
4074 }
4075
4076 need_full_reset = job_signaled = false;
4077 INIT_LIST_HEAD(&device_list);
4078
4079 dev_info(adev->dev, "GPU %s begin!\n",
4080 (in_ras_intr && !use_baco) ? "jobs stop" : "reset");
4081
4082 cancel_delayed_work_sync(&adev->delayed_init_work);
4083
4084 hive = amdgpu_get_xgmi_hive(adev, false);
4085
4086 /*
4087 * Here we trylock to avoid a chain of resets executing while this
4088 * timeout handler is running, whether triggered by jobs on different
4089 * adevs in the XGMI hive or by jobs on different schedulers of the
4090 * same device. We always reset all schedulers for a device and all
4091 * devices in an XGMI hive, so that should take care of them too.
4092 */
4093
4094 if (hive && !mutex_trylock(&hive->reset_lock)) {
4095 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4096 job ? job->base.id : -1, hive->hive_id);
4097 return 0;
4098 }
4099
4100 /* Start with the adev pre-ASIC reset, for the soft reset check. */
4101 if (!amdgpu_device_lock_adev(adev, !hive)) {
4102 DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
4103 job ? job->base.id : -1);
4104 return 0;
4105 }
4106
4107 /* Block kfd: SRIOV would do it separately */
4108 if (!amdgpu_sriov_vf(adev))
4109 amdgpu_amdkfd_pre_reset(adev);
4110
4111 /* Build list of devices to reset */
4112 if (adev->gmc.xgmi.num_physical_nodes > 1) {
4113 if (!hive) {
4114 /* unlock kfd: SRIOV would do it separately */
4115 if (!amdgpu_sriov_vf(adev))
4116 amdgpu_amdkfd_post_reset(adev);
4117 amdgpu_device_unlock_adev(adev);
4118 return -ENODEV;
4119 }
4120
4121 /*
4122 * In XGMI hive mode, the device reset is done for all nodes in the
4123 * hive to retrain all XGMI links, and hence the reset sequence is
4124 * executed in a loop over all nodes.
4125 */
4126 device_list_handle = &hive->device_list;
4127 } else {
4128 list_add_tail(&adev->gmc.xgmi.head, &device_list);
4129 device_list_handle = &device_list;
4130 }
4131
4132 /* block all schedulers and reset given job's ring */
4133 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4134 if (tmp_adev != adev) {
4135 amdgpu_device_lock_adev(tmp_adev, false);
4136 if (!amdgpu_sriov_vf(tmp_adev))
4137 amdgpu_amdkfd_pre_reset(tmp_adev);
4138 }
4139
4140 /*
4141 * Mark these ASICs to be reset as untracked first,
4142 * and add them back after the reset completes.
4143 */
4144 amdgpu_unregister_gpu_instance(tmp_adev);
4145
4146 amdgpu_fbdev_set_suspend(tmp_adev, 1);
4147
4148 /* disable ras on ALL IPs */
4149 if (!(in_ras_intr && !use_baco) &&
4150 amdgpu_device_ip_need_full_reset(tmp_adev))
4151 amdgpu_ras_suspend(tmp_adev);
4152
4153 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4154 struct amdgpu_ring *ring = tmp_adev->rings[i];
4155
4156 if (!ring || !ring->sched.thread)
4157 continue;
4158
4159 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4160
4161 if (in_ras_intr && !use_baco)
4162 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4163 }
4164 }
4165
4166
4167 if (in_ras_intr && !use_baco)
4168 goto skip_sched_resume;
4169
4170 /*
4171 * Must check whether the guilty job has signaled here, since after
4172 * this point all old HW fences are force-signaled.
4173 *
4174 * job->base holds a reference to the parent fence.
4175 */
4176 if (job && job->base.s_fence->parent &&
4177 dma_fence_is_signaled(job->base.s_fence->parent))
4178 job_signaled = true;
4179
4180 if (job_signaled) {
4181 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4182 goto skip_hw_reset;
4183 }
4184
4185
4186 /* The guilty job will be freed after this */
4187 r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
4188 if (r) {
4189 /* TODO: should we stop? */
4190 DRM_ERROR("GPU pre-ASIC reset failed with error %d for drm dev %s",
4191 r, adev->ddev->unique);
4192 adev->asic_reset_res = r;
4193 }
4194
4195 retry: /* Rest of the adevs' pre-ASIC reset, from the XGMI hive. */
4196 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4197
4198 if (tmp_adev == adev)
4199 continue;
4200
4201 r = amdgpu_device_pre_asic_reset(tmp_adev,
4202 NULL,
4203 &need_full_reset);
4204 /* TODO: should we stop? */
4205 if (r) {
4206 DRM_ERROR("GPU pre-ASIC reset failed with error %d for drm dev %s",
4207 r, tmp_adev->ddev->unique);
4208 tmp_adev->asic_reset_res = r;
4209 }
4210 }
4211
4212 /* Actual ASIC resets if needed. */
4213 /* TODO Implement XGMI hive reset logic for SRIOV */
4214 if (amdgpu_sriov_vf(adev)) {
4215 r = amdgpu_device_reset_sriov(adev, !job);
4216 if (r)
4217 adev->asic_reset_res = r;
4218 } else {
4219 r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
4220 if (r == -EAGAIN)
4221 goto retry;
4222 }
4223
4224 skip_hw_reset:
4225
4226 /* Post ASIC reset for all devs. */
4227 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4228
4229 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4230 struct amdgpu_ring *ring = tmp_adev->rings[i];
4231
4232 if (!ring || !ring->sched.thread)
4233 continue;
4234
4235 /* No point in resubmitting jobs if we didn't do a HW reset */
4236 if (!tmp_adev->asic_reset_res && !job_signaled)
4237 drm_sched_resubmit_jobs(&ring->sched);
4238
4239 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4240 }
4241
4242 if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4243 drm_helper_resume_force_mode(tmp_adev->ddev);
4244 }
4245
4246 tmp_adev->asic_reset_res = 0;
4247
4248 if (r) {
4249 /* bad news, how do we tell it to userspace? */
4250 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4251 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4252 } else {
4253 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4254 }
4255 }
4256
4257 skip_sched_resume:
4258 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
4259 /* unlock kfd: SRIOV would do it separately */
4260 if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
4261 amdgpu_amdkfd_post_reset(tmp_adev);
4262 amdgpu_device_unlock_adev(tmp_adev);
4263 }
4264
4265 if (hive)
4266 mutex_unlock(&hive->reset_lock);
4267
4268 if (r)
4269 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4270 return r;
4271 }
4272
4273 /**
4274 * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
4275 *
4276 * @adev: amdgpu_device pointer
4277 *
4278 * Fetches and stores in the driver the PCIE capabilities (gen speed
4279 * and lanes) of the slot the device is in. Handles APUs and
4280 * virtualized environments where PCIE config space may not be available.
4281 */
4282 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4283 {
4284 struct pci_dev *pdev;
4285 enum pci_bus_speed speed_cap, platform_speed_cap;
4286 enum pcie_link_width platform_link_width;
4287
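/*
 * The pcie_gen_cap/pcie_lane_cap module parameters, when set, take
 * precedence over anything probed from the device or platform below.
 */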
4288 if (amdgpu_pcie_gen_cap)
4289 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4290
4291 if (amdgpu_pcie_lane_cap)
4292 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4293
4294 /* covers APUs as well */
4295 if (pci_is_root_bus(adev->pdev->bus)) {
4296 if (adev->pm.pcie_gen_mask == 0)
4297 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4298 if (adev->pm.pcie_mlw_mask == 0)
4299 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4300 return;
4301 }
4302
4303 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4304 return;
4305
4306 pcie_bandwidth_available(adev->pdev, NULL,
4307 &platform_speed_cap, &platform_link_width);
4308
4309 if (adev->pm.pcie_gen_mask == 0) {
4310 /* asic caps */
4311 pdev = adev->pdev;
4312 speed_cap = pcie_get_speed_cap(pdev);
4313 if (speed_cap == PCI_SPEED_UNKNOWN) {
4314 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4315 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4316 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4317 } else {
4318 if (speed_cap == PCIE_SPEED_16_0GT)
4319 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4320 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4321 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4322 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
4323 else if (speed_cap == PCIE_SPEED_8_0GT)
4324 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4325 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4326 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4327 else if (speed_cap == PCIE_SPEED_5_0GT)
4328 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4329 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
4330 else
4331 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
4332 }
4333 /* platform caps */
4334 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
4335 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4336 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4337 } else {
4338 if (platform_speed_cap == PCIE_SPEED_16_0GT)
4339 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4340 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4341 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
4342 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
4343 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
4344 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4345 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4346 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
4347 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
4348 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4349 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
4350 else
4351 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
4352
4353 }
4354 }
4355 if (adev->pm.pcie_mlw_mask == 0) {
4356 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
4357 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
4358 } else {
4359 switch (platform_link_width) {
4360 case PCIE_LNK_X32:
4361 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
4362 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4363 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4364 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4365 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4366 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4367 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4368 break;
4369 case PCIE_LNK_X16:
4370 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
4371 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4372 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4373 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4374 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4375 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4376 break;
4377 case PCIE_LNK_X12:
4378 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
4379 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4380 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4381 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4382 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4383 break;
4384 case PCIE_LNK_X8:
4385 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
4386 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4387 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4388 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4389 break;
4390 case PCIE_LNK_X4:
4391 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
4392 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4393 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4394 break;
4395 case PCIE_LNK_X2:
4396 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
4397 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
4398 break;
4399 case PCIE_LNK_X1:
4400 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
4401 break;
4402 default:
4403 break;
4404 }
4405 }
4406 }
4407 }
4408
4409 int amdgpu_device_baco_enter(struct drm_device *dev)
4410 {
4411 struct amdgpu_device *adev = dev->dev_private;
4412 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4413
4414 if (!amdgpu_device_supports_baco(adev->ddev))
4415 return -ENOTSUPP;
4416
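/*
 * With RAS supported, doorbell interrupts are disabled across BACO
 * entry and re-enabled on exit below, presumably to avoid spurious
 * doorbell events while the ASIC is in BACO.
 */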
4417 if (ras && ras->supported)
4418 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
4419
4420 return amdgpu_dpm_baco_enter(adev);
4421 }
4422
4423 int amdgpu_device_baco_exit(struct drm_device *dev)
4424 {
4425 struct amdgpu_device *adev = dev->dev_private;
4426 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4427 int ret = 0;
4428
4429 if (!amdgpu_device_supports_baco(adev->ddev))
4430 return -ENOTSUPP;
4431
4432 ret = amdgpu_dpm_baco_exit(adev);
4433 if (ret)
4434 return ret;
4435
4436 if (ras && ras->supported)
4437 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
4438
4439 return 0;
4440 }