]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drm/ttm: use an ttm operation ctx for ttm_bo_move_xxx
[thirdparty/kernel/stable.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
CommitLineData
d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
0875dc9e 28#include <linux/kthread.h>
d38ceaf9
AD
29#include <linux/console.h>
30#include <linux/slab.h>
31#include <linux/debugfs.h>
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
4562236b 34#include <drm/drm_atomic_helper.h>
d38ceaf9
AD
35#include <drm/amdgpu_drm.h>
36#include <linux/vgaarb.h>
37#include <linux/vga_switcheroo.h>
38#include <linux/efi.h>
39#include "amdgpu.h"
f4b373f4 40#include "amdgpu_trace.h"
d38ceaf9
AD
41#include "amdgpu_i2c.h"
42#include "atom.h"
43#include "amdgpu_atombios.h"
a5bde2f9 44#include "amdgpu_atomfirmware.h"
d0dd7f0c 45#include "amd_pcie.h"
33f34802
KW
46#ifdef CONFIG_DRM_AMDGPU_SI
47#include "si.h"
48#endif
a2e73f56
AD
49#ifdef CONFIG_DRM_AMDGPU_CIK
50#include "cik.h"
51#endif
aaa36a97 52#include "vi.h"
460826e6 53#include "soc15.h"
d38ceaf9 54#include "bif/bif_4_1_d.h"
9accf2fd 55#include <linux/pci.h>
bec86378 56#include <linux/firmware.h>
89041940 57#include "amdgpu_vf_error.h"
d38ceaf9 58
ba997709 59#include "amdgpu_amdkfd.h"
d2f52ac8 60#include "amdgpu_pm.h"
d38ceaf9 61
e2a75f88 62MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
2d2e5e7e 63MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
e2a75f88 64
2dc80b00
S
65#define AMDGPU_RESUME_MS 2000
66
d38ceaf9
AD
67static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
763efb6c 69static int amdgpu_debugfs_init(struct amdgpu_device *adev);
d38ceaf9
AD
70
71static const char *amdgpu_asic_name[] = {
da69c161
KW
72 "TAHITI",
73 "PITCAIRN",
74 "VERDE",
75 "OLAND",
76 "HAINAN",
d38ceaf9
AD
77 "BONAIRE",
78 "KAVERI",
79 "KABINI",
80 "HAWAII",
81 "MULLINS",
82 "TOPAZ",
83 "TONGA",
48299f95 84 "FIJI",
d38ceaf9 85 "CARRIZO",
139f4917 86 "STONEY",
2cc0c0b5
FC
87 "POLARIS10",
88 "POLARIS11",
c4642a47 89 "POLARIS12",
d4196f01 90 "VEGA10",
2ca8a5d2 91 "RAVEN",
d38ceaf9
AD
92 "LAST",
93};
94
95bool amdgpu_device_is_px(struct drm_device *dev)
96{
97 struct amdgpu_device *adev = dev->dev_private;
98
2f7d10b3 99 if (adev->flags & AMD_IS_PX)
d38ceaf9
AD
100 return true;
101 return false;
102}
103
104/*
105 * MMIO register access helper functions.
106 */
107uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
15d72fd7 108 uint32_t acc_flags)
d38ceaf9 109{
f4b373f4
TSD
110 uint32_t ret;
111
43ca8efa 112 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 113 return amdgpu_virt_kiq_rreg(adev, reg);
bc992ba5 114
15d72fd7 115 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
f4b373f4 116 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
d38ceaf9
AD
117 else {
118 unsigned long flags;
d38ceaf9
AD
119
120 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
121 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
122 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
123 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
d38ceaf9 124 }
f4b373f4
TSD
125 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
126 return ret;
d38ceaf9
AD
127}
128
129void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
15d72fd7 130 uint32_t acc_flags)
d38ceaf9 131{
f4b373f4 132 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
4e99a44e 133
47ed4e1c
KW
134 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
135 adev->last_mm_index = v;
136 }
137
43ca8efa 138 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
bc992ba5 139 return amdgpu_virt_kiq_wreg(adev, reg, v);
bc992ba5 140
15d72fd7 141 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
d38ceaf9
AD
142 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
143 else {
144 unsigned long flags;
145
146 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
147 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
148 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
149 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
150 }
47ed4e1c
KW
151
152 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
153 udelay(500);
154 }
d38ceaf9
AD
155}
156
157u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
158{
159 if ((reg * 4) < adev->rio_mem_size)
160 return ioread32(adev->rio_mem + (reg * 4));
161 else {
162 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
163 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
164 }
165}
166
167void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
168{
47ed4e1c
KW
169 if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
170 adev->last_mm_index = v;
171 }
d38ceaf9
AD
172
173 if ((reg * 4) < adev->rio_mem_size)
174 iowrite32(v, adev->rio_mem + (reg * 4));
175 else {
176 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
177 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
178 }
47ed4e1c
KW
179
180 if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
181 udelay(500);
182 }
d38ceaf9
AD
183}
184
185/**
186 * amdgpu_mm_rdoorbell - read a doorbell dword
187 *
188 * @adev: amdgpu_device pointer
189 * @index: doorbell index
190 *
191 * Returns the value in the doorbell aperture at the
192 * requested doorbell index (CIK).
193 */
194u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
195{
196 if (index < adev->doorbell.num_doorbells) {
197 return readl(adev->doorbell.ptr + index);
198 } else {
199 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
200 return 0;
201 }
202}
203
204/**
205 * amdgpu_mm_wdoorbell - write a doorbell dword
206 *
207 * @adev: amdgpu_device pointer
208 * @index: doorbell index
209 * @v: value to write
210 *
211 * Writes @v to the doorbell aperture at the
212 * requested doorbell index (CIK).
213 */
214void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
215{
216 if (index < adev->doorbell.num_doorbells) {
217 writel(v, adev->doorbell.ptr + index);
218 } else {
219 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
220 }
221}
222
832be404
KW
223/**
224 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
225 *
226 * @adev: amdgpu_device pointer
227 * @index: doorbell index
228 *
229 * Returns the value in the doorbell aperture at the
230 * requested doorbell index (VEGA10+).
231 */
232u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
233{
234 if (index < adev->doorbell.num_doorbells) {
235 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
236 } else {
237 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
238 return 0;
239 }
240}
241
242/**
243 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
244 *
245 * @adev: amdgpu_device pointer
246 * @index: doorbell index
247 * @v: value to write
248 *
249 * Writes @v to the doorbell aperture at the
250 * requested doorbell index (VEGA10+).
251 */
252void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
253{
254 if (index < adev->doorbell.num_doorbells) {
255 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
256 } else {
257 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
258 }
259}
260
d38ceaf9
AD
261/**
262 * amdgpu_invalid_rreg - dummy reg read function
263 *
264 * @adev: amdgpu device pointer
265 * @reg: offset of register
266 *
267 * Dummy register read function. Used for register blocks
268 * that certain asics don't have (all asics).
269 * Returns the value in the register.
270 */
271static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
272{
273 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
274 BUG();
275 return 0;
276}
277
278/**
279 * amdgpu_invalid_wreg - dummy reg write function
280 *
281 * @adev: amdgpu device pointer
282 * @reg: offset of register
283 * @v: value to write to the register
284 *
285 * Dummy register read function. Used for register blocks
286 * that certain asics don't have (all asics).
287 */
288static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
289{
290 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
291 reg, v);
292 BUG();
293}
294
295/**
296 * amdgpu_block_invalid_rreg - dummy reg read function
297 *
298 * @adev: amdgpu device pointer
299 * @block: offset of instance
300 * @reg: offset of register
301 *
302 * Dummy register read function. Used for register blocks
303 * that certain asics don't have (all asics).
304 * Returns the value in the register.
305 */
306static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
307 uint32_t block, uint32_t reg)
308{
309 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
310 reg, block);
311 BUG();
312 return 0;
313}
314
315/**
316 * amdgpu_block_invalid_wreg - dummy reg write function
317 *
318 * @adev: amdgpu device pointer
319 * @block: offset of instance
320 * @reg: offset of register
321 * @v: value to write to the register
322 *
323 * Dummy register read function. Used for register blocks
324 * that certain asics don't have (all asics).
325 */
326static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
327 uint32_t block,
328 uint32_t reg, uint32_t v)
329{
330 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
331 reg, block, v);
332 BUG();
333}
334
335static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
336{
a4a02777
CK
337 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
338 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
339 &adev->vram_scratch.robj,
340 &adev->vram_scratch.gpu_addr,
341 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
342}
343
344static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
345{
078af1a3 346 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
347}
348
349/**
350 * amdgpu_program_register_sequence - program an array of registers.
351 *
352 * @adev: amdgpu_device pointer
353 * @registers: pointer to the register array
354 * @array_size: size of the register array
355 *
356 * Programs an array or registers with and and or masks.
357 * This is a helper for setting golden registers.
358 */
359void amdgpu_program_register_sequence(struct amdgpu_device *adev,
360 const u32 *registers,
361 const u32 array_size)
362{
363 u32 tmp, reg, and_mask, or_mask;
364 int i;
365
366 if (array_size % 3)
367 return;
368
369 for (i = 0; i < array_size; i +=3) {
370 reg = registers[i + 0];
371 and_mask = registers[i + 1];
372 or_mask = registers[i + 2];
373
374 if (and_mask == 0xffffffff) {
375 tmp = or_mask;
376 } else {
377 tmp = RREG32(reg);
378 tmp &= ~and_mask;
379 tmp |= or_mask;
380 }
381 WREG32(reg, tmp);
382 }
383}
384
385void amdgpu_pci_config_reset(struct amdgpu_device *adev)
386{
387 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
388}
389
390/*
391 * GPU doorbell aperture helpers function.
392 */
393/**
394 * amdgpu_doorbell_init - Init doorbell driver information.
395 *
396 * @adev: amdgpu_device pointer
397 *
398 * Init doorbell driver information (CIK)
399 * Returns 0 on success, error on failure.
400 */
401static int amdgpu_doorbell_init(struct amdgpu_device *adev)
402{
705e519e
CK
403 /* No doorbell on SI hardware generation */
404 if (adev->asic_type < CHIP_BONAIRE) {
405 adev->doorbell.base = 0;
406 adev->doorbell.size = 0;
407 adev->doorbell.num_doorbells = 0;
408 adev->doorbell.ptr = NULL;
409 return 0;
410 }
411
d6895ad3
CK
412 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
413 return -EINVAL;
414
d38ceaf9
AD
415 /* doorbell bar mapping */
416 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
417 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
418
edf600da 419 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
420 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
421 if (adev->doorbell.num_doorbells == 0)
422 return -EINVAL;
423
8972e5d2
CK
424 adev->doorbell.ptr = ioremap(adev->doorbell.base,
425 adev->doorbell.num_doorbells *
426 sizeof(u32));
427 if (adev->doorbell.ptr == NULL)
d38ceaf9 428 return -ENOMEM;
d38ceaf9
AD
429
430 return 0;
431}
432
433/**
434 * amdgpu_doorbell_fini - Tear down doorbell driver information.
435 *
436 * @adev: amdgpu_device pointer
437 *
438 * Tear down doorbell driver information (CIK)
439 */
440static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
441{
442 iounmap(adev->doorbell.ptr);
443 adev->doorbell.ptr = NULL;
444}
445
446/**
447 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
448 * setup amdkfd
449 *
450 * @adev: amdgpu_device pointer
451 * @aperture_base: output returning doorbell aperture base physical address
452 * @aperture_size: output returning doorbell aperture size in bytes
453 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
454 *
455 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
456 * takes doorbells required for its own rings and reports the setup to amdkfd.
457 * amdgpu reserved doorbells are at the start of the doorbell aperture.
458 */
459void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
460 phys_addr_t *aperture_base,
461 size_t *aperture_size,
462 size_t *start_offset)
463{
464 /*
465 * The first num_doorbells are used by amdgpu.
466 * amdkfd takes whatever's left in the aperture.
467 */
468 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
469 *aperture_base = adev->doorbell.base;
470 *aperture_size = adev->doorbell.size;
471 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
472 } else {
473 *aperture_base = 0;
474 *aperture_size = 0;
475 *start_offset = 0;
476 }
477}
478
479/*
480 * amdgpu_wb_*()
455a7bc2 481 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 482 * with the status of certain GPU events (fences, ring pointers,etc.).
d38ceaf9
AD
483 */
484
485/**
486 * amdgpu_wb_fini - Disable Writeback and free memory
487 *
488 * @adev: amdgpu_device pointer
489 *
490 * Disables Writeback and frees the Writeback memory (all asics).
491 * Used at driver shutdown.
492 */
493static void amdgpu_wb_fini(struct amdgpu_device *adev)
494{
495 if (adev->wb.wb_obj) {
a76ed485
AD
496 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
497 &adev->wb.gpu_addr,
498 (void **)&adev->wb.wb);
d38ceaf9
AD
499 adev->wb.wb_obj = NULL;
500 }
501}
502
503/**
504 * amdgpu_wb_init- Init Writeback driver info and allocate memory
505 *
506 * @adev: amdgpu_device pointer
507 *
455a7bc2 508 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
509 * Used at driver startup.
510 * Returns 0 on success or an -error on failure.
511 */
512static int amdgpu_wb_init(struct amdgpu_device *adev)
513{
514 int r;
515
516 if (adev->wb.wb_obj == NULL) {
97407b63
AD
517 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
518 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
519 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
520 &adev->wb.wb_obj, &adev->wb.gpu_addr,
521 (void **)&adev->wb.wb);
d38ceaf9
AD
522 if (r) {
523 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
524 return r;
525 }
d38ceaf9
AD
526
527 adev->wb.num_wb = AMDGPU_MAX_WB;
528 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
529
530 /* clear wb memory */
60a970a6 531 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
d38ceaf9
AD
532 }
533
534 return 0;
535}
536
537/**
538 * amdgpu_wb_get - Allocate a wb entry
539 *
540 * @adev: amdgpu_device pointer
541 * @wb: wb index
542 *
543 * Allocate a wb slot for use by the driver (all asics).
544 * Returns 0 on success or -EINVAL on failure.
545 */
546int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
547{
548 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 549
97407b63 550 if (offset < adev->wb.num_wb) {
7014285a 551 __set_bit(offset, adev->wb.used);
63ae07ca 552 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
553 return 0;
554 } else {
555 return -EINVAL;
556 }
557}
558
d38ceaf9
AD
559/**
560 * amdgpu_wb_free - Free a wb entry
561 *
562 * @adev: amdgpu_device pointer
563 * @wb: wb index
564 *
565 * Free a wb slot allocated for use by the driver (all asics)
566 */
567void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
568{
569 if (wb < adev->wb.num_wb)
63ae07ca 570 __clear_bit(wb >> 3, adev->wb.used);
d38ceaf9
AD
571}
572
573/**
574 * amdgpu_vram_location - try to find VRAM location
575 * @adev: amdgpu device structure holding all necessary informations
576 * @mc: memory controller structure holding memory informations
577 * @base: base address at which to put VRAM
578 *
455a7bc2 579 * Function will try to place VRAM at base address provided
3d647c8f 580 * as parameter.
d38ceaf9
AD
581 */
582void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
583{
584 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
585
586 mc->vram_start = base;
d38ceaf9
AD
587 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
588 if (limit && limit < mc->real_vram_size)
589 mc->real_vram_size = limit;
590 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
591 mc->mc_vram_size >> 20, mc->vram_start,
592 mc->vram_end, mc->real_vram_size >> 20);
593}
594
595/**
6f02a696 596 * amdgpu_gart_location - try to find GTT location
d38ceaf9
AD
597 * @adev: amdgpu device structure holding all necessary informations
598 * @mc: memory controller structure holding memory informations
599 *
600 * Function will place try to place GTT before or after VRAM.
601 *
602 * If GTT size is bigger than space left then we ajust GTT size.
603 * Thus function will never fails.
604 *
605 * FIXME: when reducing GTT size align new size on power of 2.
606 */
6f02a696 607void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
d38ceaf9
AD
608{
609 u64 size_af, size_bf;
610
ed21c047
CK
611 size_af = adev->mc.mc_mask - mc->vram_end;
612 size_bf = mc->vram_start;
d38ceaf9 613 if (size_bf > size_af) {
6f02a696 614 if (mc->gart_size > size_bf) {
d38ceaf9 615 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 616 mc->gart_size = size_bf;
d38ceaf9 617 }
6f02a696 618 mc->gart_start = 0;
d38ceaf9 619 } else {
6f02a696 620 if (mc->gart_size > size_af) {
d38ceaf9 621 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 622 mc->gart_size = size_af;
d38ceaf9 623 }
b98f1b9e
CK
624 /* VCE doesn't like it when BOs cross a 4GB segment, so align
625 * the GART base on a 4GB boundary as well.
626 */
627 mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
d38ceaf9 628 }
6f02a696 629 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 630 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 631 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
632}
633
a05502e5
HC
634/*
635 * Firmware Reservation functions
636 */
637/**
638 * amdgpu_fw_reserve_vram_fini - free fw reserved vram
639 *
640 * @adev: amdgpu_device pointer
641 *
642 * free fw reserved vram if it has been reserved.
643 */
644void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
645{
646 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
647 NULL, &adev->fw_vram_usage.va);
648}
649
650/**
651 * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
652 *
653 * @adev: amdgpu_device pointer
654 *
655 * create bo vram reservation from fw.
656 */
657int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
658{
c13c55d6 659 struct ttm_operation_ctx ctx = { false, false };
a05502e5 660 int r = 0;
3c738893 661 int i;
a05502e5 662 u64 vram_size = adev->mc.visible_vram_size;
3c738893
HC
663 u64 offset = adev->fw_vram_usage.start_offset;
664 u64 size = adev->fw_vram_usage.size;
665 struct amdgpu_bo *bo;
a05502e5
HC
666
667 adev->fw_vram_usage.va = NULL;
668 adev->fw_vram_usage.reserved_bo = NULL;
669
670 if (adev->fw_vram_usage.size > 0 &&
671 adev->fw_vram_usage.size <= vram_size) {
672
673 r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
3c738893 674 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
a05502e5
HC
675 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
676 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
677 &adev->fw_vram_usage.reserved_bo);
678 if (r)
679 goto error_create;
680
681 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
682 if (r)
683 goto error_reserve;
3c738893
HC
684
685 /* remove the original mem node and create a new one at the
686 * request position
687 */
688 bo = adev->fw_vram_usage.reserved_bo;
689 offset = ALIGN(offset, PAGE_SIZE);
690 for (i = 0; i < bo->placement.num_placement; ++i) {
691 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
692 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
693 }
694
695 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
c13c55d6
CK
696 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
697 &bo->tbo.mem, &ctx);
3c738893
HC
698 if (r)
699 goto error_pin;
700
a05502e5
HC
701 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
702 AMDGPU_GEM_DOMAIN_VRAM,
703 adev->fw_vram_usage.start_offset,
704 (adev->fw_vram_usage.start_offset +
9921167d 705 adev->fw_vram_usage.size), NULL);
a05502e5
HC
706 if (r)
707 goto error_pin;
708 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
709 &adev->fw_vram_usage.va);
710 if (r)
711 goto error_kmap;
712
713 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
714 }
715 return r;
716
717error_kmap:
718 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
719error_pin:
720 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
721error_reserve:
722 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
723error_create:
724 adev->fw_vram_usage.va = NULL;
725 adev->fw_vram_usage.reserved_bo = NULL;
726 return r;
727}
728
d6895ad3
CK
729/**
730 * amdgpu_device_resize_fb_bar - try to resize FB BAR
731 *
732 * @adev: amdgpu_device pointer
733 *
734 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
735 * to fail, but if any of the BARs is not accessible after the size we abort
736 * driver loading by returning -ENODEV.
737 */
738int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
739{
740 u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
741 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
31b8adab
CK
742 struct pci_bus *root;
743 struct resource *res;
744 unsigned i;
d6895ad3
CK
745 u16 cmd;
746 int r;
747
0c03b912 748 /* Bypass for VF */
749 if (amdgpu_sriov_vf(adev))
750 return 0;
751
31b8adab
CK
752 /* Check if the root BUS has 64bit memory resources */
753 root = adev->pdev->bus;
754 while (root->parent)
755 root = root->parent;
756
757 pci_bus_for_each_resource(root, res, i) {
758 if (res && res->flags & IORESOURCE_MEM_64 &&
759 res->start > 0x100000000ull)
760 break;
761 }
762
763 /* Trying to resize is pointless without a root hub window above 4GB */
764 if (!res)
765 return 0;
766
d6895ad3
CK
767 /* Disable memory decoding while we change the BAR addresses and size */
768 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
769 pci_write_config_word(adev->pdev, PCI_COMMAND,
770 cmd & ~PCI_COMMAND_MEMORY);
771
772 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
773 amdgpu_doorbell_fini(adev);
774 if (adev->asic_type >= CHIP_BONAIRE)
775 pci_release_resource(adev->pdev, 2);
776
777 pci_release_resource(adev->pdev, 0);
778
779 r = pci_resize_resource(adev->pdev, 0, rbar_size);
780 if (r == -ENOSPC)
781 DRM_INFO("Not enough PCI address space for a large BAR.");
782 else if (r && r != -ENOTSUPP)
783 DRM_ERROR("Problem resizing BAR0 (%d).", r);
784
785 pci_assign_unassigned_bus_resources(adev->pdev->bus);
786
787 /* When the doorbell or fb BAR isn't available we have no chance of
788 * using the device.
789 */
790 r = amdgpu_doorbell_init(adev);
791 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
792 return -ENODEV;
793
794 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
795
796 return 0;
797}
a05502e5 798
d38ceaf9
AD
799/*
800 * GPU helpers function.
801 */
802/**
c836fec5 803 * amdgpu_need_post - check if the hw need post or not
d38ceaf9
AD
804 *
805 * @adev: amdgpu_device pointer
806 *
c836fec5
JQ
807 * Check if the asic has been initialized (all asics) at driver startup
808 * or post is needed if hw reset is performed.
809 * Returns true if need or false if not.
d38ceaf9 810 */
c836fec5 811bool amdgpu_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
812{
813 uint32_t reg;
814
bec86378
ML
815 if (amdgpu_sriov_vf(adev))
816 return false;
817
818 if (amdgpu_passthrough(adev)) {
1da2c326
ML
819 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
820 * some old smc fw still need driver do vPost otherwise gpu hang, while
821 * those smc fw version above 22.15 doesn't have this flaw, so we force
822 * vpost executed for smc version below 22.15
bec86378
ML
823 */
824 if (adev->asic_type == CHIP_FIJI) {
825 int err;
826 uint32_t fw_ver;
827 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
828 /* force vPost if error occured */
829 if (err)
830 return true;
831
832 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1da2c326
ML
833 if (fw_ver < 0x00160e00)
834 return true;
bec86378 835 }
bec86378 836 }
91fe77eb 837
838 if (adev->has_hw_reset) {
839 adev->has_hw_reset = false;
840 return true;
841 }
842
843 /* bios scratch used on CIK+ */
844 if (adev->asic_type >= CHIP_BONAIRE)
845 return amdgpu_atombios_scratch_need_asic_init(adev);
846
847 /* check MEM_SIZE for older asics */
848 reg = amdgpu_asic_get_config_memsize(adev);
849
850 if ((reg != 0) && (reg != 0xffffffff))
851 return false;
852
853 return true;
bec86378
ML
854}
855
d38ceaf9
AD
856/**
857 * amdgpu_dummy_page_init - init dummy page used by the driver
858 *
859 * @adev: amdgpu_device pointer
860 *
861 * Allocate the dummy page used by the driver (all asics).
862 * This dummy page is used by the driver as a filler for gart entries
863 * when pages are taken out of the GART
864 * Returns 0 on sucess, -ENOMEM on failure.
865 */
866int amdgpu_dummy_page_init(struct amdgpu_device *adev)
867{
868 if (adev->dummy_page.page)
869 return 0;
870 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
871 if (adev->dummy_page.page == NULL)
872 return -ENOMEM;
873 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
874 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
875 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
876 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
877 __free_page(adev->dummy_page.page);
878 adev->dummy_page.page = NULL;
879 return -ENOMEM;
880 }
881 return 0;
882}
883
884/**
885 * amdgpu_dummy_page_fini - free dummy page used by the driver
886 *
887 * @adev: amdgpu_device pointer
888 *
889 * Frees the dummy page used by the driver (all asics).
890 */
891void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
892{
893 if (adev->dummy_page.page == NULL)
894 return;
895 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
896 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
897 __free_page(adev->dummy_page.page);
898 adev->dummy_page.page = NULL;
899}
900
901
902/* ATOM accessor methods */
903/*
904 * ATOM is an interpreted byte code stored in tables in the vbios. The
905 * driver registers callbacks to access registers and the interpreter
906 * in the driver parses the tables and executes then to program specific
907 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
908 * atombios.h, and atom.c
909 */
910
911/**
912 * cail_pll_read - read PLL register
913 *
914 * @info: atom card_info pointer
915 * @reg: PLL register offset
916 *
917 * Provides a PLL register accessor for the atom interpreter (r4xx+).
918 * Returns the value of the PLL register.
919 */
920static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
921{
922 return 0;
923}
924
925/**
926 * cail_pll_write - write PLL register
927 *
928 * @info: atom card_info pointer
929 * @reg: PLL register offset
930 * @val: value to write to the pll register
931 *
932 * Provides a PLL register accessor for the atom interpreter (r4xx+).
933 */
934static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
935{
936
937}
938
939/**
940 * cail_mc_read - read MC (Memory Controller) register
941 *
942 * @info: atom card_info pointer
943 * @reg: MC register offset
944 *
945 * Provides an MC register accessor for the atom interpreter (r4xx+).
946 * Returns the value of the MC register.
947 */
948static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
949{
950 return 0;
951}
952
953/**
954 * cail_mc_write - write MC (Memory Controller) register
955 *
956 * @info: atom card_info pointer
957 * @reg: MC register offset
958 * @val: value to write to the pll register
959 *
960 * Provides a MC register accessor for the atom interpreter (r4xx+).
961 */
962static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
963{
964
965}
966
967/**
968 * cail_reg_write - write MMIO register
969 *
970 * @info: atom card_info pointer
971 * @reg: MMIO register offset
972 * @val: value to write to the pll register
973 *
974 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
975 */
976static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
977{
978 struct amdgpu_device *adev = info->dev->dev_private;
979
980 WREG32(reg, val);
981}
982
983/**
984 * cail_reg_read - read MMIO register
985 *
986 * @info: atom card_info pointer
987 * @reg: MMIO register offset
988 *
989 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
990 * Returns the value of the MMIO register.
991 */
992static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
993{
994 struct amdgpu_device *adev = info->dev->dev_private;
995 uint32_t r;
996
997 r = RREG32(reg);
998 return r;
999}
1000
1001/**
1002 * cail_ioreg_write - write IO register
1003 *
1004 * @info: atom card_info pointer
1005 * @reg: IO register offset
1006 * @val: value to write to the pll register
1007 *
1008 * Provides a IO register accessor for the atom interpreter (r4xx+).
1009 */
1010static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
1011{
1012 struct amdgpu_device *adev = info->dev->dev_private;
1013
1014 WREG32_IO(reg, val);
1015}
1016
1017/**
1018 * cail_ioreg_read - read IO register
1019 *
1020 * @info: atom card_info pointer
1021 * @reg: IO register offset
1022 *
1023 * Provides an IO register accessor for the atom interpreter (r4xx+).
1024 * Returns the value of the IO register.
1025 */
1026static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
1027{
1028 struct amdgpu_device *adev = info->dev->dev_private;
1029 uint32_t r;
1030
1031 r = RREG32_IO(reg);
1032 return r;
1033}
1034
5b41d94c
KR
1035static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
1036 struct device_attribute *attr,
1037 char *buf)
1038{
1039 struct drm_device *ddev = dev_get_drvdata(dev);
1040 struct amdgpu_device *adev = ddev->dev_private;
1041 struct atom_context *ctx = adev->mode_info.atom_context;
1042
1043 return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
1044}
1045
1046static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
1047 NULL);
1048
d38ceaf9
AD
1049/**
1050 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
1051 *
1052 * @adev: amdgpu_device pointer
1053 *
1054 * Frees the driver info and register access callbacks for the ATOM
1055 * interpreter (r4xx+).
1056 * Called at driver shutdown.
1057 */
1058static void amdgpu_atombios_fini(struct amdgpu_device *adev)
1059{
89e0ec9f 1060 if (adev->mode_info.atom_context) {
d38ceaf9 1061 kfree(adev->mode_info.atom_context->scratch);
89e0ec9f
ML
1062 kfree(adev->mode_info.atom_context->iio);
1063 }
d38ceaf9
AD
1064 kfree(adev->mode_info.atom_context);
1065 adev->mode_info.atom_context = NULL;
1066 kfree(adev->mode_info.atom_card_info);
1067 adev->mode_info.atom_card_info = NULL;
5b41d94c 1068 device_remove_file(adev->dev, &dev_attr_vbios_version);
d38ceaf9
AD
1069}
1070
1071/**
1072 * amdgpu_atombios_init - init the driver info and callbacks for atombios
1073 *
1074 * @adev: amdgpu_device pointer
1075 *
1076 * Initializes the driver info and register access callbacks for the
1077 * ATOM interpreter (r4xx+).
1078 * Returns 0 on sucess, -ENOMEM on failure.
1079 * Called at driver startup.
1080 */
1081static int amdgpu_atombios_init(struct amdgpu_device *adev)
1082{
1083 struct card_info *atom_card_info =
1084 kzalloc(sizeof(struct card_info), GFP_KERNEL);
5b41d94c 1085 int ret;
d38ceaf9
AD
1086
1087 if (!atom_card_info)
1088 return -ENOMEM;
1089
1090 adev->mode_info.atom_card_info = atom_card_info;
1091 atom_card_info->dev = adev->ddev;
1092 atom_card_info->reg_read = cail_reg_read;
1093 atom_card_info->reg_write = cail_reg_write;
1094 /* needed for iio ops */
1095 if (adev->rio_mem) {
1096 atom_card_info->ioreg_read = cail_ioreg_read;
1097 atom_card_info->ioreg_write = cail_ioreg_write;
1098 } else {
9953b72f 1099 DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
d38ceaf9
AD
1100 atom_card_info->ioreg_read = cail_reg_read;
1101 atom_card_info->ioreg_write = cail_reg_write;
1102 }
1103 atom_card_info->mc_read = cail_mc_read;
1104 atom_card_info->mc_write = cail_mc_write;
1105 atom_card_info->pll_read = cail_pll_read;
1106 atom_card_info->pll_write = cail_pll_write;
1107
1108 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1109 if (!adev->mode_info.atom_context) {
1110 amdgpu_atombios_fini(adev);
1111 return -ENOMEM;
1112 }
1113
1114 mutex_init(&adev->mode_info.atom_context->mutex);
a5bde2f9
AD
1115 if (adev->is_atom_fw) {
1116 amdgpu_atomfirmware_scratch_regs_init(adev);
1117 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1118 } else {
1119 amdgpu_atombios_scratch_regs_init(adev);
1120 amdgpu_atombios_allocate_fb_scratch(adev);
1121 }
5b41d94c
KR
1122
1123 ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1124 if (ret) {
1125 DRM_ERROR("Failed to create device file for VBIOS version\n");
1126 return ret;
1127 }
1128
d38ceaf9
AD
1129 return 0;
1130}
1131
1132/* if we get transitioned to only one device, take VGA back */
1133/**
1134 * amdgpu_vga_set_decode - enable/disable vga decode
1135 *
1136 * @cookie: amdgpu_device pointer
1137 * @state: enable/disable vga decode
1138 *
1139 * Enable/disable vga decode (all asics).
1140 * Returns VGA resource flags.
1141 */
1142static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1143{
1144 struct amdgpu_device *adev = cookie;
1145 amdgpu_asic_set_vga_state(adev, state);
1146 if (state)
1147 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1148 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1149 else
1150 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1151}
1152
bab4fee7 1153static void amdgpu_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
1154{
1155 /* defines number of bits in page table versus page directory,
1156 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1157 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
1158 if (amdgpu_vm_block_size == -1)
1159 return;
a1adf8be 1160
bab4fee7 1161 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
1162 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1163 amdgpu_vm_block_size);
97489129 1164 amdgpu_vm_block_size = -1;
a1adf8be 1165 }
a1adf8be
CZ
1166}
1167
83ca145d
ZJ
1168static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1169{
64dab074
AD
1170 /* no need to check the default value */
1171 if (amdgpu_vm_size == -1)
1172 return;
1173
83ca145d
ZJ
1174 if (amdgpu_vm_size < 1) {
1175 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1176 amdgpu_vm_size);
f3368128 1177 amdgpu_vm_size = -1;
83ca145d 1178 }
83ca145d
ZJ
1179}
1180
d38ceaf9
AD
1181/**
1182 * amdgpu_check_arguments - validate module params
1183 *
1184 * @adev: amdgpu_device pointer
1185 *
1186 * Validates certain module parameters and updates
1187 * the associated values used by the driver (all asics).
1188 */
1189static void amdgpu_check_arguments(struct amdgpu_device *adev)
1190{
5b011235
CZ
1191 if (amdgpu_sched_jobs < 4) {
1192 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1193 amdgpu_sched_jobs);
1194 amdgpu_sched_jobs = 4;
76117507 1195 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
1196 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1197 amdgpu_sched_jobs);
1198 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1199 }
d38ceaf9 1200
83e74db6 1201 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
1202 /* gart size must be greater or equal to 32M */
1203 dev_warn(adev->dev, "gart size (%d) too small\n",
1204 amdgpu_gart_size);
83e74db6 1205 amdgpu_gart_size = -1;
d38ceaf9
AD
1206 }
1207
36d38372 1208 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 1209 /* gtt size must be greater or equal to 32M */
36d38372
CK
1210 dev_warn(adev->dev, "gtt size (%d) too small\n",
1211 amdgpu_gtt_size);
1212 amdgpu_gtt_size = -1;
d38ceaf9
AD
1213 }
1214
d07f14be
RH
1215 /* valid range is between 4 and 9 inclusive */
1216 if (amdgpu_vm_fragment_size != -1 &&
1217 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1218 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1219 amdgpu_vm_fragment_size = -1;
1220 }
1221
83ca145d 1222 amdgpu_check_vm_size(adev);
d38ceaf9 1223
bab4fee7 1224 amdgpu_check_block_size(adev);
6a7f76e7 1225
526bae37 1226 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 1227 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
1228 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1229 amdgpu_vram_page_split);
1230 amdgpu_vram_page_split = 1024;
1231 }
d38ceaf9
AD
1232}
1233
1234/**
1235 * amdgpu_switcheroo_set_state - set switcheroo state
1236 *
1237 * @pdev: pci dev pointer
1694467b 1238 * @state: vga_switcheroo state
d38ceaf9
AD
1239 *
1240 * Callback for the switcheroo driver. Suspends or resumes the
1241 * the asics before or after it is powered up using ACPI methods.
1242 */
1243static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1244{
1245 struct drm_device *dev = pci_get_drvdata(pdev);
1246
1247 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1248 return;
1249
1250 if (state == VGA_SWITCHEROO_ON) {
7ca85295 1251 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
1252 /* don't suspend or resume card normally */
1253 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1254
810ddc3a 1255 amdgpu_device_resume(dev, true, true);
d38ceaf9 1256
d38ceaf9
AD
1257 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1258 drm_kms_helper_poll_enable(dev);
1259 } else {
7ca85295 1260 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
1261 drm_kms_helper_poll_disable(dev);
1262 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 1263 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
1264 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1265 }
1266}
1267
1268/**
1269 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1270 *
1271 * @pdev: pci dev pointer
1272 *
1273 * Callback for the switcheroo driver. Check of the switcheroo
1274 * state can be changed.
1275 * Returns true if the state can be changed, false if not.
1276 */
1277static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1278{
1279 struct drm_device *dev = pci_get_drvdata(pdev);
1280
1281 /*
1282 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1283 * locking inversion with the driver load path. And the access here is
1284 * completely racy anyway. So don't bother with locking for now.
1285 */
1286 return dev->open_count == 0;
1287}
1288
1289static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1290 .set_gpu_state = amdgpu_switcheroo_set_state,
1291 .reprobe = NULL,
1292 .can_switch = amdgpu_switcheroo_can_switch,
1293};
1294
1295int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
5fc3aeeb 1296 enum amd_ip_block_type block_type,
1297 enum amd_clockgating_state state)
d38ceaf9
AD
1298{
1299 int i, r = 0;
1300
1301 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1302 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1303 continue;
c722865a
RZ
1304 if (adev->ip_blocks[i].version->type != block_type)
1305 continue;
1306 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1307 continue;
1308 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1309 (void *)adev, state);
1310 if (r)
1311 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1312 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1313 }
1314 return r;
1315}
1316
1317int amdgpu_set_powergating_state(struct amdgpu_device *adev,
5fc3aeeb 1318 enum amd_ip_block_type block_type,
1319 enum amd_powergating_state state)
d38ceaf9
AD
1320{
1321 int i, r = 0;
1322
1323 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1324 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1325 continue;
c722865a
RZ
1326 if (adev->ip_blocks[i].version->type != block_type)
1327 continue;
1328 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1329 continue;
1330 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1331 (void *)adev, state);
1332 if (r)
1333 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1334 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
1335 }
1336 return r;
1337}
1338
6cb2d4e4
HR
1339void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1340{
1341 int i;
1342
1343 for (i = 0; i < adev->num_ip_blocks; i++) {
1344 if (!adev->ip_blocks[i].status.valid)
1345 continue;
1346 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1347 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1348 }
1349}
1350
5dbbb60b
AD
1351int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1352 enum amd_ip_block_type block_type)
1353{
1354 int i, r;
1355
1356 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1357 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1358 continue;
a1255107
AD
1359 if (adev->ip_blocks[i].version->type == block_type) {
1360 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
1361 if (r)
1362 return r;
1363 break;
1364 }
1365 }
1366 return 0;
1367
1368}
1369
1370bool amdgpu_is_idle(struct amdgpu_device *adev,
1371 enum amd_ip_block_type block_type)
1372{
1373 int i;
1374
1375 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1376 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 1377 continue;
a1255107
AD
1378 if (adev->ip_blocks[i].version->type == block_type)
1379 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
1380 }
1381 return true;
1382
1383}
1384
a1255107
AD
1385struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1386 enum amd_ip_block_type type)
d38ceaf9
AD
1387{
1388 int i;
1389
1390 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 1391 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
1392 return &adev->ip_blocks[i];
1393
1394 return NULL;
1395}
1396
1397/**
1398 * amdgpu_ip_block_version_cmp
1399 *
1400 * @adev: amdgpu_device pointer
5fc3aeeb 1401 * @type: enum amd_ip_block_type
d38ceaf9
AD
1402 * @major: major version
1403 * @minor: minor version
1404 *
1405 * return 0 if equal or greater
1406 * return 1 if smaller or the ip_block doesn't exist
1407 */
1408int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
5fc3aeeb 1409 enum amd_ip_block_type type,
d38ceaf9
AD
1410 u32 major, u32 minor)
1411{
a1255107 1412 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
d38ceaf9 1413
a1255107
AD
1414 if (ip_block && ((ip_block->version->major > major) ||
1415 ((ip_block->version->major == major) &&
1416 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1417 return 0;
1418
1419 return 1;
1420}
1421
a1255107
AD
1422/**
1423 * amdgpu_ip_block_add
1424 *
1425 * @adev: amdgpu_device pointer
1426 * @ip_block_version: pointer to the IP to add
1427 *
1428 * Adds the IP block driver information to the collection of IPs
1429 * on the asic.
1430 */
1431int amdgpu_ip_block_add(struct amdgpu_device *adev,
1432 const struct amdgpu_ip_block_version *ip_block_version)
1433{
1434 if (!ip_block_version)
1435 return -EINVAL;
1436
a0bae357
HR
1437 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1438 ip_block_version->funcs->name);
1439
a1255107
AD
1440 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1441
1442 return 0;
1443}
1444
483ef985 1445static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1446{
1447 adev->enable_virtual_display = false;
1448
1449 if (amdgpu_virtual_display) {
1450 struct drm_device *ddev = adev->ddev;
1451 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1452 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1453
1454 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1455 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1456 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1457 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1458 if (!strcmp("all", pciaddname)
1459 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1460 long num_crtc;
1461 int res = -1;
1462
9accf2fd 1463 adev->enable_virtual_display = true;
0f66356d
ED
1464
1465 if (pciaddname_tmp)
1466 res = kstrtol(pciaddname_tmp, 10,
1467 &num_crtc);
1468
1469 if (!res) {
1470 if (num_crtc < 1)
1471 num_crtc = 1;
1472 if (num_crtc > 6)
1473 num_crtc = 6;
1474 adev->mode_info.num_crtc = num_crtc;
1475 } else {
1476 adev->mode_info.num_crtc = 1;
1477 }
9accf2fd
ED
1478 break;
1479 }
1480 }
1481
0f66356d
ED
1482 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1483 amdgpu_virtual_display, pci_address_name,
1484 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1485
1486 kfree(pciaddstr);
1487 }
1488}
1489
e2a75f88
AD
1490static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1491{
e2a75f88
AD
1492 const char *chip_name;
1493 char fw_name[30];
1494 int err;
1495 const struct gpu_info_firmware_header_v1_0 *hdr;
1496
ab4fe3e1
HR
1497 adev->firmware.gpu_info_fw = NULL;
1498
e2a75f88
AD
1499 switch (adev->asic_type) {
1500 case CHIP_TOPAZ:
1501 case CHIP_TONGA:
1502 case CHIP_FIJI:
1503 case CHIP_POLARIS11:
1504 case CHIP_POLARIS10:
1505 case CHIP_POLARIS12:
1506 case CHIP_CARRIZO:
1507 case CHIP_STONEY:
1508#ifdef CONFIG_DRM_AMDGPU_SI
1509 case CHIP_VERDE:
1510 case CHIP_TAHITI:
1511 case CHIP_PITCAIRN:
1512 case CHIP_OLAND:
1513 case CHIP_HAINAN:
1514#endif
1515#ifdef CONFIG_DRM_AMDGPU_CIK
1516 case CHIP_BONAIRE:
1517 case CHIP_HAWAII:
1518 case CHIP_KAVERI:
1519 case CHIP_KABINI:
1520 case CHIP_MULLINS:
1521#endif
1522 default:
1523 return 0;
1524 case CHIP_VEGA10:
1525 chip_name = "vega10";
1526 break;
2d2e5e7e
AD
1527 case CHIP_RAVEN:
1528 chip_name = "raven";
1529 break;
e2a75f88
AD
1530 }
1531
1532 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1533 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1534 if (err) {
1535 dev_err(adev->dev,
1536 "Failed to load gpu_info firmware \"%s\"\n",
1537 fw_name);
1538 goto out;
1539 }
ab4fe3e1 1540 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1541 if (err) {
1542 dev_err(adev->dev,
1543 "Failed to validate gpu_info firmware \"%s\"\n",
1544 fw_name);
1545 goto out;
1546 }
1547
ab4fe3e1 1548 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1549 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1550
1551 switch (hdr->version_major) {
1552 case 1:
1553 {
1554 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1555 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1556 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1557
b5ab16bf
AD
1558 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1559 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1560 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1561 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1562 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1563 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1564 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1565 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1566 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1567 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1568 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1569 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1570 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1571 adev->gfx.cu_info.max_waves_per_simd =
1572 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1573 adev->gfx.cu_info.max_scratch_slots_per_cu =
1574 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1575 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1576 break;
1577 }
1578 default:
1579 dev_err(adev->dev,
1580 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1581 err = -EINVAL;
1582 goto out;
1583 }
1584out:
e2a75f88
AD
1585 return err;
1586}
1587
d38ceaf9
AD
1588static int amdgpu_early_init(struct amdgpu_device *adev)
1589{
aaa36a97 1590 int i, r;
d38ceaf9 1591
483ef985 1592 amdgpu_device_enable_virtual_display(adev);
a6be7570 1593
d38ceaf9 1594 switch (adev->asic_type) {
aaa36a97
AD
1595 case CHIP_TOPAZ:
1596 case CHIP_TONGA:
48299f95 1597 case CHIP_FIJI:
2cc0c0b5
FC
1598 case CHIP_POLARIS11:
1599 case CHIP_POLARIS10:
c4642a47 1600 case CHIP_POLARIS12:
aaa36a97 1601 case CHIP_CARRIZO:
39bb0c92
SL
1602 case CHIP_STONEY:
1603 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1604 adev->family = AMDGPU_FAMILY_CZ;
1605 else
1606 adev->family = AMDGPU_FAMILY_VI;
1607
1608 r = vi_set_ip_blocks(adev);
1609 if (r)
1610 return r;
1611 break;
33f34802
KW
1612#ifdef CONFIG_DRM_AMDGPU_SI
1613 case CHIP_VERDE:
1614 case CHIP_TAHITI:
1615 case CHIP_PITCAIRN:
1616 case CHIP_OLAND:
1617 case CHIP_HAINAN:
295d0daf 1618 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1619 r = si_set_ip_blocks(adev);
1620 if (r)
1621 return r;
1622 break;
1623#endif
a2e73f56
AD
1624#ifdef CONFIG_DRM_AMDGPU_CIK
1625 case CHIP_BONAIRE:
1626 case CHIP_HAWAII:
1627 case CHIP_KAVERI:
1628 case CHIP_KABINI:
1629 case CHIP_MULLINS:
1630 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1631 adev->family = AMDGPU_FAMILY_CI;
1632 else
1633 adev->family = AMDGPU_FAMILY_KV;
1634
1635 r = cik_set_ip_blocks(adev);
1636 if (r)
1637 return r;
1638 break;
1639#endif
2ca8a5d2
CZ
1640 case CHIP_VEGA10:
1641 case CHIP_RAVEN:
1642 if (adev->asic_type == CHIP_RAVEN)
1643 adev->family = AMDGPU_FAMILY_RV;
1644 else
1645 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1646
1647 r = soc15_set_ip_blocks(adev);
1648 if (r)
1649 return r;
1650 break;
d38ceaf9
AD
1651 default:
1652 /* FIXME: not supported yet */
1653 return -EINVAL;
1654 }
1655
e2a75f88
AD
1656 r = amdgpu_device_parse_gpu_info_fw(adev);
1657 if (r)
1658 return r;
1659
1884734a 1660 amdgpu_amdkfd_device_probe(adev);
1661
3149d9da
XY
1662 if (amdgpu_sriov_vf(adev)) {
1663 r = amdgpu_virt_request_full_gpu(adev, true);
1664 if (r)
5ffa61c1 1665 return -EAGAIN;
3149d9da
XY
1666 }
1667
d38ceaf9
AD
1668 for (i = 0; i < adev->num_ip_blocks; i++) {
1669 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1670 DRM_ERROR("disabled ip block: %d <%s>\n",
1671 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1672 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1673 } else {
a1255107
AD
1674 if (adev->ip_blocks[i].version->funcs->early_init) {
1675 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1676 if (r == -ENOENT) {
a1255107 1677 adev->ip_blocks[i].status.valid = false;
2c1a2784 1678 } else if (r) {
a1255107
AD
1679 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1680 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1681 return r;
2c1a2784 1682 } else {
a1255107 1683 adev->ip_blocks[i].status.valid = true;
2c1a2784 1684 }
974e6b64 1685 } else {
a1255107 1686 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1687 }
d38ceaf9
AD
1688 }
1689 }
1690
395d1fb9
NH
1691 adev->cg_flags &= amdgpu_cg_mask;
1692 adev->pg_flags &= amdgpu_pg_mask;
1693
d38ceaf9
AD
1694 return 0;
1695}
1696
1697static int amdgpu_init(struct amdgpu_device *adev)
1698{
1699 int i, r;
1700
1701 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1702 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1703 continue;
a1255107 1704 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1705 if (r) {
a1255107
AD
1706 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1707 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1708 return r;
2c1a2784 1709 }
a1255107 1710 adev->ip_blocks[i].status.sw = true;
d38ceaf9 1711 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1712 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
d38ceaf9 1713 r = amdgpu_vram_scratch_init(adev);
2c1a2784
AD
1714 if (r) {
1715 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1716 return r;
2c1a2784 1717 }
a1255107 1718 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1719 if (r) {
1720 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1721 return r;
2c1a2784 1722 }
d38ceaf9 1723 r = amdgpu_wb_init(adev);
2c1a2784
AD
1724 if (r) {
1725 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
d38ceaf9 1726 return r;
2c1a2784 1727 }
a1255107 1728 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1729
1730 /* right after GMC hw init, we create CSA */
1731 if (amdgpu_sriov_vf(adev)) {
1732 r = amdgpu_allocate_static_csa(adev);
1733 if (r) {
1734 DRM_ERROR("allocate CSA failed %d\n", r);
1735 return r;
1736 }
1737 }
d38ceaf9
AD
1738 }
1739 }
1740
1741 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1742 if (!adev->ip_blocks[i].status.sw)
d38ceaf9
AD
1743 continue;
1744 /* gmc hw init is done early */
a1255107 1745 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
d38ceaf9 1746 continue;
a1255107 1747 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1748 if (r) {
a1255107
AD
1749 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1750 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1751 return r;
2c1a2784 1752 }
a1255107 1753 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1754 }
1755
1884734a 1756 amdgpu_amdkfd_device_init(adev);
c6332b97 1757
1758 if (amdgpu_sriov_vf(adev))
1759 amdgpu_virt_release_full_gpu(adev, true);
1760
d38ceaf9
AD
1761 return 0;
1762}
1763
0c49e0b8
CZ
1764static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1765{
1766 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1767}
1768
1769static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1770{
1771 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1772 AMDGPU_RESET_MAGIC_NUM);
1773}
1774
2dc80b00 1775static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1776{
1777 int i = 0, r;
1778
1779 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1780 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1781 continue;
4a446d55 1782 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1783 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1784 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1785 /* enable clockgating to save power */
a1255107
AD
1786 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1787 AMD_CG_STATE_GATE);
4a446d55
AD
1788 if (r) {
1789 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1790 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1791 return r;
1792 }
b0b00ff1 1793 }
d38ceaf9 1794 }
2dc80b00
S
1795 return 0;
1796}
1797
1798static int amdgpu_late_init(struct amdgpu_device *adev)
1799{
1800 int i = 0, r;
1801
1802 for (i = 0; i < adev->num_ip_blocks; i++) {
1803 if (!adev->ip_blocks[i].status.valid)
1804 continue;
1805 if (adev->ip_blocks[i].version->funcs->late_init) {
1806 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1807 if (r) {
1808 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1809 adev->ip_blocks[i].version->funcs->name, r);
1810 return r;
1811 }
1812 adev->ip_blocks[i].status.late_initialized = true;
1813 }
1814 }
1815
1816 mod_delayed_work(system_wq, &adev->late_init_work,
1817 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1818
0c49e0b8 1819 amdgpu_fill_reset_magic(adev);
d38ceaf9
AD
1820
1821 return 0;
1822}
1823
1824static int amdgpu_fini(struct amdgpu_device *adev)
1825{
1826 int i, r;
1827
1884734a 1828 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1829 /* need to disable SMC first */
1830 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1831 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1832 continue;
a1255107 1833 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1834 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1835 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1836 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1837 if (r) {
1838 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1839 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1840 return r;
1841 }
a1255107 1842 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1843 /* XXX handle errors */
1844 if (r) {
1845 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1846 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1847 }
a1255107 1848 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1849 break;
1850 }
1851 }
1852
d38ceaf9 1853 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1854 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1855 continue;
a1255107 1856 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
84e5b516 1857 amdgpu_free_static_csa(adev);
d38ceaf9
AD
1858 amdgpu_wb_fini(adev);
1859 amdgpu_vram_scratch_fini(adev);
1860 }
8201a67a
RZ
1861
1862 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1863 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1864 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1865 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1866 AMD_CG_STATE_UNGATE);
1867 if (r) {
1868 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1869 adev->ip_blocks[i].version->funcs->name, r);
1870 return r;
1871 }
2c1a2784 1872 }
8201a67a 1873
a1255107 1874 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1875 /* XXX handle errors */
2c1a2784 1876 if (r) {
a1255107
AD
1877 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1878 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1879 }
8201a67a 1880
a1255107 1881 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1882 }
1883
1884 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1885 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1886 continue;
a1255107 1887 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1888 /* XXX handle errors */
2c1a2784 1889 if (r) {
a1255107
AD
1890 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1891 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1892 }
a1255107
AD
1893 adev->ip_blocks[i].status.sw = false;
1894 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1895 }
1896
a6dcfd9c 1897 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1898 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1899 continue;
a1255107
AD
1900 if (adev->ip_blocks[i].version->funcs->late_fini)
1901 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1902 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1903 }
1904
030308fc 1905 if (amdgpu_sriov_vf(adev))
24136135
ML
1906 if (amdgpu_virt_release_full_gpu(adev, false))
1907 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1908
d38ceaf9
AD
1909 return 0;
1910}
1911
2dc80b00
S
1912static void amdgpu_late_init_func_handler(struct work_struct *work)
1913{
1914 struct amdgpu_device *adev =
1915 container_of(work, struct amdgpu_device, late_init_work.work);
1916 amdgpu_late_set_cg_state(adev);
1917}
1918
faefba95 1919int amdgpu_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1920{
1921 int i, r;
1922
e941ea99
XY
1923 if (amdgpu_sriov_vf(adev))
1924 amdgpu_virt_request_full_gpu(adev, false);
1925
c5a93a28
FC
1926 /* ungate SMC block first */
1927 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1928 AMD_CG_STATE_UNGATE);
1929 if (r) {
1930 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1931 }
1932
d38ceaf9 1933 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1934 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1935 continue;
1936 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1937 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1938 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1939 AMD_CG_STATE_UNGATE);
c5a93a28 1940 if (r) {
a1255107
AD
1941 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1942 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1943 }
2c1a2784 1944 }
d38ceaf9 1945 /* XXX handle errors */
a1255107 1946 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1947 /* XXX handle errors */
2c1a2784 1948 if (r) {
a1255107
AD
1949 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1950 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1951 }
d38ceaf9
AD
1952 }
1953
e941ea99
XY
1954 if (amdgpu_sriov_vf(adev))
1955 amdgpu_virt_release_full_gpu(adev, false);
1956
d38ceaf9
AD
1957 return 0;
1958}
1959
e4f0fdcc 1960static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
a90ad3c2
ML
1961{
1962 int i, r;
1963
2cb681b6
ML
1964 static enum amd_ip_block_type ip_order[] = {
1965 AMD_IP_BLOCK_TYPE_GMC,
1966 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1967 AMD_IP_BLOCK_TYPE_IH,
1968 };
a90ad3c2 1969
2cb681b6
ML
1970 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1971 int j;
1972 struct amdgpu_ip_block *block;
a90ad3c2 1973
2cb681b6
ML
1974 for (j = 0; j < adev->num_ip_blocks; j++) {
1975 block = &adev->ip_blocks[j];
1976
1977 if (block->version->type != ip_order[i] ||
1978 !block->status.valid)
1979 continue;
1980
1981 r = block->version->funcs->hw_init(adev);
1982 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
1983 }
1984 }
1985
1986 return 0;
1987}
1988
e4f0fdcc 1989static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
a90ad3c2
ML
1990{
1991 int i, r;
1992
2cb681b6
ML
1993 static enum amd_ip_block_type ip_order[] = {
1994 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1995 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1996 AMD_IP_BLOCK_TYPE_DCE,
1997 AMD_IP_BLOCK_TYPE_GFX,
1998 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1999 AMD_IP_BLOCK_TYPE_UVD,
2000 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 2001 };
a90ad3c2 2002
2cb681b6
ML
2003 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2004 int j;
2005 struct amdgpu_ip_block *block;
a90ad3c2 2006
2cb681b6
ML
2007 for (j = 0; j < adev->num_ip_blocks; j++) {
2008 block = &adev->ip_blocks[j];
2009
2010 if (block->version->type != ip_order[i] ||
2011 !block->status.valid)
2012 continue;
2013
2014 r = block->version->funcs->hw_init(adev);
2015 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
a90ad3c2
ML
2016 }
2017 }
2018
2019 return 0;
2020}
2021
fcf0649f 2022static int amdgpu_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
2023{
2024 int i, r;
2025
a90ad3c2
ML
2026 for (i = 0; i < adev->num_ip_blocks; i++) {
2027 if (!adev->ip_blocks[i].status.valid)
2028 continue;
a90ad3c2
ML
2029 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2030 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
2031 adev->ip_blocks[i].version->type ==
2032 AMD_IP_BLOCK_TYPE_IH) {
2033 r = adev->ip_blocks[i].version->funcs->resume(adev);
2034 if (r) {
2035 DRM_ERROR("resume of IP block <%s> failed %d\n",
2036 adev->ip_blocks[i].version->funcs->name, r);
2037 return r;
2038 }
a90ad3c2
ML
2039 }
2040 }
2041
2042 return 0;
2043}
2044
fcf0649f 2045static int amdgpu_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
2046{
2047 int i, r;
2048
2049 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2050 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 2051 continue;
fcf0649f
CZ
2052 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2053 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2054 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2055 continue;
a1255107 2056 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 2057 if (r) {
a1255107
AD
2058 DRM_ERROR("resume of IP block <%s> failed %d\n",
2059 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 2060 return r;
2c1a2784 2061 }
d38ceaf9
AD
2062 }
2063
2064 return 0;
2065}
2066
fcf0649f
CZ
2067static int amdgpu_resume(struct amdgpu_device *adev)
2068{
2069 int r;
2070
2071 r = amdgpu_resume_phase1(adev);
2072 if (r)
2073 return r;
2074 r = amdgpu_resume_phase2(adev);
2075
2076 return r;
2077}
2078
4e99a44e 2079static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 2080{
6867e1b5
ML
2081 if (amdgpu_sriov_vf(adev)) {
2082 if (adev->is_atom_fw) {
2083 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2084 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2085 } else {
2086 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2087 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2088 }
2089
2090 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2091 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 2092 }
048765ad
AR
2093}
2094
4562236b
HW
2095bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2096{
2097 switch (asic_type) {
2098#if defined(CONFIG_DRM_AMD_DC)
2099 case CHIP_BONAIRE:
2100 case CHIP_HAWAII:
0d6fbccb 2101 case CHIP_KAVERI:
4562236b
HW
2102 case CHIP_CARRIZO:
2103 case CHIP_STONEY:
2104 case CHIP_POLARIS11:
2105 case CHIP_POLARIS10:
2c8ad2d5 2106 case CHIP_POLARIS12:
4562236b
HW
2107 case CHIP_TONGA:
2108 case CHIP_FIJI:
2109#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2110 return amdgpu_dc != 0;
4562236b 2111#endif
17b7cf8c
AD
2112 case CHIP_KABINI:
2113 case CHIP_MULLINS:
2114 return amdgpu_dc > 0;
42f8ffa1
HW
2115 case CHIP_VEGA10:
2116#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 2117 case CHIP_RAVEN:
42f8ffa1 2118#endif
fd187853 2119 return amdgpu_dc != 0;
4562236b
HW
2120#endif
2121 default:
2122 return false;
2123 }
2124}
2125
2126/**
2127 * amdgpu_device_has_dc_support - check if dc is supported
2128 *
2129 * @adev: amdgpu_device pointer
2130 *
2131 * Returns true for supported, false for not supported
2132 */
2133bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2134{
2555039d
XY
2135 if (amdgpu_sriov_vf(adev))
2136 return false;
2137
4562236b
HW
2138 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2139}
2140
d38ceaf9
AD
2141/**
2142 * amdgpu_device_init - initialize the driver
2143 *
2144 * @adev: amdgpu_device pointer
2145 * @ddev: drm dev pointer
2146 * @pdev: pci dev pointer
2147 * @flags: driver flags
2148 *
2149 * Initializes the driver info and hw (all asics).
2150 * Returns 0 for success or an error on failure.
2151 * Called at driver startup.
2152 */
2153int amdgpu_device_init(struct amdgpu_device *adev,
2154 struct drm_device *ddev,
2155 struct pci_dev *pdev,
2156 uint32_t flags)
2157{
2158 int r, i;
2159 bool runtime = false;
95844d20 2160 u32 max_MBps;
d38ceaf9
AD
2161
2162 adev->shutdown = false;
2163 adev->dev = &pdev->dev;
2164 adev->ddev = ddev;
2165 adev->pdev = pdev;
2166 adev->flags = flags;
2f7d10b3 2167 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 2168 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
6f02a696 2169 adev->mc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
2170 adev->accel_working = false;
2171 adev->num_rings = 0;
2172 adev->mman.buffer_funcs = NULL;
2173 adev->mman.buffer_funcs_ring = NULL;
2174 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 2175 adev->vm_manager.vm_pte_num_rings = 0;
d38ceaf9 2176 adev->gart.gart_funcs = NULL;
f54d1867 2177 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 2178 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
2179
2180 adev->smc_rreg = &amdgpu_invalid_rreg;
2181 adev->smc_wreg = &amdgpu_invalid_wreg;
2182 adev->pcie_rreg = &amdgpu_invalid_rreg;
2183 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
2184 adev->pciep_rreg = &amdgpu_invalid_rreg;
2185 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2186 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2187 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2188 adev->didt_rreg = &amdgpu_invalid_rreg;
2189 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
2190 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2191 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
2192 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2193 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2194
3e39ab90
AD
2195 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2196 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2197 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
2198
2199 /* mutex initialization is all done here so we
2200 * can recall functions without locking issues */
d38ceaf9 2201 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 2202 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
2203 mutex_init(&adev->pm.mutex);
2204 mutex_init(&adev->gfx.gpu_clock_mutex);
2205 mutex_init(&adev->srbm_mutex);
b8866c26 2206 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 2207 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 2208 mutex_init(&adev->mn_lock);
e23b74aa 2209 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 2210 hash_init(adev->mn_hash);
13a752e3 2211 mutex_init(&adev->lock_reset);
d38ceaf9
AD
2212
2213 amdgpu_check_arguments(adev);
2214
d38ceaf9
AD
2215 spin_lock_init(&adev->mmio_idx_lock);
2216 spin_lock_init(&adev->smc_idx_lock);
2217 spin_lock_init(&adev->pcie_idx_lock);
2218 spin_lock_init(&adev->uvd_ctx_idx_lock);
2219 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 2220 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 2221 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 2222 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 2223 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 2224
0c4e7fa5
CZ
2225 INIT_LIST_HEAD(&adev->shadow_list);
2226 mutex_init(&adev->shadow_list_lock);
2227
795f2813
AR
2228 INIT_LIST_HEAD(&adev->ring_lru_list);
2229 spin_lock_init(&adev->ring_lru_list_lock);
2230
2dc80b00
S
2231 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2232
0fa49558
AX
2233 /* Registers mapping */
2234 /* TODO: block userspace mapping of io register */
da69c161
KW
2235 if (adev->asic_type >= CHIP_BONAIRE) {
2236 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2237 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2238 } else {
2239 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2240 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2241 }
d38ceaf9 2242
d38ceaf9
AD
2243 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2244 if (adev->rmmio == NULL) {
2245 return -ENOMEM;
2246 }
2247 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2248 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2249
705e519e
CK
2250 /* doorbell bar mapping */
2251 amdgpu_doorbell_init(adev);
d38ceaf9
AD
2252
2253 /* io port mapping */
2254 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2255 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2256 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2257 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2258 break;
2259 }
2260 }
2261 if (adev->rio_mem == NULL)
b64a18c5 2262 DRM_INFO("PCI I/O BAR is not found.\n");
d38ceaf9
AD
2263
2264 /* early init functions */
2265 r = amdgpu_early_init(adev);
2266 if (r)
2267 return r;
2268
2269 /* if we have more than one VGA card, disable the amdgpu VGA resources */
2270 /* this will fail for cards that aren't VGA class devices, just
2271 * ignore it */
2272 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2273
2274 if (amdgpu_runtime_pm == 1)
2275 runtime = true;
e9bef455 2276 if (amdgpu_device_is_px(ddev))
d38ceaf9 2277 runtime = true;
84c8b22e
LW
2278 if (!pci_is_thunderbolt_attached(adev->pdev))
2279 vga_switcheroo_register_client(adev->pdev,
2280 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
2281 if (runtime)
2282 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2283
2284 /* Read BIOS */
83ba126a
AD
2285 if (!amdgpu_get_bios(adev)) {
2286 r = -EINVAL;
2287 goto failed;
2288 }
f7e9e9fe 2289
d38ceaf9 2290 r = amdgpu_atombios_init(adev);
2c1a2784
AD
2291 if (r) {
2292 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 2293 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 2294 goto failed;
2c1a2784 2295 }
d38ceaf9 2296
4e99a44e
ML
2297 /* detect if we are with an SRIOV vbios */
2298 amdgpu_device_detect_sriov_bios(adev);
048765ad 2299
d38ceaf9 2300 /* Post card if necessary */
91fe77eb 2301 if (amdgpu_need_post(adev)) {
d38ceaf9 2302 if (!adev->bios) {
bec86378 2303 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
2304 r = -EINVAL;
2305 goto failed;
d38ceaf9 2306 }
bec86378 2307 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
2308 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2309 if (r) {
2310 dev_err(adev->dev, "gpu post error!\n");
2311 goto failed;
2312 }
d38ceaf9
AD
2313 }
2314
88b64e95
AD
2315 if (adev->is_atom_fw) {
2316 /* Initialize clocks */
2317 r = amdgpu_atomfirmware_get_clock_info(adev);
2318 if (r) {
2319 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 2320 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
2321 goto failed;
2322 }
2323 } else {
a5bde2f9
AD
2324 /* Initialize clocks */
2325 r = amdgpu_atombios_get_clock_info(adev);
2326 if (r) {
2327 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 2328 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 2329 goto failed;
a5bde2f9
AD
2330 }
2331 /* init i2c buses */
4562236b
HW
2332 if (!amdgpu_device_has_dc_support(adev))
2333 amdgpu_atombios_i2c_init(adev);
2c1a2784 2334 }
d38ceaf9
AD
2335
2336 /* Fence driver */
2337 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
2338 if (r) {
2339 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 2340 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 2341 goto failed;
2c1a2784 2342 }
d38ceaf9
AD
2343
2344 /* init the mode config */
2345 drm_mode_config_init(adev->ddev);
2346
2347 r = amdgpu_init(adev);
2348 if (r) {
8840a387 2349 /* failed in exclusive mode due to timeout */
2350 if (amdgpu_sriov_vf(adev) &&
2351 !amdgpu_sriov_runtime(adev) &&
2352 amdgpu_virt_mmio_blocked(adev) &&
2353 !amdgpu_virt_wait_reset(adev)) {
2354 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
2355 /* Don't send request since VF is inactive. */
2356 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2357 adev->virt.ops = NULL;
8840a387 2358 r = -EAGAIN;
2359 goto failed;
2360 }
2c1a2784 2361 dev_err(adev->dev, "amdgpu_init failed\n");
e23b74aa 2362 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
d38ceaf9 2363 amdgpu_fini(adev);
83ba126a 2364 goto failed;
d38ceaf9
AD
2365 }
2366
2367 adev->accel_working = true;
2368
e59c0205
AX
2369 amdgpu_vm_check_compute_bug(adev);
2370
95844d20
MO
2371 /* Initialize the buffer migration limit. */
2372 if (amdgpu_moverate >= 0)
2373 max_MBps = amdgpu_moverate;
2374 else
2375 max_MBps = 8; /* Allow 8 MB/s. */
2376 /* Get a log2 for easy divisions. */
2377 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2378
d38ceaf9
AD
2379 r = amdgpu_ib_pool_init(adev);
2380 if (r) {
2381 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2382 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2383 goto failed;
d38ceaf9
AD
2384 }
2385
2386 r = amdgpu_ib_ring_tests(adev);
2387 if (r)
2388 DRM_ERROR("ib ring test failed (%d).\n", r);
2389
2dc8f81e
HC
2390 if (amdgpu_sriov_vf(adev))
2391 amdgpu_virt_init_data_exchange(adev);
2392
9bc92b9c
ML
2393 amdgpu_fbdev_init(adev);
2394
d2f52ac8
RZ
2395 r = amdgpu_pm_sysfs_init(adev);
2396 if (r)
2397 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2398
d38ceaf9 2399 r = amdgpu_gem_debugfs_init(adev);
3f14e623 2400 if (r)
d38ceaf9 2401 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2402
2403 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2404 if (r)
d38ceaf9 2405 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2406
50ab2533 2407 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2408 if (r)
50ab2533 2409 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2410
763efb6c 2411 r = amdgpu_debugfs_init(adev);
db95e218 2412 if (r)
763efb6c 2413 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2414
d38ceaf9
AD
2415 if ((amdgpu_testing & 1)) {
2416 if (adev->accel_working)
2417 amdgpu_test_moves(adev);
2418 else
2419 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2420 }
d38ceaf9
AD
2421 if (amdgpu_benchmarking) {
2422 if (adev->accel_working)
2423 amdgpu_benchmark(adev, amdgpu_benchmarking);
2424 else
2425 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2426 }
2427
2428 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2429 * explicit gating rather than handling it automatically.
2430 */
2431 r = amdgpu_late_init(adev);
2c1a2784
AD
2432 if (r) {
2433 dev_err(adev->dev, "amdgpu_late_init failed\n");
e23b74aa 2434 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2435 goto failed;
2c1a2784 2436 }
d38ceaf9
AD
2437
2438 return 0;
83ba126a
AD
2439
2440failed:
89041940 2441 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2442 if (runtime)
2443 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2444
83ba126a 2445 return r;
d38ceaf9
AD
2446}
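For context, amdgpu_device_init() is called once per device from the KMS load callback rather than directly from PCI probe. A simplified, illustrative sketch of that call site (based on amdgpu_kms.c of the same era; error paths and runtime-PM setup are omitted, so treat it as an approximation):

/* simplified, illustrative caller -- not part of this file */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	/* amdgpu_device_init() performs the hw and IP-block bring-up above */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r)
		kfree(adev);

	return r;
}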
2447
d38ceaf9
AD
2448/**
2449 * amdgpu_device_fini - tear down the driver
2450 *
2451 * @adev: amdgpu_device pointer
2452 *
2453 * Tear down the driver info (all asics).
2454 * Called at driver shutdown.
2455 */
2456void amdgpu_device_fini(struct amdgpu_device *adev)
2457{
2458 int r;
2459
2460 DRM_INFO("amdgpu: finishing device.\n");
2461 adev->shutdown = true;
db2c2a97
PD
2462 if (adev->mode_info.mode_config_initialized)
2463 drm_crtc_force_disable_all(adev->ddev);
b9141cd3 2464
d38ceaf9
AD
2465 amdgpu_ib_pool_fini(adev);
2466 amdgpu_fence_driver_fini(adev);
2467 amdgpu_fbdev_fini(adev);
2468 r = amdgpu_fini(adev);
ab4fe3e1
HR
2469 if (adev->firmware.gpu_info_fw) {
2470 release_firmware(adev->firmware.gpu_info_fw);
2471 adev->firmware.gpu_info_fw = NULL;
2472 }
d38ceaf9 2473 adev->accel_working = false;
2dc80b00 2474 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2475 /* free i2c buses */
4562236b
HW
2476 if (!amdgpu_device_has_dc_support(adev))
2477 amdgpu_i2c_fini(adev);
d38ceaf9
AD
2478 amdgpu_atombios_fini(adev);
2479 kfree(adev->bios);
2480 adev->bios = NULL;
84c8b22e
LW
2481 if (!pci_is_thunderbolt_attached(adev->pdev))
2482 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2483 if (adev->flags & AMD_IS_PX)
2484 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2485 vga_client_register(adev->pdev, NULL, NULL, NULL);
2486 if (adev->rio_mem)
2487 pci_iounmap(adev->pdev, adev->rio_mem);
2488 adev->rio_mem = NULL;
2489 iounmap(adev->rmmio);
2490 adev->rmmio = NULL;
705e519e 2491 amdgpu_doorbell_fini(adev);
d2f52ac8 2492 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2493 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2494}
2495
2496
2497/*
2498 * Suspend & resume.
2499 */
2500/**
810ddc3a 2501 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2502 *
2503 * @dev: drm dev pointer
2504 * @suspend: whether to put the device into a low power (D3hot) state
2505 *
2506 * Puts the hw in the suspend state (all asics).
2507 * Returns 0 for success or an error on failure.
2508 * Called at driver suspend.
2509 */
810ddc3a 2510int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2511{
2512 struct amdgpu_device *adev;
2513 struct drm_crtc *crtc;
2514 struct drm_connector *connector;
5ceb54c6 2515 int r;
d38ceaf9
AD
2516
2517 if (dev == NULL || dev->dev_private == NULL) {
2518 return -ENODEV;
2519 }
2520
2521 adev = dev->dev_private;
2522
2523 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2524 return 0;
2525
2526 drm_kms_helper_poll_disable(dev);
2527
4562236b
HW
2528 if (!amdgpu_device_has_dc_support(adev)) {
2529 /* turn off display hw */
2530 drm_modeset_lock_all(dev);
2531 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2532 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2533 }
2534 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2535 }
2536
ba997709
YZ
2537 amdgpu_amdkfd_suspend(adev);
2538
756e6880 2539 /* unpin the front buffers and cursors */
d38ceaf9 2540 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2542 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2543 struct amdgpu_bo *robj;
2544
756e6880
AD
2545 if (amdgpu_crtc->cursor_bo) {
2546 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2547 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2548 if (r == 0) {
2549 amdgpu_bo_unpin(aobj);
2550 amdgpu_bo_unreserve(aobj);
2551 }
2552 }
2553
d38ceaf9
AD
2554 if (rfb == NULL || rfb->obj == NULL) {
2555 continue;
2556 }
2557 robj = gem_to_amdgpu_bo(rfb->obj);
2558 /* don't unpin kernel fb objects */
2559 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2560 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2561 if (r == 0) {
2562 amdgpu_bo_unpin(robj);
2563 amdgpu_bo_unreserve(robj);
2564 }
2565 }
2566 }
2567 /* evict vram memory */
2568 amdgpu_bo_evict_vram(adev);
2569
5ceb54c6 2570 amdgpu_fence_driver_suspend(adev);
d38ceaf9
AD
2571
2572 r = amdgpu_suspend(adev);
2573
a0a71e49
AD
2574 /* evict remaining vram memory
2575 * This second call to evict vram is to evict the gart page table
2576 * using the CPU.
2577 */
d38ceaf9
AD
2578 amdgpu_bo_evict_vram(adev);
2579
2580 pci_save_state(dev->pdev);
2581 if (suspend) {
2582 /* Shut down the device */
2583 pci_disable_device(dev->pdev);
2584 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2585 } else {
2586 r = amdgpu_asic_reset(adev);
2587 if (r)
2588 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2589 }
2590
2591 if (fbcon) {
2592 console_lock();
2593 amdgpu_fbdev_set_suspend(adev, 1);
2594 console_unlock();
2595 }
2596 return 0;
2597}
2598
2599/**
810ddc3a 2600 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2601 *
2602 * @dev: drm dev pointer
2603 *
2604 * Bring the hw back to operating state (all asics).
2605 * Returns 0 for success or an error on failure.
2606 * Called at driver resume.
2607 */
810ddc3a 2608int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2609{
2610 struct drm_connector *connector;
2611 struct amdgpu_device *adev = dev->dev_private;
756e6880 2612 struct drm_crtc *crtc;
03161a6e 2613 int r = 0;
d38ceaf9
AD
2614
2615 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2616 return 0;
2617
74b0b157 2618 if (fbcon)
d38ceaf9 2619 console_lock();
74b0b157 2620
d38ceaf9
AD
2621 if (resume) {
2622 pci_set_power_state(dev->pdev, PCI_D0);
2623 pci_restore_state(dev->pdev);
74b0b157 2624 r = pci_enable_device(dev->pdev);
03161a6e
HR
2625 if (r)
2626 goto unlock;
d38ceaf9
AD
2627 }
2628
2629 /* post card */
c836fec5 2630 if (amdgpu_need_post(adev)) {
74b0b157 2631 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2632 if (r)
2633 DRM_ERROR("amdgpu asic init failed\n");
2634 }
d38ceaf9
AD
2635
2636 r = amdgpu_resume(adev);
e6707218 2637 if (r) {
ca198528 2638 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
03161a6e 2639 goto unlock;
e6707218 2640 }
5ceb54c6
AD
2641 amdgpu_fence_driver_resume(adev);
2642
ca198528
FC
2643 if (resume) {
2644 r = amdgpu_ib_ring_tests(adev);
2645 if (r)
2646 DRM_ERROR("ib ring test failed (%d).\n", r);
2647 }
d38ceaf9
AD
2648
2649 r = amdgpu_late_init(adev);
03161a6e
HR
2650 if (r)
2651 goto unlock;
d38ceaf9 2652
756e6880
AD
2653 /* pin cursors */
2654 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2655 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2656
2657 if (amdgpu_crtc->cursor_bo) {
2658 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2659 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2660 if (r == 0) {
2661 r = amdgpu_bo_pin(aobj,
2662 AMDGPU_GEM_DOMAIN_VRAM,
2663 &amdgpu_crtc->cursor_addr);
2664 if (r != 0)
2665 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2666 amdgpu_bo_unreserve(aobj);
2667 }
2668 }
2669 }
ba997709
YZ
2670 r = amdgpu_amdkfd_resume(adev);
2671 if (r)
2672 return r;
756e6880 2673
d38ceaf9
AD
2674 /* blat the mode back in */
2675 if (fbcon) {
4562236b
HW
2676 if (!amdgpu_device_has_dc_support(adev)) {
2677 /* pre DCE11 */
2678 drm_helper_resume_force_mode(dev);
2679
2680 /* turn on display hw */
2681 drm_modeset_lock_all(dev);
2682 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2683 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2684 }
2685 drm_modeset_unlock_all(dev);
2686 } else {
2687 /*
2688 * There is no equivalent atomic helper to turn on
2689 * display, so we defined our own function for this,
2690 * once suspend resume is supported by the atomic
2691 * framework this will be reworked
2692 */
2693 amdgpu_dm_display_resume(adev);
d38ceaf9
AD
2694 }
2695 }
2696
2697 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2698
2699 /*
2700 * Most of the connector probing functions try to acquire runtime pm
2701 * refs to ensure that the GPU is powered on when connector polling is
2702 * performed. Since we're calling this from a runtime PM callback,
2703 * trying to acquire rpm refs will cause us to deadlock.
2704 *
2705 * Since we're guaranteed to be holding the rpm lock, it's safe to
2706 * temporarily disable the rpm helpers so this doesn't deadlock us.
2707 */
2708#ifdef CONFIG_PM
2709 dev->dev->power.disable_depth++;
2710#endif
4562236b
HW
2711 if (!amdgpu_device_has_dc_support(adev))
2712 drm_helper_hpd_irq_event(dev);
2713 else
2714 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2715#ifdef CONFIG_PM
2716 dev->dev->power.disable_depth--;
2717#endif
d38ceaf9 2718
03161a6e 2719 if (fbcon)
d38ceaf9 2720 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2721
2722unlock:
2723 if (fbcon)
d38ceaf9 2724 console_unlock();
d38ceaf9 2725
03161a6e 2726 return r;
d38ceaf9
AD
2727}
2728
63fbf42f
CZ
2729static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2730{
2731 int i;
2732 bool asic_hang = false;
2733
f993d628
ML
2734 if (amdgpu_sriov_vf(adev))
2735 return true;
2736
63fbf42f 2737 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2738 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2739 continue;
a1255107
AD
2740 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2741 adev->ip_blocks[i].status.hang =
2742 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2743 if (adev->ip_blocks[i].status.hang) {
2744 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2745 asic_hang = true;
2746 }
2747 }
2748 return asic_hang;
2749}
2750
4d446656 2751static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2752{
2753 int i, r = 0;
2754
2755 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2756 if (!adev->ip_blocks[i].status.valid)
d31a501e 2757 continue;
a1255107
AD
2758 if (adev->ip_blocks[i].status.hang &&
2759 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2760 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2761 if (r)
2762 return r;
2763 }
2764 }
2765
2766 return 0;
2767}
2768
35d782fe
CZ
2769static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2770{
da146d3b
AD
2771 int i;
2772
2773 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2774 if (!adev->ip_blocks[i].status.valid)
da146d3b 2775 continue;
a1255107
AD
2776 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2777 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2778 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2779 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2780 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2781 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2782 DRM_INFO("Some block needs a full reset!\n");
2783 return true;
2784 }
2785 }
35d782fe
CZ
2786 }
2787 return false;
2788}
2789
2790static int amdgpu_soft_reset(struct amdgpu_device *adev)
2791{
2792 int i, r = 0;
2793
2794 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2795 if (!adev->ip_blocks[i].status.valid)
35d782fe 2796 continue;
a1255107
AD
2797 if (adev->ip_blocks[i].status.hang &&
2798 adev->ip_blocks[i].version->funcs->soft_reset) {
2799 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2800 if (r)
2801 return r;
2802 }
2803 }
2804
2805 return 0;
2806}
2807
2808static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2809{
2810 int i, r = 0;
2811
2812 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2813 if (!adev->ip_blocks[i].status.valid)
35d782fe 2814 continue;
a1255107
AD
2815 if (adev->ip_blocks[i].status.hang &&
2816 adev->ip_blocks[i].version->funcs->post_soft_reset)
2817 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2818 if (r)
2819 return r;
2820 }
2821
2822 return 0;
2823}
2824
3ad81f16
CZ
2825bool amdgpu_need_backup(struct amdgpu_device *adev)
2826{
2827 if (adev->flags & AMD_IS_APU)
2828 return false;
2829
2830 return amdgpu_lockup_timeout > 0 ? true : false;
2831}
2832
53cdccd5
CZ
2833static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2834 struct amdgpu_ring *ring,
2835 struct amdgpu_bo *bo,
f54d1867 2836 struct dma_fence **fence)
53cdccd5
CZ
2837{
2838 uint32_t domain;
2839 int r;
2840
23d2e504
RH
2841 if (!bo->shadow)
2842 return 0;
2843
1d284797 2844 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2845 if (r)
2846 return r;
2847 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2848 /* if bo has been evicted, then no need to recover */
2849 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2850 r = amdgpu_bo_validate(bo->shadow);
2851 if (r) {
2852 DRM_ERROR("bo validate failed!\n");
2853 goto err;
2854 }
2855
23d2e504 2856 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2857 NULL, fence, true);
23d2e504
RH
2858 if (r) {
2859 DRM_ERROR("recover page table failed!\n");
2860 goto err;
2861 }
2862 }
53cdccd5 2863err:
23d2e504
RH
2864 amdgpu_bo_unreserve(bo);
2865 return r;
53cdccd5
CZ
2866}
2867
5740682e
ML
2868/*
2869 * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2870 *
2871 * @adev: amdgpu device pointer
5740682e 2872 * @reset_flags: output param tells caller the reset result
a90ad3c2 2873 *
5740682e
ML
2874 * Attempt a soft reset or, if necessary, a full reset, then reinitialize the ASIC.
2875 * Returns 0 on success, a negative error code otherwise.
2876*/
2877static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
a90ad3c2 2878{
5740682e
ML
2879 bool need_full_reset, vram_lost = 0;
2880 int r;
a90ad3c2 2881
5740682e 2882 need_full_reset = amdgpu_need_full_reset(adev);
a90ad3c2 2883
5740682e
ML
2884 if (!need_full_reset) {
2885 amdgpu_pre_soft_reset(adev);
2886 r = amdgpu_soft_reset(adev);
2887 amdgpu_post_soft_reset(adev);
2888 if (r || amdgpu_check_soft_reset(adev)) {
2889 DRM_INFO("soft reset failed, will fall back to full reset!\n");
2890 need_full_reset = true;
2891 }
a90ad3c2 2892
5740682e 2893 }
a90ad3c2 2894
5740682e
ML
2895 if (need_full_reset) {
2896 r = amdgpu_suspend(adev);
a90ad3c2 2897
5740682e 2898retry:
5740682e 2899 r = amdgpu_asic_reset(adev);
5740682e
ML
2900 /* post card */
2901 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2902
5740682e
ML
2903 if (!r) {
2904 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2905 r = amdgpu_resume_phase1(adev);
2906 if (r)
2907 goto out;
65781c78 2908
5740682e
ML
2909 vram_lost = amdgpu_check_vram_lost(adev);
2910 if (vram_lost) {
2911 DRM_ERROR("VRAM is lost!\n");
2912 atomic_inc(&adev->vram_lost_counter);
2913 }
2914
c1c7ce8f
CK
2915 r = amdgpu_gtt_mgr_recover(
2916 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2917 if (r)
2918 goto out;
2919
2920 r = amdgpu_resume_phase2(adev);
2921 if (r)
2922 goto out;
2923
2924 if (vram_lost)
2925 amdgpu_fill_reset_magic(adev);
65781c78 2926 }
5740682e 2927 }
65781c78 2928
5740682e
ML
2929out:
2930 if (!r) {
2931 amdgpu_irq_gpu_reset_resume_helper(adev);
2932 r = amdgpu_ib_ring_tests(adev);
2933 if (r) {
2934 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2935 r = amdgpu_suspend(adev);
2936 need_full_reset = true;
2937 goto retry;
2938 }
2939 }
65781c78 2940
5740682e
ML
2941 if (reset_flags) {
2942 if (vram_lost)
2943 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
a90ad3c2 2944
5740682e
ML
2945 if (need_full_reset)
2946 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
65781c78 2947 }
a90ad3c2 2948
5740682e
ML
2949 return r;
2950}
a90ad3c2 2951
5740682e
ML
2952/*
2953 * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
2954 *
2955 * @adev: amdgpu device pointer
2956 * @reset_flags: output param tells caller the reset result
2957 *
2958 * Perform a VF FLR and reinitialize the ASIC.
2959 * Returns 0 on success, a negative error code otherwise.
2960*/
2961static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
2962{
2963 int r;
2964
2965 if (from_hypervisor)
2966 r = amdgpu_virt_request_full_gpu(adev, true);
2967 else
2968 r = amdgpu_virt_reset_gpu(adev);
2969 if (r)
2970 return r;
a90ad3c2
ML
2971
2972 /* Resume IP prior to SMC */
5740682e
ML
2973 r = amdgpu_sriov_reinit_early(adev);
2974 if (r)
2975 goto error;
a90ad3c2
ML
2976
2977 /* we need to recover the GART before resuming SMC/CP/SDMA */
c1c7ce8f 2978 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
2979
2980 /* now we are okay to resume SMC/CP/SDMA */
5740682e
ML
2981 r = amdgpu_sriov_reinit_late(adev);
2982 if (r)
2983 goto error;
a90ad3c2
ML
2984
2985 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e
ML
2986 r = amdgpu_ib_ring_tests(adev);
2987 if (r)
a90ad3c2
ML
2988 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2989
5740682e 2990error:
a90ad3c2
ML
2991 /* release full control of GPU after ib test */
2992 amdgpu_virt_release_full_gpu(adev, true);
2993
5740682e 2994 if (reset_flags) {
75bc6099
ML
2995 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2996 (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
2997 atomic_inc(&adev->vram_lost_counter);
2998 }
a90ad3c2 2999
5740682e
ML
3000 /* VF FLR or hotlink reset is always full-reset */
3001 (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
a90ad3c2
ML
3002 }
3003
3004 return r;
3005}
3006
d38ceaf9 3007/**
5740682e 3008 * amdgpu_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
3009 *
3010 * @adev: amdgpu device pointer
5740682e 3011 * @job: which job triggered the hang
d38ceaf9 3012 *
5740682e 3013 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
3014 * Returns 0 for success or an error on failure.
3015 */
5740682e 3016int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
d38ceaf9 3017{
4562236b 3018 struct drm_atomic_state *state = NULL;
5740682e
ML
3019 uint64_t reset_flags = 0;
3020 int i, r, resched;
fb140b29 3021
63fbf42f
CZ
3022 if (!amdgpu_check_soft_reset(adev)) {
3023 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3024 return 0;
3025 }
d38ceaf9 3026
5740682e
ML
3027 dev_info(adev->dev, "GPU reset begin!\n");
3028
13a752e3 3029 mutex_lock(&adev->lock_reset);
d94aed5a 3030 atomic_inc(&adev->gpu_reset_counter);
13a752e3 3031 adev->in_gpu_reset = 1;
d38ceaf9 3032
a3c47d6b
CZ
3033 /* block TTM */
3034 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4562236b
HW
3035 /* store modesetting */
3036 if (amdgpu_device_has_dc_support(adev))
3037 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 3038
0875dc9e
CZ
3039 /* block scheduler */
3040 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3041 struct amdgpu_ring *ring = adev->rings[i];
3042
51687759 3043 if (!ring || !ring->sched.thread)
0875dc9e 3044 continue;
5740682e
ML
3045
3046 /* only focus on the ring that hit the timeout if @job is not NULL */
3047 if (job && job->ring->idx != i)
3048 continue;
3049
0875dc9e 3050 kthread_park(ring->sched.thread);
1b1f42d8 3051 drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
5740682e 3052
2f9d4084
ML
3053 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3054 amdgpu_fence_driver_force_completion(ring);
0875dc9e 3055 }
d38ceaf9 3056
5740682e
ML
3057 if (amdgpu_sriov_vf(adev))
3058 r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
3059 else
3060 r = amdgpu_reset(adev, &reset_flags);
35d782fe 3061
d38ceaf9 3062 if (!r) {
5740682e
ML
3063 if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
3064 (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
53cdccd5
CZ
3065 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3066 struct amdgpu_bo *bo, *tmp;
f54d1867 3067 struct dma_fence *fence = NULL, *next = NULL;
53cdccd5
CZ
3068
3069 DRM_INFO("recover vram bo from shadow\n");
3070 mutex_lock(&adev->shadow_list_lock);
3071 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
236763d3 3072 next = NULL;
53cdccd5
CZ
3073 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3074 if (fence) {
f54d1867 3075 r = dma_fence_wait(fence, false);
53cdccd5 3076 if (r) {
1d7b17b0 3077 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5
CZ
3078 break;
3079 }
3080 }
1f465087 3081
f54d1867 3082 dma_fence_put(fence);
53cdccd5
CZ
3083 fence = next;
3084 }
3085 mutex_unlock(&adev->shadow_list_lock);
3086 if (fence) {
f54d1867 3087 r = dma_fence_wait(fence, false);
53cdccd5 3088 if (r)
1d7b17b0 3089 WARN(r, "recovery from shadow isn't completed\n");
53cdccd5 3090 }
f54d1867 3091 dma_fence_put(fence);
53cdccd5 3092 }
5740682e 3093
d38ceaf9
AD
3094 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3095 struct amdgpu_ring *ring = adev->rings[i];
51687759
CZ
3096
3097 if (!ring || !ring->sched.thread)
d38ceaf9 3098 continue;
53cdccd5 3099
5740682e
ML
3100 /* only focus on the ring that hit the timeout if @job is not NULL */
3101 if (job && job->ring->idx != i)
3102 continue;
3103
1b1f42d8 3104 drm_sched_job_recovery(&ring->sched);
0875dc9e 3105 kthread_unpark(ring->sched.thread);
d38ceaf9 3106 }
d38ceaf9 3107 } else {
d38ceaf9 3108 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5740682e
ML
3109 struct amdgpu_ring *ring = adev->rings[i];
3110
3111 if (!ring || !ring->sched.thread)
3112 continue;
3113
3115 /* only focus on the ring that hit the timeout if @job is not NULL */
3115 if (job && job->ring->idx != i)
3116 continue;
3117
3118 kthread_unpark(adev->rings[i]->sched.thread);
d38ceaf9
AD
3119 }
3120 }
3121
4562236b 3122 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
3123 if (drm_atomic_helper_resume(adev->ddev, state))
3124 dev_info(adev->dev, "drm resume failed:%d\n", r);
4562236b 3125 amdgpu_dm_display_resume(adev);
5740682e 3126 } else {
4562236b 3127 drm_helper_resume_force_mode(adev->ddev);
5740682e 3128 }
d38ceaf9
AD
3129
3130 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 3131
89041940 3132 if (r) {
d38ceaf9 3133 /* bad news, how to tell it to userspace ? */
5740682e
ML
3134 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3135 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3136 } else {
3137 dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
89041940 3138 }
d38ceaf9 3139
89041940 3140 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
3141 adev->in_gpu_reset = 0;
3142 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
3143 return r;
3144}
3145
d0dd7f0c
AD
3146void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3147{
3148 u32 mask;
3149 int ret;
3150
cd474ba0
AD
3151 if (amdgpu_pcie_gen_cap)
3152 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 3153
cd474ba0
AD
3154 if (amdgpu_pcie_lane_cap)
3155 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 3156
cd474ba0
AD
3157 /* covers APUs as well */
3158 if (pci_is_root_bus(adev->pdev->bus)) {
3159 if (adev->pm.pcie_gen_mask == 0)
3160 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3161 if (adev->pm.pcie_mlw_mask == 0)
3162 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 3163 return;
cd474ba0 3164 }
d0dd7f0c 3165
cd474ba0
AD
3166 if (adev->pm.pcie_gen_mask == 0) {
3167 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3168 if (!ret) {
3169 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3170 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3171 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3172
3173 if (mask & DRM_PCIE_SPEED_25)
3174 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3175 if (mask & DRM_PCIE_SPEED_50)
3176 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3177 if (mask & DRM_PCIE_SPEED_80)
3178 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3179 } else {
3180 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3181 }
3182 }
3183 if (adev->pm.pcie_mlw_mask == 0) {
3184 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3185 if (!ret) {
3186 switch (mask) {
3187 case 32:
3188 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3189 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3190 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3191 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3192 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3193 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3194 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3195 break;
3196 case 16:
3197 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3198 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3199 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3200 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3201 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3202 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3203 break;
3204 case 12:
3205 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3206 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3207 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3208 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3209 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3210 break;
3211 case 8:
3212 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3213 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3214 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3215 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3216 break;
3217 case 4:
3218 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3219 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3220 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3221 break;
3222 case 2:
3223 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3224 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3225 break;
3226 case 1:
3227 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3228 break;
3229 default:
3230 break;
3231 }
3232 } else {
3233 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
3234 }
3235 }
3236}
d38ceaf9
AD
3237
3238/*
3239 * Debugfs
3240 */
3241int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
06ab6832 3242 const struct drm_info_list *files,
d38ceaf9
AD
3243 unsigned nfiles)
3244{
3245 unsigned i;
3246
3247 for (i = 0; i < adev->debugfs_count; i++) {
3248 if (adev->debugfs[i].files == files) {
3249 /* Already registered */
3250 return 0;
3251 }
3252 }
3253
3254 i = adev->debugfs_count + 1;
3255 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3256 DRM_ERROR("Reached maximum number of debugfs components.\n");
3257 DRM_ERROR("Report so we increase "
3258 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3259 return -EINVAL;
3260 }
3261 adev->debugfs[adev->debugfs_count].files = files;
3262 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3263 adev->debugfs_count = i;
3264#if defined(CONFIG_DEBUG_FS)
d38ceaf9
AD
3265 drm_debugfs_create_files(files, nfiles,
3266 adev->ddev->primary->debugfs_root,
3267 adev->ddev->primary);
3268#endif
3269 return 0;
3270}
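A minimal usage sketch for the helper above; the show callback and file list below are hypothetical names, not taken from this file, and follow the usual drm_info_list pattern:

/* hypothetical example of registering one debugfs info file */
static int amdgpu_debugfs_example_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_example_list[] = {
	{ "amdgpu_example", amdgpu_debugfs_example_show, 0, NULL },
};

/* then, from an init path that has the device pointer:
 *	r = amdgpu_debugfs_add_files(adev, amdgpu_debugfs_example_list,
 *				     ARRAY_SIZE(amdgpu_debugfs_example_list));
 */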
3271
d38ceaf9
AD
3272#if defined(CONFIG_DEBUG_FS)
3273
3274static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3275 size_t size, loff_t *pos)
3276{
45063097 3277 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3278 ssize_t result = 0;
3279 int r;
bd12267d 3280 bool pm_pg_lock, use_bank;
56628159 3281 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3282
3283 if (size & 0x3 || *pos & 0x3)
3284 return -EINVAL;
3285
bd12267d
TSD
3286 /* are we reading registers for which a PG lock is necessary? */
3287 pm_pg_lock = (*pos >> 23) & 1;
3288
56628159 3289 if (*pos & (1ULL << 62)) {
0b968650
TSD
3290 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3291 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3292 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
32977f93
TSD
3293
3294 if (se_bank == 0x3FF)
3295 se_bank = 0xFFFFFFFF;
3296 if (sh_bank == 0x3FF)
3297 sh_bank = 0xFFFFFFFF;
3298 if (instance_bank == 0x3FF)
3299 instance_bank = 0xFFFFFFFF;
56628159 3300 use_bank = 1;
56628159
TSD
3301 } else {
3302 use_bank = 0;
3303 }
3304
801a6aa9 3305 *pos &= (1UL << 22) - 1;
bd12267d 3306
56628159 3307 if (use_bank) {
32977f93
TSD
3308 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3309 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
56628159
TSD
3310 return -EINVAL;
3311 mutex_lock(&adev->grbm_idx_mutex);
3312 amdgpu_gfx_select_se_sh(adev, se_bank,
3313 sh_bank, instance_bank);
3314 }
3315
bd12267d
TSD
3316 if (pm_pg_lock)
3317 mutex_lock(&adev->pm.mutex);
3318
d38ceaf9
AD
3319 while (size) {
3320 uint32_t value;
3321
3322 if (*pos > adev->rmmio_size)
56628159 3323 goto end;
d38ceaf9
AD
3324
3325 value = RREG32(*pos >> 2);
3326 r = put_user(value, (uint32_t *)buf);
56628159
TSD
3327 if (r) {
3328 result = r;
3329 goto end;
3330 }
d38ceaf9
AD
3331
3332 result += 4;
3333 buf += 4;
3334 *pos += 4;
3335 size -= 4;
3336 }
3337
56628159
TSD
3338end:
3339 if (use_bank) {
3340 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3341 mutex_unlock(&adev->grbm_idx_mutex);
3342 }
3343
bd12267d
TSD
3344 if (pm_pg_lock)
3345 mutex_unlock(&adev->pm.mutex);
3346
d38ceaf9
AD
3347 return result;
3348}
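The read path above overloads the file offset: bit 62 enables banked access, bits 33:24/43:34/53:44 carry the SE, SH and instance selectors (0x3FF meaning broadcast), bit 23 requests the PM mutex, and the low 22 bits are the register byte offset that ends up in RREG32(*pos >> 2). A small illustrative helper (not part of this file) that composes such an offset for a user of the amdgpu_regs debugfs file:

/* illustrative only -- mirrors the decoding in amdgpu_debugfs_regs_read() */
static loff_t amdgpu_regs_banked_offset(unsigned int reg_byte_offset,
					unsigned int se, unsigned int sh,
					unsigned int instance, bool pm_lock)
{
	loff_t pos = reg_byte_offset & ((1ULL << 22) - 1);

	pos |= 1ULL << 62;			 /* enable SE/SH/instance banking */
	pos |= (loff_t)(se & 0x3FF) << 24;	 /* shader engine, 0x3FF = all */
	pos |= (loff_t)(sh & 0x3FF) << 34;	 /* shader array, 0x3FF = all */
	pos |= (loff_t)(instance & 0x3FF) << 44; /* instance, 0x3FF = all */
	if (pm_lock)
		pos |= 1ULL << 23;		 /* hold adev->pm.mutex around the access */

	return pos;
}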
3349
3350static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3351 size_t size, loff_t *pos)
3352{
45063097 3353 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
3354 ssize_t result = 0;
3355 int r;
394fdde2
TSD
3356 bool pm_pg_lock, use_bank;
3357 unsigned instance_bank, sh_bank, se_bank;
d38ceaf9
AD
3358
3359 if (size & 0x3 || *pos & 0x3)
3360 return -EINVAL;
3361
394fdde2
TSD
3362 /* are we writing registers for which a PG lock is necessary? */
3363 pm_pg_lock = (*pos >> 23) & 1;
3364
3365 if (*pos & (1ULL << 62)) {
0b968650
TSD
3366 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3367 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3368 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
394fdde2
TSD
3369
3370 if (se_bank == 0x3FF)
3371 se_bank = 0xFFFFFFFF;
3372 if (sh_bank == 0x3FF)
3373 sh_bank = 0xFFFFFFFF;
3374 if (instance_bank == 0x3FF)
3375 instance_bank = 0xFFFFFFFF;
3376 use_bank = 1;
3377 } else {
3378 use_bank = 0;
3379 }
3380
801a6aa9 3381 *pos &= (1UL << 22) - 1;
394fdde2
TSD
3382
3383 if (use_bank) {
3384 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3385 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3386 return -EINVAL;
3387 mutex_lock(&adev->grbm_idx_mutex);
3388 amdgpu_gfx_select_se_sh(adev, se_bank,
3389 sh_bank, instance_bank);
3390 }
3391
3392 if (pm_pg_lock)
3393 mutex_lock(&adev->pm.mutex);
3394
d38ceaf9
AD
3395 while (size) {
3396 uint32_t value;
3397
3398 if (*pos > adev->rmmio_size)
3399 return result;
3400
3401 r = get_user(value, (uint32_t *)buf);
3402 if (r)
3403 return r;
3404
3405 WREG32(*pos >> 2, value);
3406
3407 result += 4;
3408 buf += 4;
3409 *pos += 4;
3410 size -= 4;
3411 }
3412
394fdde2
TSD
3413 if (use_bank) {
3414 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3415 mutex_unlock(&adev->grbm_idx_mutex);
3416 }
3417
3418 if (pm_pg_lock)
3419 mutex_unlock(&adev->pm.mutex);
3420
d38ceaf9
AD
3421 return result;
3422}
3423
adcec288
TSD
3424static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3425 size_t size, loff_t *pos)
3426{
45063097 3427 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3428 ssize_t result = 0;
3429 int r;
3430
3431 if (size & 0x3 || *pos & 0x3)
3432 return -EINVAL;
3433
3434 while (size) {
3435 uint32_t value;
3436
3437 value = RREG32_PCIE(*pos >> 2);
3438 r = put_user(value, (uint32_t *)buf);
3439 if (r)
3440 return r;
3441
3442 result += 4;
3443 buf += 4;
3444 *pos += 4;
3445 size -= 4;
3446 }
3447
3448 return result;
3449}
3450
3451static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3452 size_t size, loff_t *pos)
3453{
45063097 3454 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3455 ssize_t result = 0;
3456 int r;
3457
3458 if (size & 0x3 || *pos & 0x3)
3459 return -EINVAL;
3460
3461 while (size) {
3462 uint32_t value;
3463
3464 r = get_user(value, (uint32_t *)buf);
3465 if (r)
3466 return r;
3467
3468 WREG32_PCIE(*pos >> 2, value);
3469
3470 result += 4;
3471 buf += 4;
3472 *pos += 4;
3473 size -= 4;
3474 }
3475
3476 return result;
3477}
3478
3479static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3480 size_t size, loff_t *pos)
3481{
45063097 3482 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3483 ssize_t result = 0;
3484 int r;
3485
3486 if (size & 0x3 || *pos & 0x3)
3487 return -EINVAL;
3488
3489 while (size) {
3490 uint32_t value;
3491
3492 value = RREG32_DIDT(*pos >> 2);
3493 r = put_user(value, (uint32_t *)buf);
3494 if (r)
3495 return r;
3496
3497 result += 4;
3498 buf += 4;
3499 *pos += 4;
3500 size -= 4;
3501 }
3502
3503 return result;
3504}
3505
3506static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3507 size_t size, loff_t *pos)
3508{
45063097 3509 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3510 ssize_t result = 0;
3511 int r;
3512
3513 if (size & 0x3 || *pos & 0x3)
3514 return -EINVAL;
3515
3516 while (size) {
3517 uint32_t value;
3518
3519 r = get_user(value, (uint32_t *)buf);
3520 if (r)
3521 return r;
3522
3523 WREG32_DIDT(*pos >> 2, value);
3524
3525 result += 4;
3526 buf += 4;
3527 *pos += 4;
3528 size -= 4;
3529 }
3530
3531 return result;
3532}
3533
3534static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3535 size_t size, loff_t *pos)
3536{
45063097 3537 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3538 ssize_t result = 0;
3539 int r;
3540
3541 if (size & 0x3 || *pos & 0x3)
3542 return -EINVAL;
3543
3544 while (size) {
3545 uint32_t value;
3546
6fc0deaf 3547 value = RREG32_SMC(*pos);
adcec288
TSD
3548 r = put_user(value, (uint32_t *)buf);
3549 if (r)
3550 return r;
3551
3552 result += 4;
3553 buf += 4;
3554 *pos += 4;
3555 size -= 4;
3556 }
3557
3558 return result;
3559}
3560
3561static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3562 size_t size, loff_t *pos)
3563{
45063097 3564 struct amdgpu_device *adev = file_inode(f)->i_private;
adcec288
TSD
3565 ssize_t result = 0;
3566 int r;
3567
3568 if (size & 0x3 || *pos & 0x3)
3569 return -EINVAL;
3570
3571 while (size) {
3572 uint32_t value;
3573
3574 r = get_user(value, (uint32_t *)buf);
3575 if (r)
3576 return r;
3577
6fc0deaf 3578 WREG32_SMC(*pos, value);
adcec288
TSD
3579
3580 result += 4;
3581 buf += 4;
3582 *pos += 4;
3583 size -= 4;
3584 }
3585
3586 return result;
3587}
3588
1e051413
TSD
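/*
 * amdgpu_gca_config: dump a versioned snapshot of the GFX configuration.
 * The snapshot is assembled into a temporary array and the requested window
 * is copied to userspace; the leading version word (currently 3) is bumped
 * whenever new fields are appended so tools can tell what is present.
 */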
3589static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3590 size_t size, loff_t *pos)
3591{
45063097 3592 struct amdgpu_device *adev = file_inode(f)->i_private;
1e051413
TSD
3593 ssize_t result = 0;
3594 int r;
3595 uint32_t *config, no_regs = 0;
3596
3597 if (size & 0x3 || *pos & 0x3)
3598 return -EINVAL;
3599
ecab7668 3600 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
1e051413
TSD
3601 if (!config)
3602 return -ENOMEM;
3603
3604 /* version, increment each time something is added */
9a999359 3605 config[no_regs++] = 3;
1e051413
TSD
3606 config[no_regs++] = adev->gfx.config.max_shader_engines;
3607 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3608 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3609 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3610 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3611 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3612 config[no_regs++] = adev->gfx.config.max_gprs;
3613 config[no_regs++] = adev->gfx.config.max_gs_threads;
3614 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3615 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3616 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3617 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3618 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3619 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3620 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3621 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3622 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3623 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3624 config[no_regs++] = adev->gfx.config.num_gpus;
3625 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3626 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3627 config[no_regs++] = adev->gfx.config.gb_addr_config;
3628 config[no_regs++] = adev->gfx.config.num_rbs;
3629
89a8f309
TSD
3630 /* rev==1 */
3631 config[no_regs++] = adev->rev_id;
3632 config[no_regs++] = adev->pg_flags;
3633 config[no_regs++] = adev->cg_flags;
3634
e9f11dc8
TSD
3635 /* rev==2 */
3636 config[no_regs++] = adev->family;
3637 config[no_regs++] = adev->external_rev_id;
3638
9a999359
TSD
3639 /* rev==3 */
3640 config[no_regs++] = adev->pdev->device;
3641 config[no_regs++] = adev->pdev->revision;
3642 config[no_regs++] = adev->pdev->subsystem_device;
3643 config[no_regs++] = adev->pdev->subsystem_vendor;
3644
1e051413
TSD
3645 while (size && (*pos < no_regs * 4)) {
3646 uint32_t value;
3647
3648 value = config[*pos >> 2];
3649 r = put_user(value, (uint32_t *)buf);
3650 if (r) {
3651 kfree(config);
3652 return r;
3653 }
3654
3655 result += 4;
3656 buf += 4;
3657 *pos += 4;
3658 size -= 4;
3659 }
3660
3661 kfree(config);
3662 return result;
3663}
3664
f2cdaf20
TSD
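/*
 * amdgpu_sensors: read a power/thermal sensor through the powerplay
 * read_sensor callback. The file offset selects the sensor id (*pos >> 2);
 * returns -EINVAL if DPM is disabled or no read_sensor hook is available.
 */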
3665static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3666 size_t size, loff_t *pos)
3667{
45063097 3668 struct amdgpu_device *adev = file_inode(f)->i_private;
9f8df7d7
TSD
3669 int idx, x, outsize, r, valuesize;
3670 uint32_t values[16];
f2cdaf20 3671
9f8df7d7 3672 if (size & 3 || *pos & 0x3)
f2cdaf20
TSD
3673 return -EINVAL;
3674
3cbc614f
SP
3675 if (amdgpu_dpm == 0)
3676 return -EINVAL;
3677
f2cdaf20
TSD
3678 /* convert offset to sensor number */
3679 idx = *pos >> 2;
3680
9f8df7d7 3681 valuesize = sizeof(values);
f2cdaf20 3682 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
cd4d7464 3683 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
f2cdaf20
TSD
3684 else
3685 return -EINVAL;
3686
9f8df7d7
TSD
3687 if (size > valuesize)
3688 return -EINVAL;
3689
3690 outsize = 0;
3691 x = 0;
3692 if (!r) {
3693 while (size) {
3694 r = put_user(values[x++], (int32_t *)buf);
     if (r)
             break;
3695 buf += 4;
3696 size -= 4;
3697 outsize += 4;
3698 }
3699 }
f2cdaf20 3700
9f8df7d7 3701 return !r ? outsize : r;
f2cdaf20 3702}
1e051413 3703
273d7aa1
TSD
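/*
 * amdgpu_wave: dump the status registers of a single wavefront. The target
 * is packed into the file offset, i.e. userspace seeks to
 *
 *   offset | (se << 7) | (sh << 15) | (cu << 23) | (wave << 31) | (simd << 37)
 *
 * (see the GENMASK decode below). The GRBM index is switched to the
 * requested SE/SH/CU for the duration of the read.
 */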
3704static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3705 size_t size, loff_t *pos)
3706{
3707 struct amdgpu_device *adev = file_inode(f)->i_private;
3708 int r, x;
3709 ssize_t result = 0;
472259f0 3710 uint32_t offset, se, sh, cu, wave, simd, data[32];
273d7aa1
TSD
3711
3712 if (size & 3 || *pos & 3)
3713 return -EINVAL;
3714
3715 /* decode offset */
0b968650
TSD
3716 offset = (*pos & GENMASK_ULL(6, 0));
3717 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3718 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3719 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3720 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3721 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
273d7aa1
TSD
3722
3723 /* switch to the specific se/sh/cu */
3724 mutex_lock(&adev->grbm_idx_mutex);
3725 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3726
3727 x = 0;
472259f0
TSD
3728 if (adev->gfx.funcs->read_wave_data)
3729 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
273d7aa1
TSD
3730
3731 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3732 mutex_unlock(&adev->grbm_idx_mutex);
3733
5ecfb3b8
TSD
3734 if (!x)
3735 return -EINVAL;
3736
472259f0 3737 while (size && (offset < x * 4)) {
273d7aa1
TSD
3738 uint32_t value;
3739
472259f0 3740 value = data[offset >> 2];
273d7aa1
TSD
3741 r = put_user(value, (uint32_t *)buf);
3742 if (r)
3743 return r;
3744
3745 result += 4;
3746 buf += 4;
472259f0 3747 offset += 4;
273d7aa1
TSD
3748 size -= 4;
3749 }
3750
3751 return result;
3752}
3753
c5a60ce8
TSD
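/*
 * amdgpu_gpr: dump the general purpose registers of one wavefront. The file
 * offset packs offset/SE/SH/CU/wave/SIMD/thread/bank (see the decode below);
 * bank 0 selects VGPRs, any other value selects SGPRs.
 */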
3754static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3755 size_t size, loff_t *pos)
3756{
3757 struct amdgpu_device *adev = file_inode(f)->i_private;
3758 int r;
3759 ssize_t result = 0;
3760 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3761
3762 if (size & 3 || *pos & 3)
3763 return -EINVAL;
3764
3765 /* decode offset */
0b968650
TSD
3766 offset = *pos & GENMASK_ULL(11, 0);
3767 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3768 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3769 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3770 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3771 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3772 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3773 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
c5a60ce8
TSD
3774
3775 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3776 if (!data)
3777 return -ENOMEM;
3778
3779 /* switch to the specific se/sh/cu */
3780 mutex_lock(&adev->grbm_idx_mutex);
3781 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3782
3783 if (bank == 0) {
3784 if (adev->gfx.funcs->read_wave_vgprs)
3785 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3786 } else {
3787 if (adev->gfx.funcs->read_wave_sgprs)
3788 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3789 }
3790
3791 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3792 mutex_unlock(&adev->grbm_idx_mutex);
3793
3794 while (size) {
3795 uint32_t value;
3796
3797 value = data[result >> 2];
3798 r = put_user(value, (uint32_t *)buf);
3799 if (r) {
3800 result = r;
3801 goto err;
3802 }
3803
3804 result += 4;
3805 buf += 4;
3806 size -= 4;
3807 }
3808
3809err:
3810 kfree(data);
3811 return result;
3812}
3813
d38ceaf9
AD
3814static const struct file_operations amdgpu_debugfs_regs_fops = {
3815 .owner = THIS_MODULE,
3816 .read = amdgpu_debugfs_regs_read,
3817 .write = amdgpu_debugfs_regs_write,
3818 .llseek = default_llseek
3819};
adcec288
TSD
3820static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3821 .owner = THIS_MODULE,
3822 .read = amdgpu_debugfs_regs_didt_read,
3823 .write = amdgpu_debugfs_regs_didt_write,
3824 .llseek = default_llseek
3825};
3826static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3827 .owner = THIS_MODULE,
3828 .read = amdgpu_debugfs_regs_pcie_read,
3829 .write = amdgpu_debugfs_regs_pcie_write,
3830 .llseek = default_llseek
3831};
3832static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3833 .owner = THIS_MODULE,
3834 .read = amdgpu_debugfs_regs_smc_read,
3835 .write = amdgpu_debugfs_regs_smc_write,
3836 .llseek = default_llseek
3837};
3838
1e051413
TSD
3839static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3840 .owner = THIS_MODULE,
3841 .read = amdgpu_debugfs_gca_config_read,
3842 .llseek = default_llseek
3843};
3844
f2cdaf20
TSD
3845static const struct file_operations amdgpu_debugfs_sensors_fops = {
3846 .owner = THIS_MODULE,
3847 .read = amdgpu_debugfs_sensor_read,
3848 .llseek = default_llseek
3849};
3850
273d7aa1
TSD
3851static const struct file_operations amdgpu_debugfs_wave_fops = {
3852 .owner = THIS_MODULE,
3853 .read = amdgpu_debugfs_wave_read,
3854 .llseek = default_llseek
3855};
c5a60ce8
TSD
3856static const struct file_operations amdgpu_debugfs_gpr_fops = {
3857 .owner = THIS_MODULE,
3858 .read = amdgpu_debugfs_gpr_read,
3859 .llseek = default_llseek
3860};
273d7aa1 3861
adcec288
TSD
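/*
 * The two arrays below are kept in the same order: debugfs_regs_names[i] is
 * the debugfs file name registered with the operations in debugfs_regs[i].
 */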
3862static const struct file_operations *debugfs_regs[] = {
3863 &amdgpu_debugfs_regs_fops,
3864 &amdgpu_debugfs_regs_didt_fops,
3865 &amdgpu_debugfs_regs_pcie_fops,
3866 &amdgpu_debugfs_regs_smc_fops,
1e051413 3867 &amdgpu_debugfs_gca_config_fops,
f2cdaf20 3868 &amdgpu_debugfs_sensors_fops,
273d7aa1 3869 &amdgpu_debugfs_wave_fops,
c5a60ce8 3870 &amdgpu_debugfs_gpr_fops,
adcec288
TSD
3871};
3872
3873static const char *debugfs_regs_names[] = {
3874 "amdgpu_regs",
3875 "amdgpu_regs_didt",
3876 "amdgpu_regs_pcie",
3877 "amdgpu_regs_smc",
1e051413 3878 "amdgpu_gca_config",
f2cdaf20 3879 "amdgpu_sensors",
273d7aa1 3880 "amdgpu_wave",
c5a60ce8 3881 "amdgpu_gpr",
adcec288 3882};
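/*
 * Illustrative only: assuming debugfs is mounted at /sys/kernel/debug and
 * the device is DRM minor 0, a register can be peeked from userspace with
 * something like
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((reg)) | xxd
 *
 * where "reg" is the dword register index (the handlers above treat the
 * file offset as a byte offset, i.e. 4 * reg).
 */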
d38ceaf9
AD
3883
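/*
 * Create one debugfs file per entry of the arrays above under the DRM
 * debugfs directory of the device. Only the first file (the raw MMIO
 * "amdgpu_regs") gets its inode size set, to the size of the register
 * aperture, so tools can see how much can be read.
 */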
3884static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3885{
3886 struct drm_minor *minor = adev->ddev->primary;
3887 struct dentry *ent, *root = minor->debugfs_root;
adcec288
TSD
3888 unsigned i, j;
3889
3890 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3891 ent = debugfs_create_file(debugfs_regs_names[i],
3892 S_IFREG | S_IRUGO, root,
3893 adev, debugfs_regs[i]);
3894 if (IS_ERR(ent)) {
3895 for (j = 0; j < i; j++) {
3896 debugfs_remove(adev->debugfs_regs[j]);
3897 adev->debugfs_regs[j] = NULL;
3898 }
3899 return PTR_ERR(ent);
3900 }
d38ceaf9 3901
adcec288
TSD
3902 if (!i)
3903 i_size_write(ent->d_inode, adev->rmmio_size);
3904 adev->debugfs_regs[i] = ent;
3905 }
d38ceaf9
AD
3906
3907 return 0;
3908}
3909
3910static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3911{
adcec288
TSD
3912 unsigned i;
3913
3914 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3915 if (adev->debugfs_regs[i]) {
3916 debugfs_remove(adev->debugfs_regs[i]);
3917 adev->debugfs_regs[i] = NULL;
3918 }
3919 }
d38ceaf9
AD
3920}
3921
4f0955fc
HR
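/*
 * amdgpu_test_ib: park every ring's scheduler thread so no new jobs are
 * queued, run the IB ring smoke tests, report the result and unpark the
 * schedulers again.
 */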
3922static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3923{
3924 struct drm_info_node *node = (struct drm_info_node *) m->private;
3925 struct drm_device *dev = node->minor->dev;
3926 struct amdgpu_device *adev = dev->dev_private;
3927 int r = 0, i;
3928
3929 /* hold on the scheduler */
3930 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3931 struct amdgpu_ring *ring = adev->rings[i];
3932
3933 if (!ring || !ring->sched.thread)
3934 continue;
3935 kthread_park(ring->sched.thread);
3936 }
3937
3938 seq_printf(m, "run ib test:\n");
3939 r = amdgpu_ib_ring_tests(adev);
3940 if (r)
3941 seq_printf(m, "ib ring tests failed (%d).\n", r);
3942 else
3943 seq_printf(m, "ib ring tests passed.\n");
3944
3945 /* go on the scheduler */
3946 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3947 struct amdgpu_ring *ring = adev->rings[i];
3948
3949 if (!ring || !ring->sched.thread)
3950 continue;
3951 kthread_unpark(ring->sched.thread);
3952 }
3953
3954 return 0;
3955}
3956
db95e218
KR
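/* amdgpu_vbios: dump the raw video BIOS image fetched by the driver at init. */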
3957static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3958{
3959 struct drm_info_node *node = (struct drm_info_node *) m->private;
3960 struct drm_device *dev = node->minor->dev;
3961 struct amdgpu_device *adev = dev->dev_private;
3962
3963 seq_write(m, adev->bios, adev->bios_size);
3964 return 0;
3965}
3966
79588d21
CK
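/*
 * amdgpu_evict_vram: force-evict all buffer objects from VRAM and print the
 * return code of amdgpu_bo_evict_vram().
 */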
3967static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
3968{
3969 struct drm_info_node *node = (struct drm_info_node *)m->private;
3970 struct drm_device *dev = node->minor->dev;
3971 struct amdgpu_device *adev = dev->dev_private;
3972
3973 seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
3974 return 0;
3975}
3976
763efb6c
CK
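/*
 * These drm_info_list entries are registered as read-only debugfs files by
 * amdgpu_debugfs_init() below (via amdgpu_debugfs_add_files()).
 */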
3977static const struct drm_info_list amdgpu_debugfs_list[] = {
3978 {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
79588d21
CK
3979 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
3980 {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
db95e218
KR
3981};
3982
763efb6c 3983static int amdgpu_debugfs_init(struct amdgpu_device *adev)
db95e218 3984{
763efb6c
CK
3985 return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
3986 ARRAY_SIZE(amdgpu_debugfs_list));
db95e218 3987}
763efb6c 3988
7cebc728 3989#else
763efb6c 3990static int amdgpu_debugfs_init(struct amdgpu_device *adev)
4f0955fc
HR
3991{
3992 return 0;
3993}
7cebc728
AK
3994static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3995{
3996 return 0;
3997}
3998static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
d38ceaf9 3999#endif