/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS        2000

static const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGA10",
        "RAVEN",
        "LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags)
{
        uint32_t ret;

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_wreg(adev, reg, v);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}
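
/*
 * Callers normally do not invoke amdgpu_mm_rreg()/amdgpu_mm_wreg() directly;
 * amdgpu.h wraps them in the RREG32()/WREG32() family of macros.  A hedged
 * sketch of a typical read-modify-write sequence (the register and mask names
 * below are illustrative only, not taken from this file):
 *
 *      u32 tmp;
 *
 *      tmp = RREG32(mmEXAMPLE_CNTL);           // amdgpu_mm_rreg(adev, reg, 0)
 *      tmp |= EXAMPLE_CNTL__ENABLE_MASK;
 *      WREG32(mmEXAMPLE_CNTL, tmp);            // amdgpu_mm_wreg(adev, reg, tmp, 0)
 */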

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}
179
180/**
181 * amdgpu_mm_rdoorbell - read a doorbell dword
182 *
183 * @adev: amdgpu_device pointer
184 * @index: doorbell index
185 *
186 * Returns the value in the doorbell aperture at the
187 * requested doorbell index (CIK).
188 */
189u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
190{
191 if (index < adev->doorbell.num_doorbells) {
192 return readl(adev->doorbell.ptr + index);
193 } else {
194 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
195 return 0;
196 }
197}
198
199/**
200 * amdgpu_mm_wdoorbell - write a doorbell dword
201 *
202 * @adev: amdgpu_device pointer
203 * @index: doorbell index
204 * @v: value to write
205 *
206 * Writes @v to the doorbell aperture at the
207 * requested doorbell index (CIK).
208 */
209void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
210{
211 if (index < adev->doorbell.num_doorbells) {
212 writel(v, adev->doorbell.ptr + index);
213 } else {
214 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
215 }
216}
217
832be404
KW
218/**
219 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
220 *
221 * @adev: amdgpu_device pointer
222 * @index: doorbell index
223 *
224 * Returns the value in the doorbell aperture at the
225 * requested doorbell index (VEGA10+).
226 */
227u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
228{
229 if (index < adev->doorbell.num_doorbells) {
230 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
231 } else {
232 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
233 return 0;
234 }
235}
236
237/**
238 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
239 *
240 * @adev: amdgpu_device pointer
241 * @index: doorbell index
242 * @v: value to write
243 *
244 * Writes @v to the doorbell aperture at the
245 * requested doorbell index (VEGA10+).
246 */
247void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
248{
249 if (index < adev->doorbell.num_doorbells) {
250 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
251 } else {
252 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
253 }
254}
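
/*
 * amdgpu.h wraps the four doorbell accessors above in the RDOORBELL32()/
 * WDOORBELL32()/RDOORBELL64()/WDOORBELL64() macros.  A hedged sketch of how a
 * ring's write pointer is usually committed through a doorbell (field names
 * follow the common ring code but are shown here only as an illustration):
 *
 *      if (ring->use_doorbell)
 *              WDOORBELL64(ring->doorbell_index, ring->wptr);
 */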
255
d38ceaf9
AD
256/**
257 * amdgpu_invalid_rreg - dummy reg read function
258 *
259 * @adev: amdgpu device pointer
260 * @reg: offset of register
261 *
262 * Dummy register read function. Used for register blocks
263 * that certain asics don't have (all asics).
264 * Returns the value in the register.
265 */
266static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
267{
268 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
269 BUG();
270 return 0;
271}
272
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
283static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
284{
285 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
286 reg, v);
287 BUG();
288}
289
290/**
291 * amdgpu_block_invalid_rreg - dummy reg read function
292 *
293 * @adev: amdgpu device pointer
294 * @block: offset of instance
295 * @reg: offset of register
296 *
297 * Dummy register read function. Used for register blocks
298 * that certain asics don't have (all asics).
299 * Returns the value in the register.
300 */
301static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
302 uint32_t block, uint32_t reg)
303{
304 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
305 reg, block);
306 BUG();
307 return 0;
308}
309
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
321static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
322 uint32_t block,
323 uint32_t reg, uint32_t v)
324{
325 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
326 reg, block, v);
327 BUG();
328}
329
06ec9070 330static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
d38ceaf9 331{
a4a02777
CK
332 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
333 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
334 &adev->vram_scratch.robj,
335 &adev->vram_scratch.gpu_addr,
336 (void **)&adev->vram_scratch.ptr);
d38ceaf9
AD
337}
338
06ec9070 339static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
d38ceaf9 340{
078af1a3 341 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
d38ceaf9
AD
342}
343
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
9c3f2b54
AD
354void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
355 const u32 *registers,
356 const u32 array_size)
d38ceaf9
AD
357{
358 u32 tmp, reg, and_mask, or_mask;
359 int i;
360
361 if (array_size % 3)
362 return;
363
364 for (i = 0; i < array_size; i +=3) {
365 reg = registers[i + 0];
366 and_mask = registers[i + 1];
367 or_mask = registers[i + 2];
368
369 if (and_mask == 0xffffffff) {
370 tmp = or_mask;
371 } else {
372 tmp = RREG32(reg);
373 tmp &= ~and_mask;
374 tmp |= or_mask;
375 }
376 WREG32(reg, tmp);
377 }
378}
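
/*
 * The register array consumed above is a flat list of
 * { register, and_mask, or_mask } triplets; an and_mask of 0xffffffff means
 * the register is simply overwritten with or_mask.  A hedged sketch of how an
 * ASIC file typically feeds this helper (names below are illustrative only):
 *
 *      static const u32 example_golden_settings[] = {
 *              mmEXAMPLE_REG_A, 0xffffffff, 0x00000100,  // full overwrite
 *              mmEXAMPLE_REG_B, 0x0000000f, 0x00000002,  // clear bits 0-3, set bit 1
 *      };
 *
 *      amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *                                              ARRAY_SIZE(example_golden_settings));
 */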
379
8111c387 380void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
d38ceaf9
AD
381{
382 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
383}
384
385/*
386 * GPU doorbell aperture helpers function.
387 */
388/**
06ec9070 389 * amdgpu_device_doorbell_init - Init doorbell driver information.
d38ceaf9
AD
390 *
391 * @adev: amdgpu_device pointer
392 *
393 * Init doorbell driver information (CIK)
394 * Returns 0 on success, error on failure.
395 */
06ec9070 396static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
d38ceaf9 397{
705e519e
CK
398 /* No doorbell on SI hardware generation */
399 if (adev->asic_type < CHIP_BONAIRE) {
400 adev->doorbell.base = 0;
401 adev->doorbell.size = 0;
402 adev->doorbell.num_doorbells = 0;
403 adev->doorbell.ptr = NULL;
404 return 0;
405 }
406
d6895ad3
CK
407 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
408 return -EINVAL;
409
d38ceaf9
AD
410 /* doorbell bar mapping */
411 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
412 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
413
edf600da 414 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
d38ceaf9
AD
415 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
416 if (adev->doorbell.num_doorbells == 0)
417 return -EINVAL;
418
8972e5d2
CK
419 adev->doorbell.ptr = ioremap(adev->doorbell.base,
420 adev->doorbell.num_doorbells *
421 sizeof(u32));
422 if (adev->doorbell.ptr == NULL)
d38ceaf9 423 return -ENOMEM;
d38ceaf9
AD
424
425 return 0;
426}
427
428/**
06ec9070 429 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
d38ceaf9
AD
430 *
431 * @adev: amdgpu_device pointer
432 *
433 * Tear down doorbell driver information (CIK)
434 */
06ec9070 435static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
d38ceaf9
AD
436{
437 iounmap(adev->doorbell.ptr);
438 adev->doorbell.ptr = NULL;
439}
440
22cb0164 441
d38ceaf9
AD
442
443/*
06ec9070 444 * amdgpu_device_wb_*()
455a7bc2 445 * Writeback is the method by which the GPU updates special pages in memory
ea81a173 446 * with the status of certain GPU events (fences, ring pointers,etc.).
d38ceaf9
AD
447 */
448
449/**
06ec9070 450 * amdgpu_device_wb_fini - Disable Writeback and free memory
d38ceaf9
AD
451 *
452 * @adev: amdgpu_device pointer
453 *
454 * Disables Writeback and frees the Writeback memory (all asics).
455 * Used at driver shutdown.
456 */
06ec9070 457static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
d38ceaf9
AD
458{
459 if (adev->wb.wb_obj) {
a76ed485
AD
460 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
461 &adev->wb.gpu_addr,
462 (void **)&adev->wb.wb);
d38ceaf9
AD
463 adev->wb.wb_obj = NULL;
464 }
465}
466
467/**
06ec9070 468 * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
d38ceaf9
AD
469 *
470 * @adev: amdgpu_device pointer
471 *
455a7bc2 472 * Initializes writeback and allocates writeback memory (all asics).
d38ceaf9
AD
473 * Used at driver startup.
474 * Returns 0 on success or an -error on failure.
475 */
06ec9070 476static int amdgpu_device_wb_init(struct amdgpu_device *adev)
d38ceaf9
AD
477{
478 int r;
479
480 if (adev->wb.wb_obj == NULL) {
97407b63
AD
481 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
482 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
a76ed485
AD
483 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
484 &adev->wb.wb_obj, &adev->wb.gpu_addr,
485 (void **)&adev->wb.wb);
d38ceaf9
AD
486 if (r) {
487 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
488 return r;
489 }
d38ceaf9
AD
490
491 adev->wb.num_wb = AMDGPU_MAX_WB;
492 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
493
494 /* clear wb memory */
73469585 495 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
d38ceaf9
AD
496 }
497
498 return 0;
499}
500
501/**
131b4b36 502 * amdgpu_device_wb_get - Allocate a wb entry
d38ceaf9
AD
503 *
504 * @adev: amdgpu_device pointer
505 * @wb: wb index
506 *
507 * Allocate a wb slot for use by the driver (all asics).
508 * Returns 0 on success or -EINVAL on failure.
509 */
131b4b36 510int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
d38ceaf9
AD
511{
512 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
d38ceaf9 513
97407b63 514 if (offset < adev->wb.num_wb) {
7014285a 515 __set_bit(offset, adev->wb.used);
63ae07ca 516 *wb = offset << 3; /* convert to dw offset */
0915fdbc
ML
517 return 0;
518 } else {
519 return -EINVAL;
520 }
521}
522
d38ceaf9 523/**
131b4b36 524 * amdgpu_device_wb_free - Free a wb entry
d38ceaf9
AD
525 *
526 * @adev: amdgpu_device pointer
527 * @wb: wb index
528 *
529 * Free a wb slot allocated for use by the driver (all asics)
530 */
131b4b36 531void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
d38ceaf9 532{
73469585 533 wb >>= 3;
d38ceaf9 534 if (wb < adev->wb.num_wb)
73469585 535 __clear_bit(wb, adev->wb.used);
d38ceaf9
AD
536}
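
/*
 * A hedged sketch of the writeback slot lifecycle managed by the two helpers
 * above.  The index returned by amdgpu_device_wb_get() is a dword offset into
 * the writeback page (variable names here are illustrative only):
 *
 *      u32 wb;
 *      u64 gpu_addr;
 *
 *      if (amdgpu_device_wb_get(adev, &wb))
 *              return -EINVAL;                         // no free slot
 *
 *      gpu_addr = adev->wb.gpu_addr + (wb * 4);        // address the GPU writes to
 *      adev->wb.wb[wb] = 0;                            // CPU view of the same dword
 *      ...
 *      amdgpu_device_wb_free(adev, wb);
 */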
537
/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
2543e28a 547void amdgpu_device_vram_location(struct amdgpu_device *adev,
770d13b1 548 struct amdgpu_gmc *mc, u64 base)
d38ceaf9
AD
549{
550 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
551
552 mc->vram_start = base;
d38ceaf9
AD
553 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
554 if (limit && limit < mc->real_vram_size)
555 mc->real_vram_size = limit;
556 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
557 mc->mc_vram_size >> 20, mc->vram_start,
558 mc->vram_end, mc->real_vram_size >> 20);
559}
560
/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
2543e28a 573void amdgpu_device_gart_location(struct amdgpu_device *adev,
770d13b1 574 struct amdgpu_gmc *mc)
d38ceaf9
AD
575{
576 u64 size_af, size_bf;
577
770d13b1 578 size_af = adev->gmc.mc_mask - mc->vram_end;
ed21c047 579 size_bf = mc->vram_start;
d38ceaf9 580 if (size_bf > size_af) {
6f02a696 581 if (mc->gart_size > size_bf) {
d38ceaf9 582 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 583 mc->gart_size = size_bf;
d38ceaf9 584 }
6f02a696 585 mc->gart_start = 0;
d38ceaf9 586 } else {
6f02a696 587 if (mc->gart_size > size_af) {
d38ceaf9 588 dev_warn(adev->dev, "limiting GTT\n");
6f02a696 589 mc->gart_size = size_af;
d38ceaf9 590 }
b98f1b9e
CK
591 /* VCE doesn't like it when BOs cross a 4GB segment, so align
592 * the GART base on a 4GB boundary as well.
593 */
594 mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
d38ceaf9 595 }
6f02a696 596 mc->gart_end = mc->gart_start + mc->gart_size - 1;
d38ceaf9 597 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
6f02a696 598 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
d38ceaf9
AD
599}
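
/*
 * A hedged sketch of how a GMC IP block is expected to use the two placement
 * helpers above during its mc_init path (the source of the base address is
 * simplified and only illustrative):
 *
 *      u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;  // example base source
 *
 *      base <<= 24;
 *      amdgpu_device_vram_location(adev, &adev->gmc, base);
 *      amdgpu_device_gart_location(adev, &adev->gmc);
 */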
600
d6895ad3
CK
601/**
602 * amdgpu_device_resize_fb_bar - try to resize FB BAR
603 *
604 * @adev: amdgpu_device pointer
605 *
606 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
607 * to fail, but if any of the BARs is not accessible after the size we abort
608 * driver loading by returning -ENODEV.
609 */
610int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
611{
770d13b1 612 u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
d6895ad3 613 u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
31b8adab
CK
614 struct pci_bus *root;
615 struct resource *res;
616 unsigned i;
d6895ad3
CK
617 u16 cmd;
618 int r;
619
0c03b912 620 /* Bypass for VF */
621 if (amdgpu_sriov_vf(adev))
622 return 0;
623
31b8adab
CK
624 /* Check if the root BUS has 64bit memory resources */
625 root = adev->pdev->bus;
626 while (root->parent)
627 root = root->parent;
628
629 pci_bus_for_each_resource(root, res, i) {
0ebb7c54 630 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
31b8adab
CK
631 res->start > 0x100000000ull)
632 break;
633 }
634
635 /* Trying to resize is pointless without a root hub window above 4GB */
636 if (!res)
637 return 0;
638
d6895ad3
CK
639 /* Disable memory decoding while we change the BAR addresses and size */
640 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
641 pci_write_config_word(adev->pdev, PCI_COMMAND,
642 cmd & ~PCI_COMMAND_MEMORY);
643
644 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
06ec9070 645 amdgpu_device_doorbell_fini(adev);
d6895ad3
CK
646 if (adev->asic_type >= CHIP_BONAIRE)
647 pci_release_resource(adev->pdev, 2);
648
649 pci_release_resource(adev->pdev, 0);
650
651 r = pci_resize_resource(adev->pdev, 0, rbar_size);
652 if (r == -ENOSPC)
653 DRM_INFO("Not enough PCI address space for a large BAR.");
654 else if (r && r != -ENOTSUPP)
655 DRM_ERROR("Problem resizing BAR0 (%d).", r);
656
657 pci_assign_unassigned_bus_resources(adev->pdev->bus);
658
659 /* When the doorbell or fb BAR isn't available we have no chance of
660 * using the device.
661 */
06ec9070 662 r = amdgpu_device_doorbell_init(adev);
d6895ad3
CK
663 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
664 return -ENODEV;
665
666 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
667
668 return 0;
669}
a05502e5 670
d38ceaf9
AD
671/*
672 * GPU helpers function.
673 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if post is needed, false if not.
 */
39c640c0 683bool amdgpu_device_need_post(struct amdgpu_device *adev)
d38ceaf9
AD
684{
685 uint32_t reg;
686
bec86378
ML
687 if (amdgpu_sriov_vf(adev))
688 return false;
689
        if (amdgpu_passthrough(adev)) {
                /* For FIJI: in the whole-GPU pass-through virtualization case,
                 * after a VM reboot some old SMC firmware still needs the driver
                 * to do a vPost, otherwise the GPU hangs.  SMC firmware versions
                 * above 22.15 don't have this flaw, so we only force a vPost for
                 * versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;
                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if an error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
bec86378 708 }
91fe77eb 709
710 if (adev->has_hw_reset) {
711 adev->has_hw_reset = false;
712 return true;
713 }
714
715 /* bios scratch used on CIK+ */
716 if (adev->asic_type >= CHIP_BONAIRE)
717 return amdgpu_atombios_scratch_need_asic_init(adev);
718
719 /* check MEM_SIZE for older asics */
720 reg = amdgpu_asic_get_config_memsize(adev);
721
722 if ((reg != 0) && (reg != 0xffffffff))
723 return false;
724
725 return true;
bec86378
ML
726}
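
/*
 * A hedged sketch of how the check above is consumed at init/resume time:
 * when a post is required the driver re-runs the ASIC init tables from the
 * VBIOS (the helper shown below follows the atom code and is only indicative):
 *
 *      if (amdgpu_device_need_post(adev))
 *              amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */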
727
d38ceaf9
AD
728/* if we get transitioned to only one device, take VGA back */
729/**
06ec9070 730 * amdgpu_device_vga_set_decode - enable/disable vga decode
d38ceaf9
AD
731 *
732 * @cookie: amdgpu_device pointer
733 * @state: enable/disable vga decode
734 *
735 * Enable/disable vga decode (all asics).
736 * Returns VGA resource flags.
737 */
06ec9070 738static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
d38ceaf9
AD
739{
740 struct amdgpu_device *adev = cookie;
741 amdgpu_asic_set_vga_state(adev, state);
742 if (state)
743 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
744 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
745 else
746 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
747}
748
06ec9070 749static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
a1adf8be
CZ
750{
751 /* defines number of bits in page table versus page directory,
752 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
753 * page table and the remaining bits are in the page directory */
bab4fee7
JZ
754 if (amdgpu_vm_block_size == -1)
755 return;
a1adf8be 756
bab4fee7 757 if (amdgpu_vm_block_size < 9) {
a1adf8be
CZ
758 dev_warn(adev->dev, "VM page table size (%d) too small\n",
759 amdgpu_vm_block_size);
97489129 760 amdgpu_vm_block_size = -1;
a1adf8be 761 }
a1adf8be
CZ
762}
763
06ec9070 764static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
83ca145d 765{
64dab074
AD
766 /* no need to check the default value */
767 if (amdgpu_vm_size == -1)
768 return;
769
83ca145d
ZJ
770 if (amdgpu_vm_size < 1) {
771 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
772 amdgpu_vm_size);
f3368128 773 amdgpu_vm_size = -1;
83ca145d 774 }
83ca145d
ZJ
775}
776
d38ceaf9 777/**
06ec9070 778 * amdgpu_device_check_arguments - validate module params
d38ceaf9
AD
779 *
780 * @adev: amdgpu_device pointer
781 *
782 * Validates certain module parameters and updates
783 * the associated values used by the driver (all asics).
784 */
06ec9070 785static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
d38ceaf9 786{
5b011235
CZ
787 if (amdgpu_sched_jobs < 4) {
788 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
789 amdgpu_sched_jobs);
790 amdgpu_sched_jobs = 4;
76117507 791 } else if (!is_power_of_2(amdgpu_sched_jobs)){
5b011235
CZ
792 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
793 amdgpu_sched_jobs);
794 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
795 }
d38ceaf9 796
83e74db6 797 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
f9321cc4
CK
798 /* gart size must be greater or equal to 32M */
799 dev_warn(adev->dev, "gart size (%d) too small\n",
800 amdgpu_gart_size);
83e74db6 801 amdgpu_gart_size = -1;
d38ceaf9
AD
802 }
803
36d38372 804 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
c4e1a13a 805 /* gtt size must be greater or equal to 32M */
36d38372
CK
806 dev_warn(adev->dev, "gtt size (%d) too small\n",
807 amdgpu_gtt_size);
808 amdgpu_gtt_size = -1;
d38ceaf9
AD
809 }
810
d07f14be
RH
811 /* valid range is between 4 and 9 inclusive */
812 if (amdgpu_vm_fragment_size != -1 &&
813 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
814 dev_warn(adev->dev, "valid range is between 4 and 9\n");
815 amdgpu_vm_fragment_size = -1;
816 }
817
06ec9070 818 amdgpu_device_check_vm_size(adev);
d38ceaf9 819
06ec9070 820 amdgpu_device_check_block_size(adev);
6a7f76e7 821
526bae37 822 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
76117507 823 !is_power_of_2(amdgpu_vram_page_split))) {
6a7f76e7
CK
824 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
825 amdgpu_vram_page_split);
826 amdgpu_vram_page_split = 1024;
827 }
8854695a
AG
828
        if (amdgpu_lockup_timeout == 0) {
                dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
                amdgpu_lockup_timeout = 10000;
        }
d38ceaf9
AD
833}
834
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
 */
844static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
845{
846 struct drm_device *dev = pci_get_drvdata(pdev);
847
848 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
849 return;
850
851 if (state == VGA_SWITCHEROO_ON) {
7ca85295 852 pr_info("amdgpu: switched on\n");
d38ceaf9
AD
853 /* don't suspend or resume card normally */
854 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
855
810ddc3a 856 amdgpu_device_resume(dev, true, true);
d38ceaf9 857
d38ceaf9
AD
858 dev->switch_power_state = DRM_SWITCH_POWER_ON;
859 drm_kms_helper_poll_enable(dev);
860 } else {
7ca85295 861 pr_info("amdgpu: switched off\n");
d38ceaf9
AD
862 drm_kms_helper_poll_disable(dev);
863 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
810ddc3a 864 amdgpu_device_suspend(dev, true, true);
d38ceaf9
AD
865 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
866 }
867}
868
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
878static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
879{
880 struct drm_device *dev = pci_get_drvdata(pdev);
881
882 /*
883 * FIXME: open_count is protected by drm_global_mutex but that would lead to
884 * locking inversion with the driver load path. And the access here is
885 * completely racy anyway. So don't bother with locking for now.
886 */
887 return dev->open_count == 0;
888}
889
890static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
891 .set_gpu_state = amdgpu_switcheroo_set_state,
892 .reprobe = NULL,
893 .can_switch = amdgpu_switcheroo_can_switch,
894};
895
2990a1fc
AD
896int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
897 enum amd_ip_block_type block_type,
898 enum amd_clockgating_state state)
d38ceaf9
AD
899{
900 int i, r = 0;
901
902 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 903 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 904 continue;
c722865a
RZ
905 if (adev->ip_blocks[i].version->type != block_type)
906 continue;
907 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
908 continue;
909 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
910 (void *)adev, state);
911 if (r)
912 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
913 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
914 }
915 return r;
916}
917
2990a1fc
AD
918int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
919 enum amd_ip_block_type block_type,
920 enum amd_powergating_state state)
d38ceaf9
AD
921{
922 int i, r = 0;
923
924 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 925 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 926 continue;
c722865a
RZ
927 if (adev->ip_blocks[i].version->type != block_type)
928 continue;
929 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
930 continue;
931 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
932 (void *)adev, state);
933 if (r)
934 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
935 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9
AD
936 }
937 return r;
938}
939
2990a1fc
AD
940void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
941 u32 *flags)
6cb2d4e4
HR
942{
943 int i;
944
945 for (i = 0; i < adev->num_ip_blocks; i++) {
946 if (!adev->ip_blocks[i].status.valid)
947 continue;
948 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
949 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
950 }
951}
952
2990a1fc
AD
953int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
954 enum amd_ip_block_type block_type)
5dbbb60b
AD
955{
956 int i, r;
957
958 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 959 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 960 continue;
a1255107
AD
961 if (adev->ip_blocks[i].version->type == block_type) {
962 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
5dbbb60b
AD
963 if (r)
964 return r;
965 break;
966 }
967 }
968 return 0;
969
970}
971
2990a1fc
AD
972bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
973 enum amd_ip_block_type block_type)
5dbbb60b
AD
974{
975 int i;
976
977 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 978 if (!adev->ip_blocks[i].status.valid)
9ecbe7f5 979 continue;
a1255107
AD
980 if (adev->ip_blocks[i].version->type == block_type)
981 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
5dbbb60b
AD
982 }
983 return true;
984
985}
986
2990a1fc
AD
987struct amdgpu_ip_block *
988amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
989 enum amd_ip_block_type type)
d38ceaf9
AD
990{
991 int i;
992
993 for (i = 0; i < adev->num_ip_blocks; i++)
a1255107 994 if (adev->ip_blocks[i].version->type == type)
d38ceaf9
AD
995 return &adev->ip_blocks[i];
996
997 return NULL;
998}
999
1000/**
2990a1fc 1001 * amdgpu_device_ip_block_version_cmp
d38ceaf9
AD
1002 *
1003 * @adev: amdgpu_device pointer
5fc3aeeb 1004 * @type: enum amd_ip_block_type
d38ceaf9
AD
1005 * @major: major version
1006 * @minor: minor version
1007 *
1008 * return 0 if equal or greater
1009 * return 1 if smaller or the ip_block doesn't exist
1010 */
2990a1fc
AD
1011int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1012 enum amd_ip_block_type type,
1013 u32 major, u32 minor)
d38ceaf9 1014{
2990a1fc 1015 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
d38ceaf9 1016
a1255107
AD
1017 if (ip_block && ((ip_block->version->major > major) ||
1018 ((ip_block->version->major == major) &&
1019 (ip_block->version->minor >= minor))))
d38ceaf9
AD
1020 return 0;
1021
1022 return 1;
1023}
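
/*
 * A hedged sketch of how the version comparison above is typically used to
 * gate a feature on a minimum IP block version (the block type and version
 * numbers are only an example):
 *
 *      if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *                                             8, 0) == 0) {
 *              // GFX IP is version 8.0 or newer
 *      }
 */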
1024
a1255107 1025/**
2990a1fc 1026 * amdgpu_device_ip_block_add
a1255107
AD
1027 *
1028 * @adev: amdgpu_device pointer
1029 * @ip_block_version: pointer to the IP to add
1030 *
1031 * Adds the IP block driver information to the collection of IPs
1032 * on the asic.
1033 */
2990a1fc
AD
1034int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1035 const struct amdgpu_ip_block_version *ip_block_version)
a1255107
AD
1036{
1037 if (!ip_block_version)
1038 return -EINVAL;
1039
e966a725 1040 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
a0bae357
HR
1041 ip_block_version->funcs->name);
1042
a1255107
AD
1043 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1044
1045 return 0;
1046}
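
/*
 * A hedged sketch of how an ASIC's <asic>_set_ip_blocks() routine builds the
 * IP table through the helper above (the block names are illustrative; the
 * real lists live in the per-ASIC files such as vi.c and soc15.c):
 *
 *      amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *      amdgpu_device_ip_block_add(adev, &example_gmc_ip_block);
 *      amdgpu_device_ip_block_add(adev, &example_ih_ip_block);
 *      amdgpu_device_ip_block_add(adev, &example_gfx_ip_block);
 */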
1047
483ef985 1048static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
9accf2fd
ED
1049{
1050 adev->enable_virtual_display = false;
1051
1052 if (amdgpu_virtual_display) {
1053 struct drm_device *ddev = adev->ddev;
1054 const char *pci_address_name = pci_name(ddev->pdev);
0f66356d 1055 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
9accf2fd
ED
1056
1057 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1058 pciaddstr_tmp = pciaddstr;
0f66356d
ED
1059 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1060 pciaddname = strsep(&pciaddname_tmp, ",");
967de2a9
YT
1061 if (!strcmp("all", pciaddname)
1062 || !strcmp(pci_address_name, pciaddname)) {
0f66356d
ED
1063 long num_crtc;
1064 int res = -1;
1065
9accf2fd 1066 adev->enable_virtual_display = true;
0f66356d
ED
1067
1068 if (pciaddname_tmp)
1069 res = kstrtol(pciaddname_tmp, 10,
1070 &num_crtc);
1071
1072 if (!res) {
1073 if (num_crtc < 1)
1074 num_crtc = 1;
1075 if (num_crtc > 6)
1076 num_crtc = 6;
1077 adev->mode_info.num_crtc = num_crtc;
1078 } else {
1079 adev->mode_info.num_crtc = 1;
1080 }
9accf2fd
ED
1081 break;
1082 }
1083 }
1084
0f66356d
ED
1085 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1086 amdgpu_virtual_display, pci_address_name,
1087 adev->enable_virtual_display, adev->mode_info.num_crtc);
9accf2fd
ED
1088
1089 kfree(pciaddstr);
1090 }
1091}
1092
e2a75f88
AD
1093static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1094{
e2a75f88
AD
1095 const char *chip_name;
1096 char fw_name[30];
1097 int err;
1098 const struct gpu_info_firmware_header_v1_0 *hdr;
1099
ab4fe3e1
HR
1100 adev->firmware.gpu_info_fw = NULL;
1101
e2a75f88
AD
1102 switch (adev->asic_type) {
1103 case CHIP_TOPAZ:
1104 case CHIP_TONGA:
1105 case CHIP_FIJI:
1106 case CHIP_POLARIS11:
1107 case CHIP_POLARIS10:
1108 case CHIP_POLARIS12:
1109 case CHIP_CARRIZO:
1110 case CHIP_STONEY:
1111#ifdef CONFIG_DRM_AMDGPU_SI
1112 case CHIP_VERDE:
1113 case CHIP_TAHITI:
1114 case CHIP_PITCAIRN:
1115 case CHIP_OLAND:
1116 case CHIP_HAINAN:
1117#endif
1118#ifdef CONFIG_DRM_AMDGPU_CIK
1119 case CHIP_BONAIRE:
1120 case CHIP_HAWAII:
1121 case CHIP_KAVERI:
1122 case CHIP_KABINI:
1123 case CHIP_MULLINS:
1124#endif
1125 default:
1126 return 0;
1127 case CHIP_VEGA10:
1128 chip_name = "vega10";
1129 break;
2d2e5e7e
AD
1130 case CHIP_RAVEN:
1131 chip_name = "raven";
1132 break;
e2a75f88
AD
1133 }
1134
1135 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
ab4fe3e1 1136 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
e2a75f88
AD
1137 if (err) {
1138 dev_err(adev->dev,
1139 "Failed to load gpu_info firmware \"%s\"\n",
1140 fw_name);
1141 goto out;
1142 }
ab4fe3e1 1143 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
e2a75f88
AD
1144 if (err) {
1145 dev_err(adev->dev,
1146 "Failed to validate gpu_info firmware \"%s\"\n",
1147 fw_name);
1148 goto out;
1149 }
1150
ab4fe3e1 1151 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
e2a75f88
AD
1152 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1153
1154 switch (hdr->version_major) {
1155 case 1:
1156 {
1157 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
ab4fe3e1 1158 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
e2a75f88
AD
1159 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1160
b5ab16bf
AD
1161 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1162 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1163 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1164 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
e2a75f88 1165 adev->gfx.config.max_texture_channel_caches =
b5ab16bf
AD
1166 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1167 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1168 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1169 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1170 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
e2a75f88 1171 adev->gfx.config.double_offchip_lds_buf =
b5ab16bf
AD
1172 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1173 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
51fd0370
HZ
1174 adev->gfx.cu_info.max_waves_per_simd =
1175 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1176 adev->gfx.cu_info.max_scratch_slots_per_cu =
1177 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1178 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
e2a75f88
AD
1179 break;
1180 }
1181 default:
1182 dev_err(adev->dev,
1183 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1184 err = -EINVAL;
1185 goto out;
1186 }
1187out:
e2a75f88
AD
1188 return err;
1189}
1190
06ec9070 1191static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
d38ceaf9 1192{
aaa36a97 1193 int i, r;
d38ceaf9 1194
483ef985 1195 amdgpu_device_enable_virtual_display(adev);
a6be7570 1196
d38ceaf9 1197 switch (adev->asic_type) {
aaa36a97
AD
1198 case CHIP_TOPAZ:
1199 case CHIP_TONGA:
48299f95 1200 case CHIP_FIJI:
2cc0c0b5
FC
1201 case CHIP_POLARIS11:
1202 case CHIP_POLARIS10:
c4642a47 1203 case CHIP_POLARIS12:
aaa36a97 1204 case CHIP_CARRIZO:
39bb0c92
SL
1205 case CHIP_STONEY:
1206 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
aaa36a97
AD
1207 adev->family = AMDGPU_FAMILY_CZ;
1208 else
1209 adev->family = AMDGPU_FAMILY_VI;
1210
1211 r = vi_set_ip_blocks(adev);
1212 if (r)
1213 return r;
1214 break;
33f34802
KW
1215#ifdef CONFIG_DRM_AMDGPU_SI
1216 case CHIP_VERDE:
1217 case CHIP_TAHITI:
1218 case CHIP_PITCAIRN:
1219 case CHIP_OLAND:
1220 case CHIP_HAINAN:
295d0daf 1221 adev->family = AMDGPU_FAMILY_SI;
33f34802
KW
1222 r = si_set_ip_blocks(adev);
1223 if (r)
1224 return r;
1225 break;
1226#endif
a2e73f56
AD
1227#ifdef CONFIG_DRM_AMDGPU_CIK
1228 case CHIP_BONAIRE:
1229 case CHIP_HAWAII:
1230 case CHIP_KAVERI:
1231 case CHIP_KABINI:
1232 case CHIP_MULLINS:
1233 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1234 adev->family = AMDGPU_FAMILY_CI;
1235 else
1236 adev->family = AMDGPU_FAMILY_KV;
1237
1238 r = cik_set_ip_blocks(adev);
1239 if (r)
1240 return r;
1241 break;
1242#endif
2ca8a5d2
CZ
1243 case CHIP_VEGA10:
1244 case CHIP_RAVEN:
1245 if (adev->asic_type == CHIP_RAVEN)
1246 adev->family = AMDGPU_FAMILY_RV;
1247 else
1248 adev->family = AMDGPU_FAMILY_AI;
460826e6
KW
1249
1250 r = soc15_set_ip_blocks(adev);
1251 if (r)
1252 return r;
1253 break;
d38ceaf9
AD
1254 default:
1255 /* FIXME: not supported yet */
1256 return -EINVAL;
1257 }
1258
e2a75f88
AD
1259 r = amdgpu_device_parse_gpu_info_fw(adev);
1260 if (r)
1261 return r;
1262
1884734a 1263 amdgpu_amdkfd_device_probe(adev);
1264
3149d9da
XY
1265 if (amdgpu_sriov_vf(adev)) {
1266 r = amdgpu_virt_request_full_gpu(adev, true);
1267 if (r)
5ffa61c1 1268 return -EAGAIN;
3149d9da
XY
1269 }
1270
d38ceaf9
AD
1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1272 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
ed8cf00c
HR
1273 DRM_ERROR("disabled ip block: %d <%s>\n",
1274 i, adev->ip_blocks[i].version->funcs->name);
a1255107 1275 adev->ip_blocks[i].status.valid = false;
d38ceaf9 1276 } else {
a1255107
AD
1277 if (adev->ip_blocks[i].version->funcs->early_init) {
1278 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2c1a2784 1279 if (r == -ENOENT) {
a1255107 1280 adev->ip_blocks[i].status.valid = false;
2c1a2784 1281 } else if (r) {
a1255107
AD
1282 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1283 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1284 return r;
2c1a2784 1285 } else {
a1255107 1286 adev->ip_blocks[i].status.valid = true;
2c1a2784 1287 }
974e6b64 1288 } else {
a1255107 1289 adev->ip_blocks[i].status.valid = true;
d38ceaf9 1290 }
d38ceaf9
AD
1291 }
1292 }
1293
395d1fb9
NH
1294 adev->cg_flags &= amdgpu_cg_mask;
1295 adev->pg_flags &= amdgpu_pg_mask;
1296
d38ceaf9
AD
1297 return 0;
1298}
1299
06ec9070 1300static int amdgpu_device_ip_init(struct amdgpu_device *adev)
d38ceaf9
AD
1301{
1302 int i, r;
1303
1304 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1305 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1306 continue;
a1255107 1307 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2c1a2784 1308 if (r) {
a1255107
AD
1309 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1310 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1311 return r;
2c1a2784 1312 }
a1255107 1313 adev->ip_blocks[i].status.sw = true;
bfca0289 1314
d38ceaf9 1315 /* need to do gmc hw init early so we can allocate gpu mem */
a1255107 1316 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
06ec9070 1317 r = amdgpu_device_vram_scratch_init(adev);
2c1a2784
AD
1318 if (r) {
1319 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
d38ceaf9 1320 return r;
2c1a2784 1321 }
a1255107 1322 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784
AD
1323 if (r) {
1324 DRM_ERROR("hw_init %d failed %d\n", i, r);
d38ceaf9 1325 return r;
2c1a2784 1326 }
06ec9070 1327 r = amdgpu_device_wb_init(adev);
2c1a2784 1328 if (r) {
06ec9070 1329 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
d38ceaf9 1330 return r;
2c1a2784 1331 }
a1255107 1332 adev->ip_blocks[i].status.hw = true;
2493664f
ML
1333
1334 /* right after GMC hw init, we create CSA */
1335 if (amdgpu_sriov_vf(adev)) {
1336 r = amdgpu_allocate_static_csa(adev);
1337 if (r) {
1338 DRM_ERROR("allocate CSA failed %d\n", r);
1339 return r;
1340 }
1341 }
d38ceaf9
AD
1342 }
1343 }
1344
1345 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1346 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1347 continue;
bfca0289 1348 if (adev->ip_blocks[i].status.hw)
d38ceaf9 1349 continue;
a1255107 1350 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2c1a2784 1351 if (r) {
a1255107
AD
1352 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1353 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1354 return r;
2c1a2784 1355 }
a1255107 1356 adev->ip_blocks[i].status.hw = true;
d38ceaf9
AD
1357 }
1358
1884734a 1359 amdgpu_amdkfd_device_init(adev);
c6332b97 1360
1361 if (amdgpu_sriov_vf(adev))
1362 amdgpu_virt_release_full_gpu(adev, true);
1363
d38ceaf9
AD
1364 return 0;
1365}
1366
06ec9070 1367static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
0c49e0b8
CZ
1368{
1369 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1370}
1371
06ec9070 1372static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
0c49e0b8
CZ
1373{
1374 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1375 AMDGPU_RESET_MAGIC_NUM);
1376}
1377
06ec9070 1378static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
d38ceaf9
AD
1379{
1380 int i = 0, r;
1381
4a2ba394
SL
1382 if (amdgpu_emu_mode == 1)
1383 return 0;
1384
d38ceaf9 1385 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1386 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1387 continue;
4a446d55 1388 /* skip CG for VCE/UVD, it's handled specially */
a1255107
AD
1389 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1390 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
4a446d55 1391 /* enable clockgating to save power */
a1255107
AD
1392 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1393 AMD_CG_STATE_GATE);
4a446d55
AD
1394 if (r) {
1395 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
a1255107 1396 adev->ip_blocks[i].version->funcs->name, r);
4a446d55
AD
1397 return r;
1398 }
b0b00ff1 1399 }
d38ceaf9 1400 }
2dc80b00
S
1401 return 0;
1402}
1403
06ec9070 1404static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2dc80b00
S
1405{
1406 int i = 0, r;
1407
1408 for (i = 0; i < adev->num_ip_blocks; i++) {
1409 if (!adev->ip_blocks[i].status.valid)
1410 continue;
1411 if (adev->ip_blocks[i].version->funcs->late_init) {
1412 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1413 if (r) {
1414 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1415 adev->ip_blocks[i].version->funcs->name, r);
1416 return r;
1417 }
1418 adev->ip_blocks[i].status.late_initialized = true;
1419 }
1420 }
1421
1422 mod_delayed_work(system_wq, &adev->late_init_work,
1423 msecs_to_jiffies(AMDGPU_RESUME_MS));
d38ceaf9 1424
06ec9070 1425 amdgpu_device_fill_reset_magic(adev);
d38ceaf9
AD
1426
1427 return 0;
1428}
1429
06ec9070 1430static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
d38ceaf9
AD
1431{
1432 int i, r;
1433
1884734a 1434 amdgpu_amdkfd_device_fini(adev);
3e96dbfd
AD
1435 /* need to disable SMC first */
1436 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1437 if (!adev->ip_blocks[i].status.hw)
3e96dbfd 1438 continue;
a1255107 1439 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3e96dbfd 1440 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
a1255107
AD
1441 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1442 AMD_CG_STATE_UNGATE);
3e96dbfd
AD
1443 if (r) {
1444 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
a1255107 1445 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd
AD
1446 return r;
1447 }
a1255107 1448 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3e96dbfd
AD
1449 /* XXX handle errors */
1450 if (r) {
1451 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
a1255107 1452 adev->ip_blocks[i].version->funcs->name, r);
3e96dbfd 1453 }
a1255107 1454 adev->ip_blocks[i].status.hw = false;
3e96dbfd
AD
1455 break;
1456 }
1457 }
1458
d38ceaf9 1459 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1460 if (!adev->ip_blocks[i].status.hw)
d38ceaf9 1461 continue;
8201a67a
RZ
1462
1463 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1464 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1465 /* ungate blocks before hw fini so that we can shutdown the blocks safely */
1466 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1467 AMD_CG_STATE_UNGATE);
1468 if (r) {
1469 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1470 adev->ip_blocks[i].version->funcs->name, r);
1471 return r;
1472 }
2c1a2784 1473 }
8201a67a 1474
a1255107 1475 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
d38ceaf9 1476 /* XXX handle errors */
2c1a2784 1477 if (r) {
a1255107
AD
1478 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1479 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1480 }
8201a67a 1481
a1255107 1482 adev->ip_blocks[i].status.hw = false;
d38ceaf9
AD
1483 }
1484
9950cda2
AD
1485 /* disable all interrupts */
1486 amdgpu_irq_disable_all(adev);
1487
d38ceaf9 1488 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1489 if (!adev->ip_blocks[i].status.sw)
d38ceaf9 1490 continue;
c12aba3a
ML
1491
1492 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1493 amdgpu_free_static_csa(adev);
1494 amdgpu_device_wb_fini(adev);
1495 amdgpu_device_vram_scratch_fini(adev);
1496 }
1497
a1255107 1498 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
d38ceaf9 1499 /* XXX handle errors */
2c1a2784 1500 if (r) {
a1255107
AD
1501 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1502 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1503 }
a1255107
AD
1504 adev->ip_blocks[i].status.sw = false;
1505 adev->ip_blocks[i].status.valid = false;
d38ceaf9
AD
1506 }
1507
a6dcfd9c 1508 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1509 if (!adev->ip_blocks[i].status.late_initialized)
8a2eef1d 1510 continue;
a1255107
AD
1511 if (adev->ip_blocks[i].version->funcs->late_fini)
1512 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1513 adev->ip_blocks[i].status.late_initialized = false;
a6dcfd9c
ML
1514 }
1515
030308fc 1516 if (amdgpu_sriov_vf(adev))
24136135
ML
1517 if (amdgpu_virt_release_full_gpu(adev, false))
1518 DRM_ERROR("failed to release exclusive mode on fini\n");
2493664f 1519
d38ceaf9
AD
1520 return 0;
1521}
1522
06ec9070 1523static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
2dc80b00
S
1524{
1525 struct amdgpu_device *adev =
1526 container_of(work, struct amdgpu_device, late_init_work.work);
06ec9070 1527 amdgpu_device_ip_late_set_cg_state(adev);
2dc80b00
S
1528}
1529
cdd61df6 1530int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
d38ceaf9
AD
1531{
1532 int i, r;
1533
e941ea99
XY
1534 if (amdgpu_sriov_vf(adev))
1535 amdgpu_virt_request_full_gpu(adev, false);
1536
c5a93a28 1537 /* ungate SMC block first */
2990a1fc
AD
1538 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1539 AMD_CG_STATE_UNGATE);
c5a93a28 1540 if (r) {
2990a1fc 1541 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
c5a93a28
FC
1542 }
1543
d38ceaf9 1544 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
a1255107 1545 if (!adev->ip_blocks[i].status.valid)
d38ceaf9
AD
1546 continue;
1547 /* ungate blocks so that suspend can properly shut them down */
c5a93a28 1548 if (i != AMD_IP_BLOCK_TYPE_SMC) {
a1255107
AD
1549 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1550 AMD_CG_STATE_UNGATE);
c5a93a28 1551 if (r) {
a1255107
AD
1552 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1553 adev->ip_blocks[i].version->funcs->name, r);
c5a93a28 1554 }
2c1a2784 1555 }
d38ceaf9 1556 /* XXX handle errors */
a1255107 1557 r = adev->ip_blocks[i].version->funcs->suspend(adev);
d38ceaf9 1558 /* XXX handle errors */
2c1a2784 1559 if (r) {
a1255107
AD
1560 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1561 adev->ip_blocks[i].version->funcs->name, r);
2c1a2784 1562 }
d38ceaf9
AD
1563 }
1564
e941ea99
XY
1565 if (amdgpu_sriov_vf(adev))
1566 amdgpu_virt_release_full_gpu(adev, false);
1567
d38ceaf9
AD
1568 return 0;
1569}
1570
06ec9070 1571static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1572{
1573 int i, r;
1574
2cb681b6
ML
1575 static enum amd_ip_block_type ip_order[] = {
1576 AMD_IP_BLOCK_TYPE_GMC,
1577 AMD_IP_BLOCK_TYPE_COMMON,
2cb681b6
ML
1578 AMD_IP_BLOCK_TYPE_IH,
1579 };
a90ad3c2 1580
2cb681b6
ML
1581 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1582 int j;
1583 struct amdgpu_ip_block *block;
a90ad3c2 1584
2cb681b6
ML
1585 for (j = 0; j < adev->num_ip_blocks; j++) {
1586 block = &adev->ip_blocks[j];
1587
1588 if (block->version->type != ip_order[i] ||
1589 !block->status.valid)
1590 continue;
1591
1592 r = block->version->funcs->hw_init(adev);
1593 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
1594 if (r)
1595 return r;
a90ad3c2
ML
1596 }
1597 }
1598
1599 return 0;
1600}
1601
06ec9070 1602static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
a90ad3c2
ML
1603{
1604 int i, r;
1605
2cb681b6
ML
1606 static enum amd_ip_block_type ip_order[] = {
1607 AMD_IP_BLOCK_TYPE_SMC,
ef4c166d 1608 AMD_IP_BLOCK_TYPE_PSP,
2cb681b6
ML
1609 AMD_IP_BLOCK_TYPE_DCE,
1610 AMD_IP_BLOCK_TYPE_GFX,
1611 AMD_IP_BLOCK_TYPE_SDMA,
257deb8c
FM
1612 AMD_IP_BLOCK_TYPE_UVD,
1613 AMD_IP_BLOCK_TYPE_VCE
2cb681b6 1614 };
a90ad3c2 1615
2cb681b6
ML
1616 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1617 int j;
1618 struct amdgpu_ip_block *block;
a90ad3c2 1619
2cb681b6
ML
1620 for (j = 0; j < adev->num_ip_blocks; j++) {
1621 block = &adev->ip_blocks[j];
1622
1623 if (block->version->type != ip_order[i] ||
1624 !block->status.valid)
1625 continue;
1626
1627 r = block->version->funcs->hw_init(adev);
1628 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
c41d1cf6
ML
1629 if (r)
1630 return r;
a90ad3c2
ML
1631 }
1632 }
1633
1634 return 0;
1635}
1636
06ec9070 1637static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
d38ceaf9
AD
1638{
1639 int i, r;
1640
a90ad3c2
ML
1641 for (i = 0; i < adev->num_ip_blocks; i++) {
1642 if (!adev->ip_blocks[i].status.valid)
1643 continue;
a90ad3c2
ML
1644 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1645 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
fcf0649f
CZ
1646 adev->ip_blocks[i].version->type ==
1647 AMD_IP_BLOCK_TYPE_IH) {
1648 r = adev->ip_blocks[i].version->funcs->resume(adev);
1649 if (r) {
1650 DRM_ERROR("resume of IP block <%s> failed %d\n",
1651 adev->ip_blocks[i].version->funcs->name, r);
1652 return r;
1653 }
a90ad3c2
ML
1654 }
1655 }
1656
1657 return 0;
1658}
1659
06ec9070 1660static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
d38ceaf9
AD
1661{
1662 int i, r;
1663
1664 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 1665 if (!adev->ip_blocks[i].status.valid)
d38ceaf9 1666 continue;
fcf0649f
CZ
1667 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1668 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1669 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
1670 continue;
a1255107 1671 r = adev->ip_blocks[i].version->funcs->resume(adev);
2c1a2784 1672 if (r) {
a1255107
AD
1673 DRM_ERROR("resume of IP block <%s> failed %d\n",
1674 adev->ip_blocks[i].version->funcs->name, r);
d38ceaf9 1675 return r;
2c1a2784 1676 }
d38ceaf9
AD
1677 }
1678
1679 return 0;
1680}
1681
06ec9070 1682static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
fcf0649f
CZ
1683{
1684 int r;
1685
06ec9070 1686 r = amdgpu_device_ip_resume_phase1(adev);
fcf0649f
CZ
1687 if (r)
1688 return r;
06ec9070 1689 r = amdgpu_device_ip_resume_phase2(adev);
fcf0649f
CZ
1690
1691 return r;
1692}
1693
4e99a44e 1694static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
048765ad 1695{
6867e1b5
ML
1696 if (amdgpu_sriov_vf(adev)) {
1697 if (adev->is_atom_fw) {
1698 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1699 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1700 } else {
1701 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1702 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1703 }
1704
1705 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1706 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
a5bde2f9 1707 }
048765ad
AR
1708}
1709
4562236b
HW
1710bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1711{
1712 switch (asic_type) {
1713#if defined(CONFIG_DRM_AMD_DC)
1714 case CHIP_BONAIRE:
1715 case CHIP_HAWAII:
0d6fbccb 1716 case CHIP_KAVERI:
367e6687
AD
1717 case CHIP_KABINI:
1718 case CHIP_MULLINS:
4562236b
HW
1719 case CHIP_CARRIZO:
1720 case CHIP_STONEY:
1721 case CHIP_POLARIS11:
1722 case CHIP_POLARIS10:
2c8ad2d5 1723 case CHIP_POLARIS12:
4562236b
HW
1724 case CHIP_TONGA:
1725 case CHIP_FIJI:
1726#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1727 return amdgpu_dc != 0;
4562236b 1728#endif
42f8ffa1
HW
1729 case CHIP_VEGA10:
1730#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
fd187853 1731 case CHIP_RAVEN:
42f8ffa1 1732#endif
fd187853 1733 return amdgpu_dc != 0;
4562236b
HW
1734#endif
1735 default:
1736 return false;
1737 }
1738}
1739
1740/**
1741 * amdgpu_device_has_dc_support - check if dc is supported
1742 *
1743 * @adev: amdgpu_device pointer
1744 *
1745 * Returns true for supported, false for not supported
1746 */
1747bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
1748{
2555039d
XY
1749 if (amdgpu_sriov_vf(adev))
1750 return false;
1751
4562236b
HW
1752 return amdgpu_device_asic_has_dc_support(adev->asic_type);
1753}
1754
d38ceaf9
AD
1755/**
1756 * amdgpu_device_init - initialize the driver
1757 *
1758 * @adev: amdgpu_device pointer
1759 * @ddev: drm dev pointer
1760 * @pdev: pci dev pointer
1761 * @flags: driver flags
1762 *
1763 * Initializes the driver info and hw (all asics).
1764 * Returns 0 for success or an error on failure.
1765 * Called at driver startup.
1766 */
1767int amdgpu_device_init(struct amdgpu_device *adev,
1768 struct drm_device *ddev,
1769 struct pci_dev *pdev,
1770 uint32_t flags)
1771{
1772 int r, i;
1773 bool runtime = false;
95844d20 1774 u32 max_MBps;
d38ceaf9
AD
1775
1776 adev->shutdown = false;
1777 adev->dev = &pdev->dev;
1778 adev->ddev = ddev;
1779 adev->pdev = pdev;
1780 adev->flags = flags;
2f7d10b3 1781 adev->asic_type = flags & AMD_ASIC_MASK;
d38ceaf9 1782 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
593aa2d2
SL
1783 if (amdgpu_emu_mode == 1)
1784 adev->usec_timeout *= 2;
770d13b1 1785 adev->gmc.gart_size = 512 * 1024 * 1024;
d38ceaf9
AD
1786 adev->accel_working = false;
1787 adev->num_rings = 0;
1788 adev->mman.buffer_funcs = NULL;
1789 adev->mman.buffer_funcs_ring = NULL;
1790 adev->vm_manager.vm_pte_funcs = NULL;
2d55e45a 1791 adev->vm_manager.vm_pte_num_rings = 0;
132f34e4 1792 adev->gmc.gmc_funcs = NULL;
f54d1867 1793 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
b8866c26 1794 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
d38ceaf9
AD
1795
1796 adev->smc_rreg = &amdgpu_invalid_rreg;
1797 adev->smc_wreg = &amdgpu_invalid_wreg;
1798 adev->pcie_rreg = &amdgpu_invalid_rreg;
1799 adev->pcie_wreg = &amdgpu_invalid_wreg;
36b9a952
HR
1800 adev->pciep_rreg = &amdgpu_invalid_rreg;
1801 adev->pciep_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1802 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1803 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1804 adev->didt_rreg = &amdgpu_invalid_rreg;
1805 adev->didt_wreg = &amdgpu_invalid_wreg;
ccdbb20a
RZ
1806 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1807 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
d38ceaf9
AD
1808 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1809 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1810
3e39ab90
AD
1811 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1812 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1813 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
d38ceaf9
AD
1814
1815 /* mutex initializations are all done here so we
1816 * can call these functions without worrying about locking issues */
d38ceaf9 1817 atomic_set(&adev->irq.ih.lock, 0);
0e5ca0d1 1818 mutex_init(&adev->firmware.mutex);
d38ceaf9
AD
1819 mutex_init(&adev->pm.mutex);
1820 mutex_init(&adev->gfx.gpu_clock_mutex);
1821 mutex_init(&adev->srbm_mutex);
b8866c26 1822 mutex_init(&adev->gfx.pipe_reserve_mutex);
d38ceaf9 1823 mutex_init(&adev->grbm_idx_mutex);
d38ceaf9 1824 mutex_init(&adev->mn_lock);
e23b74aa 1825 mutex_init(&adev->virt.vf_errors.lock);
d38ceaf9 1826 hash_init(adev->mn_hash);
13a752e3 1827 mutex_init(&adev->lock_reset);
d38ceaf9 1828
06ec9070 1829 amdgpu_device_check_arguments(adev);
d38ceaf9 1830
d38ceaf9
AD
1831 spin_lock_init(&adev->mmio_idx_lock);
1832 spin_lock_init(&adev->smc_idx_lock);
1833 spin_lock_init(&adev->pcie_idx_lock);
1834 spin_lock_init(&adev->uvd_ctx_idx_lock);
1835 spin_lock_init(&adev->didt_idx_lock);
ccdbb20a 1836 spin_lock_init(&adev->gc_cac_idx_lock);
16abb5d2 1837 spin_lock_init(&adev->se_cac_idx_lock);
d38ceaf9 1838 spin_lock_init(&adev->audio_endpt_idx_lock);
95844d20 1839 spin_lock_init(&adev->mm_stats.lock);
d38ceaf9 1840
0c4e7fa5
CZ
1841 INIT_LIST_HEAD(&adev->shadow_list);
1842 mutex_init(&adev->shadow_list_lock);
1843
795f2813
AR
1844 INIT_LIST_HEAD(&adev->ring_lru_list);
1845 spin_lock_init(&adev->ring_lru_list_lock);
1846
06ec9070
AD
1847 INIT_DELAYED_WORK(&adev->late_init_work,
1848 amdgpu_device_ip_late_init_func_handler);
2dc80b00 1849
0fa49558
AX
1850 /* Registers mapping */
1851 /* TODO: block userspace mapping of io register */
da69c161
KW
1852 if (adev->asic_type >= CHIP_BONAIRE) {
1853 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1854 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1855 } else {
1856 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1857 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1858 }
d38ceaf9 1859
d38ceaf9
AD
1860 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1861 if (adev->rmmio == NULL) {
1862 return -ENOMEM;
1863 }
1864 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1865 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1866
705e519e 1867 /* doorbell bar mapping */
06ec9070 1868 amdgpu_device_doorbell_init(adev);
d38ceaf9
AD
1869
1870 /* io port mapping */
1871 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1872 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1873 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1874 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1875 break;
1876 }
1877 }
1878 if (adev->rio_mem == NULL)
b64a18c5 1879 DRM_INFO("PCI I/O BAR not found.\n");
d38ceaf9
AD
1880
1881 /* early init functions */
06ec9070 1882 r = amdgpu_device_ip_early_init(adev);
d38ceaf9
AD
1883 if (r)
1884 return r;
1885
1886 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1887 /* this will fail for cards that aren't VGA class devices, just
1888 * ignore it */
06ec9070 1889 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
d38ceaf9 1890
e9bef455 1891 if (amdgpu_device_is_px(ddev))
d38ceaf9 1892 runtime = true;
84c8b22e
LW
1893 if (!pci_is_thunderbolt_attached(adev->pdev))
1894 vga_switcheroo_register_client(adev->pdev,
1895 &amdgpu_switcheroo_ops, runtime);
d38ceaf9
AD
1896 if (runtime)
1897 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1898
9475a943
SL
1899 if (amdgpu_emu_mode == 1) {
1900 /* post the asic on emulation mode */
1901 emu_soc_asic_init(adev);
bfca0289 1902 goto fence_driver_init;
9475a943 1903 }
bfca0289 1904
d38ceaf9 1905 /* Read BIOS */
83ba126a
AD
1906 if (!amdgpu_get_bios(adev)) {
1907 r = -EINVAL;
1908 goto failed;
1909 }
f7e9e9fe 1910
d38ceaf9 1911 r = amdgpu_atombios_init(adev);
2c1a2784
AD
1912 if (r) {
1913 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
e23b74aa 1914 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
83ba126a 1915 goto failed;
2c1a2784 1916 }
d38ceaf9 1917
4e99a44e
ML
1918 /* detect whether we are running on an SR-IOV vBIOS */
1919 amdgpu_device_detect_sriov_bios(adev);
048765ad 1920
d38ceaf9 1921 /* Post card if necessary */
39c640c0 1922 if (amdgpu_device_need_post(adev)) {
d38ceaf9 1923 if (!adev->bios) {
bec86378 1924 dev_err(adev->dev, "no vBIOS found\n");
83ba126a
AD
1925 r = -EINVAL;
1926 goto failed;
d38ceaf9 1927 }
bec86378 1928 DRM_INFO("GPU posting now...\n");
4e99a44e
ML
1929 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1930 if (r) {
1931 dev_err(adev->dev, "gpu post error!\n");
1932 goto failed;
1933 }
d38ceaf9
AD
1934 }
1935
88b64e95
AD
1936 if (adev->is_atom_fw) {
1937 /* Initialize clocks */
1938 r = amdgpu_atomfirmware_get_clock_info(adev);
1939 if (r) {
1940 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
e23b74aa 1941 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
88b64e95
AD
1942 goto failed;
1943 }
1944 } else {
a5bde2f9
AD
1945 /* Initialize clocks */
1946 r = amdgpu_atombios_get_clock_info(adev);
1947 if (r) {
1948 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
e23b74aa 1949 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
89041940 1950 goto failed;
a5bde2f9
AD
1951 }
1952 /* init i2c buses */
4562236b
HW
1953 if (!amdgpu_device_has_dc_support(adev))
1954 amdgpu_atombios_i2c_init(adev);
2c1a2784 1955 }
d38ceaf9 1956
bfca0289 1957fence_driver_init:
d38ceaf9
AD
1958 /* Fence driver */
1959 r = amdgpu_fence_driver_init(adev);
2c1a2784
AD
1960 if (r) {
1961 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
e23b74aa 1962 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
83ba126a 1963 goto failed;
2c1a2784 1964 }
d38ceaf9
AD
1965
1966 /* init the mode config */
1967 drm_mode_config_init(adev->ddev);
1968
06ec9070 1969 r = amdgpu_device_ip_init(adev);
d38ceaf9 1970 if (r) {
8840a387 1971 /* failed in exclusive mode due to timeout */
1972 if (amdgpu_sriov_vf(adev) &&
1973 !amdgpu_sriov_runtime(adev) &&
1974 amdgpu_virt_mmio_blocked(adev) &&
1975 !amdgpu_virt_wait_reset(adev)) {
1976 dev_err(adev->dev, "VF exclusive mode timeout\n");
1daee8b4
PD
1977 /* Don't send request since VF is inactive. */
1978 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
1979 adev->virt.ops = NULL;
8840a387 1980 r = -EAGAIN;
1981 goto failed;
1982 }
06ec9070 1983 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
e23b74aa 1984 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
06ec9070 1985 amdgpu_device_ip_fini(adev);
83ba126a 1986 goto failed;
d38ceaf9
AD
1987 }
1988
1989 adev->accel_working = true;
1990
e59c0205
AX
1991 amdgpu_vm_check_compute_bug(adev);
1992
95844d20
MO
1993 /* Initialize the buffer migration limit. */
1994 if (amdgpu_moverate >= 0)
1995 max_MBps = amdgpu_moverate;
1996 else
1997 max_MBps = 8; /* Allow 8 MB/s. */
1998 /* Get a log2 for easy divisions. */
1999 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2000
d38ceaf9
AD
2001 r = amdgpu_ib_pool_init(adev);
2002 if (r) {
2003 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
e23b74aa 2004 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
83ba126a 2005 goto failed;
d38ceaf9
AD
2006 }
2007
2008 r = amdgpu_ib_ring_tests(adev);
2009 if (r)
2010 DRM_ERROR("ib ring test failed (%d).\n", r);
2011
2dc8f81e
HC
2012 if (amdgpu_sriov_vf(adev))
2013 amdgpu_virt_init_data_exchange(adev);
2014
9bc92b9c
ML
2015 amdgpu_fbdev_init(adev);
2016
d2f52ac8
RZ
2017 r = amdgpu_pm_sysfs_init(adev);
2018 if (r)
2019 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2020
75758255 2021 r = amdgpu_debugfs_gem_init(adev);
3f14e623 2022 if (r)
d38ceaf9 2023 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
d38ceaf9
AD
2024
2025 r = amdgpu_debugfs_regs_init(adev);
3f14e623 2026 if (r)
d38ceaf9 2027 DRM_ERROR("registering register debugfs failed (%d).\n", r);
d38ceaf9 2028
50ab2533 2029 r = amdgpu_debugfs_firmware_init(adev);
3f14e623 2030 if (r)
50ab2533 2031 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
50ab2533 2032
763efb6c 2033 r = amdgpu_debugfs_init(adev);
db95e218 2034 if (r)
763efb6c 2035 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
db95e218 2036
d38ceaf9
AD
2037 if ((amdgpu_testing & 1)) {
2038 if (adev->accel_working)
2039 amdgpu_test_moves(adev);
2040 else
2041 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2042 }
d38ceaf9
AD
2043 if (amdgpu_benchmarking) {
2044 if (adev->accel_working)
2045 amdgpu_benchmark(adev, amdgpu_benchmarking);
2046 else
2047 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2048 }
2049
2050 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2051 * explicit gating rather than handling it automatically.
2052 */
06ec9070 2053 r = amdgpu_device_ip_late_init(adev);
2c1a2784 2054 if (r) {
06ec9070 2055 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
e23b74aa 2056 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
83ba126a 2057 goto failed;
2c1a2784 2058 }
d38ceaf9
AD
2059
2060 return 0;
83ba126a
AD
2061
2062failed:
89041940 2063 amdgpu_vf_error_trans_all(adev);
83ba126a
AD
2064 if (runtime)
2065 vga_switcheroo_fini_domain_pm_ops(adev->dev);
8840a387 2066
83ba126a 2067 return r;
d38ceaf9
AD
2068}
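/*
 * Hedged usage sketch (hypothetical, not part of this file): the DRM load
 * callback is the usual caller of amdgpu_device_init(), roughly:
 *
 *	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
 *	if (!adev)
 *		return -ENOMEM;
 *	dev->dev_private = (void *)adev;
 *	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
 *
 * where "flags" carries the ASIC type and AMD_IS_* feature bits taken from
 * the matching PCI ID table entry.
 */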
2069
d38ceaf9
AD
2070/**
2071 * amdgpu_device_fini - tear down the driver
2072 *
2073 * @adev: amdgpu_device pointer
2074 *
2075 * Tear down the driver info (all asics).
2076 * Called at driver shutdown.
2077 */
2078void amdgpu_device_fini(struct amdgpu_device *adev)
2079{
2080 int r;
2081
2082 DRM_INFO("amdgpu: finishing device.\n");
2083 adev->shutdown = true;
db2c2a97
PD
2084 if (adev->mode_info.mode_config_initialized)
2085 drm_crtc_force_disable_all(adev->ddev);
b9141cd3 2086
d38ceaf9
AD
2087 amdgpu_ib_pool_fini(adev);
2088 amdgpu_fence_driver_fini(adev);
2089 amdgpu_fbdev_fini(adev);
06ec9070 2090 r = amdgpu_device_ip_fini(adev);
ab4fe3e1
HR
2091 if (adev->firmware.gpu_info_fw) {
2092 release_firmware(adev->firmware.gpu_info_fw);
2093 adev->firmware.gpu_info_fw = NULL;
2094 }
d38ceaf9 2095 adev->accel_working = false;
2dc80b00 2096 cancel_delayed_work_sync(&adev->late_init_work);
d38ceaf9 2097 /* free i2c buses */
4562236b
HW
2098 if (!amdgpu_device_has_dc_support(adev))
2099 amdgpu_i2c_fini(adev);
bfca0289
SL
2100
2101 if (amdgpu_emu_mode != 1)
2102 amdgpu_atombios_fini(adev);
2103
d38ceaf9
AD
2104 kfree(adev->bios);
2105 adev->bios = NULL;
84c8b22e
LW
2106 if (!pci_is_thunderbolt_attached(adev->pdev))
2107 vga_switcheroo_unregister_client(adev->pdev);
83ba126a
AD
2108 if (adev->flags & AMD_IS_PX)
2109 vga_switcheroo_fini_domain_pm_ops(adev->dev);
d38ceaf9
AD
2110 vga_client_register(adev->pdev, NULL, NULL, NULL);
2111 if (adev->rio_mem)
2112 pci_iounmap(adev->pdev, adev->rio_mem);
2113 adev->rio_mem = NULL;
2114 iounmap(adev->rmmio);
2115 adev->rmmio = NULL;
06ec9070 2116 amdgpu_device_doorbell_fini(adev);
d2f52ac8 2117 amdgpu_pm_sysfs_fini(adev);
d38ceaf9 2118 amdgpu_debugfs_regs_cleanup(adev);
d38ceaf9
AD
2119}
2120
2121
2122/*
2123 * Suspend & resume.
2124 */
2125/**
810ddc3a 2126 * amdgpu_device_suspend - initiate device suspend
d38ceaf9
AD
2127 *
2128 * @dev: drm dev pointer
2129 * @suspend: if true, put the PCI device into a low power (D3hot) state
2130 *
2131 * Puts the hw in the suspend state (all asics).
2132 * Returns 0 for success or an error on failure.
2133 * Called at driver suspend.
2134 */
810ddc3a 2135int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
d38ceaf9
AD
2136{
2137 struct amdgpu_device *adev;
2138 struct drm_crtc *crtc;
2139 struct drm_connector *connector;
5ceb54c6 2140 int r;
d38ceaf9
AD
2141
2142 if (dev == NULL || dev->dev_private == NULL) {
2143 return -ENODEV;
2144 }
2145
2146 adev = dev->dev_private;
2147
2148 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2149 return 0;
2150
2151 drm_kms_helper_poll_disable(dev);
2152
4562236b
HW
2153 if (!amdgpu_device_has_dc_support(adev)) {
2154 /* turn off display hw */
2155 drm_modeset_lock_all(dev);
2156 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2157 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2158 }
2159 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2160 }
2161
ba997709
YZ
2162 amdgpu_amdkfd_suspend(adev);
2163
756e6880 2164 /* unpin the front buffers and cursors */
d38ceaf9 2165 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
756e6880 2166 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
d38ceaf9
AD
2167 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2168 struct amdgpu_bo *robj;
2169
756e6880
AD
2170 if (amdgpu_crtc->cursor_bo) {
2171 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2172 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2173 if (r == 0) {
2174 amdgpu_bo_unpin(aobj);
2175 amdgpu_bo_unreserve(aobj);
2176 }
2177 }
2178
d38ceaf9
AD
2179 if (rfb == NULL || rfb->obj == NULL) {
2180 continue;
2181 }
2182 robj = gem_to_amdgpu_bo(rfb->obj);
2183 /* don't unpin kernel fb objects */
2184 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
7a6901d7 2185 r = amdgpu_bo_reserve(robj, true);
d38ceaf9
AD
2186 if (r == 0) {
2187 amdgpu_bo_unpin(robj);
2188 amdgpu_bo_unreserve(robj);
2189 }
2190 }
2191 }
2192 /* evict vram memory */
2193 amdgpu_bo_evict_vram(adev);
2194
5ceb54c6 2195 amdgpu_fence_driver_suspend(adev);
d38ceaf9 2196
cdd61df6 2197 r = amdgpu_device_ip_suspend(adev);
d38ceaf9 2198
a0a71e49
AD
2199 /* evict remaining vram memory
2200 * This second call to evict vram is to evict the gart page table
2201 * using the CPU.
2202 */
d38ceaf9
AD
2203 amdgpu_bo_evict_vram(adev);
2204
2205 pci_save_state(dev->pdev);
2206 if (suspend) {
2207 /* Shut down the device */
2208 pci_disable_device(dev->pdev);
2209 pci_set_power_state(dev->pdev, PCI_D3hot);
74b0b157 2210 } else {
2211 r = amdgpu_asic_reset(adev);
2212 if (r)
2213 DRM_ERROR("amdgpu asic reset failed\n");
d38ceaf9
AD
2214 }
2215
2216 if (fbcon) {
2217 console_lock();
2218 amdgpu_fbdev_set_suspend(adev, 1);
2219 console_unlock();
2220 }
2221 return 0;
2222}
2223
2224/**
810ddc3a 2225 * amdgpu_device_resume - initiate device resume
d38ceaf9
AD
2226 *
2227 * @dev: drm dev pointer
2228 *
2229 * Bring the hw back to operating state (all asics).
2230 * Returns 0 for success or an error on failure.
2231 * Called at driver resume.
2232 */
810ddc3a 2233int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
d38ceaf9
AD
2234{
2235 struct drm_connector *connector;
2236 struct amdgpu_device *adev = dev->dev_private;
756e6880 2237 struct drm_crtc *crtc;
03161a6e 2238 int r = 0;
d38ceaf9
AD
2239
2240 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2241 return 0;
2242
74b0b157 2243 if (fbcon)
d38ceaf9 2244 console_lock();
74b0b157 2245
d38ceaf9
AD
2246 if (resume) {
2247 pci_set_power_state(dev->pdev, PCI_D0);
2248 pci_restore_state(dev->pdev);
74b0b157 2249 r = pci_enable_device(dev->pdev);
03161a6e
HR
2250 if (r)
2251 goto unlock;
d38ceaf9
AD
2252 }
2253
2254 /* post card */
39c640c0 2255 if (amdgpu_device_need_post(adev)) {
74b0b157 2256 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2257 if (r)
2258 DRM_ERROR("amdgpu asic init failed\n");
2259 }
d38ceaf9 2260
06ec9070 2261 r = amdgpu_device_ip_resume(adev);
e6707218 2262 if (r) {
06ec9070 2263 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
03161a6e 2264 goto unlock;
e6707218 2265 }
5ceb54c6
AD
2266 amdgpu_fence_driver_resume(adev);
2267
ca198528
FC
2268 if (resume) {
2269 r = amdgpu_ib_ring_tests(adev);
2270 if (r)
2271 DRM_ERROR("ib ring test failed (%d).\n", r);
2272 }
d38ceaf9 2273
06ec9070 2274 r = amdgpu_device_ip_late_init(adev);
03161a6e
HR
2275 if (r)
2276 goto unlock;
d38ceaf9 2277
756e6880
AD
2278 /* pin cursors */
2279 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2280 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2281
2282 if (amdgpu_crtc->cursor_bo) {
2283 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
7a6901d7 2284 r = amdgpu_bo_reserve(aobj, true);
756e6880
AD
2285 if (r == 0) {
2286 r = amdgpu_bo_pin(aobj,
2287 AMDGPU_GEM_DOMAIN_VRAM,
2288 &amdgpu_crtc->cursor_addr);
2289 if (r != 0)
2290 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2291 amdgpu_bo_unreserve(aobj);
2292 }
2293 }
2294 }
ba997709
YZ
2295 r = amdgpu_amdkfd_resume(adev);
2296 if (r)
2297 return r;
756e6880 2298
d38ceaf9
AD
2299 /* blat the mode back in */
2300 if (fbcon) {
4562236b
HW
2301 if (!amdgpu_device_has_dc_support(adev)) {
2302 /* pre DCE11 */
2303 drm_helper_resume_force_mode(dev);
2304
2305 /* turn on display hw */
2306 drm_modeset_lock_all(dev);
2307 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2308 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2309 }
2310 drm_modeset_unlock_all(dev);
d38ceaf9
AD
2311 }
2312 }
2313
2314 drm_kms_helper_poll_enable(dev);
23a1a9e5
L
2315
2316 /*
2317 * Most of the connector probing functions try to acquire runtime pm
2318 * refs to ensure that the GPU is powered on when connector polling is
2319 * performed. Since we're calling this from a runtime PM callback,
2320 * trying to acquire rpm refs will cause us to deadlock.
2321 *
2322 * Since we're guaranteed to be holding the rpm lock, it's safe to
2323 * temporarily disable the rpm helpers so this doesn't deadlock us.
2324 */
2325#ifdef CONFIG_PM
2326 dev->dev->power.disable_depth++;
2327#endif
4562236b
HW
2328 if (!amdgpu_device_has_dc_support(adev))
2329 drm_helper_hpd_irq_event(dev);
2330 else
2331 drm_kms_helper_hotplug_event(dev);
23a1a9e5
L
2332#ifdef CONFIG_PM
2333 dev->dev->power.disable_depth--;
2334#endif
d38ceaf9 2335
03161a6e 2336 if (fbcon)
d38ceaf9 2337 amdgpu_fbdev_set_suspend(adev, 0);
03161a6e
HR
2338
2339unlock:
2340 if (fbcon)
d38ceaf9 2341 console_unlock();
d38ceaf9 2342
03161a6e 2343 return r;
d38ceaf9
AD
2344}
2345
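/*
 * amdgpu_device_ip_check_soft_reset - poll all IP blocks for a hang
 *
 * Calls each block's check_soft_reset() hook, latches the result in
 * ip_blocks[i].status.hang and returns true if any block reports a hang.
 * SR-IOV VFs unconditionally report a hang here.
 */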
06ec9070 2346static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
63fbf42f
CZ
2347{
2348 int i;
2349 bool asic_hang = false;
2350
f993d628
ML
2351 if (amdgpu_sriov_vf(adev))
2352 return true;
2353
63fbf42f 2354 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2355 if (!adev->ip_blocks[i].status.valid)
63fbf42f 2356 continue;
a1255107
AD
2357 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2358 adev->ip_blocks[i].status.hang =
2359 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2360 if (adev->ip_blocks[i].status.hang) {
2361 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
63fbf42f
CZ
2362 asic_hang = true;
2363 }
2364 }
2365 return asic_hang;
2366}
2367
06ec9070 2368static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
d31a501e
CZ
2369{
2370 int i, r = 0;
2371
2372 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2373 if (!adev->ip_blocks[i].status.valid)
d31a501e 2374 continue;
a1255107
AD
2375 if (adev->ip_blocks[i].status.hang &&
2376 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2377 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
d31a501e
CZ
2378 if (r)
2379 return r;
2380 }
2381 }
2382
2383 return 0;
2384}
2385
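/*
 * amdgpu_device_ip_need_full_reset - decide between soft and full reset
 *
 * Returns true if a hung GMC, SMC, ACP, DCE or PSP block is found; a hang
 * in one of those blocks is handled with a full ASIC reset rather than a
 * per-block soft reset.
 */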
06ec9070 2386static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
35d782fe 2387{
da146d3b
AD
2388 int i;
2389
2390 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2391 if (!adev->ip_blocks[i].status.valid)
da146d3b 2392 continue;
a1255107
AD
2393 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2394 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2395 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
98512bb8
KW
2396 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2397 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
a1255107 2398 if (adev->ip_blocks[i].status.hang) {
da146d3b
AD
2399 DRM_INFO("Some block need full reset!\n");
2400 return true;
2401 }
2402 }
35d782fe
CZ
2403 }
2404 return false;
2405}
2406
06ec9070 2407static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2408{
2409 int i, r = 0;
2410
2411 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2412 if (!adev->ip_blocks[i].status.valid)
35d782fe 2413 continue;
a1255107
AD
2414 if (adev->ip_blocks[i].status.hang &&
2415 adev->ip_blocks[i].version->funcs->soft_reset) {
2416 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
35d782fe
CZ
2417 if (r)
2418 return r;
2419 }
2420 }
2421
2422 return 0;
2423}
2424
06ec9070 2425static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
35d782fe
CZ
2426{
2427 int i, r = 0;
2428
2429 for (i = 0; i < adev->num_ip_blocks; i++) {
a1255107 2430 if (!adev->ip_blocks[i].status.valid)
35d782fe 2431 continue;
a1255107
AD
2432 if (adev->ip_blocks[i].status.hang &&
2433 adev->ip_blocks[i].version->funcs->post_soft_reset)
2434 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
35d782fe
CZ
2435 if (r)
2436 return r;
2437 }
2438
2439 return 0;
2440}
2441
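/*
 * amdgpu_device_recover_vram_from_shadow - restore one BO from its shadow
 *
 * If @bo still resides in VRAM after a reset, validate its shadow copy and
 * schedule a copy back into VRAM on @ring; *@fence signals when the restore
 * has completed.  BOs without a shadow, or already evicted from VRAM, are
 * skipped.
 */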
06ec9070
AD
2442static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2443 struct amdgpu_ring *ring,
2444 struct amdgpu_bo *bo,
2445 struct dma_fence **fence)
53cdccd5
CZ
2446{
2447 uint32_t domain;
2448 int r;
2449
23d2e504
RH
2450 if (!bo->shadow)
2451 return 0;
2452
1d284797 2453 r = amdgpu_bo_reserve(bo, true);
23d2e504
RH
2454 if (r)
2455 return r;
2456 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2457 /* if bo has been evicted, then no need to recover */
2458 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
82521316
RH
2459 r = amdgpu_bo_validate(bo->shadow);
2460 if (r) {
2461 DRM_ERROR("bo validate failed!\n");
2462 goto err;
2463 }
2464
23d2e504 2465 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
53cdccd5 2466 NULL, fence, true);
23d2e504
RH
2467 if (r) {
2468 DRM_ERROR("recover page table failed!\n");
2469 goto err;
2470 }
2471 }
53cdccd5 2472err:
23d2e504
RH
2473 amdgpu_bo_unreserve(bo);
2474 return r;
53cdccd5
CZ
2475}
2476
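/*
 * amdgpu_device_handle_vram_lost - restore shadowed VRAM BOs after a reset
 *
 * Walks adev->shadow_list, copies each shadowed BO back into VRAM and waits
 * on the restore fences (module lockup timeout under SR-IOV runtime, 100 ms
 * otherwise).  Returns 0 if every restore completed, 1 on failure.
 */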
c41d1cf6
ML
2477static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2478{
2479 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2480 struct amdgpu_bo *bo, *tmp;
2481 struct dma_fence *fence = NULL, *next = NULL;
2482 long r = 1;
2483 int i = 0;
2484 long tmo;
2485
2486 if (amdgpu_sriov_runtime(adev))
2487 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2488 else
2489 tmo = msecs_to_jiffies(100);
2490
2491 DRM_INFO("recover vram bo from shadow start\n");
2492 mutex_lock(&adev->shadow_list_lock);
2493 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2494 next = NULL;
2495 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2496 if (fence) {
2497 r = dma_fence_wait_timeout(fence, false, tmo);
2498 if (r == 0)
2499 pr_err("wait fence %p[%d] timeout\n", fence, i);
2500 else if (r < 0)
2501 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2502 if (r < 1) {
2503 dma_fence_put(fence);
2504 fence = next;
2505 break;
2506 }
2507 i++;
2508 }
2509
2510 dma_fence_put(fence);
2511 fence = next;
2512 }
2513 mutex_unlock(&adev->shadow_list_lock);
2514
2515 if (fence) {
2516 r = dma_fence_wait_timeout(fence, false, tmo);
2517 if (r == 0)
2518 pr_err("wait fence %p[%d] timeout\n", fence, i);
2519 else if (r < 0)
2520 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2521
2522 }
2523 dma_fence_put(fence);
2524
2525 if (r > 0)
2526 DRM_INFO("recover vram bo from shadow done\n");
2527 else
2528 DRM_ERROR("recover vram bo from shadow failed\n");
2529
2530 return (r > 0 ? 0 : 1);
2531}
2532
5740682e 2533/*
06ec9070 2534 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
a90ad3c2
ML
2535 *
2536 * @adev: amdgpu device pointer
a90ad3c2 2537 *
5740682e
ML
2538 * attempt a soft reset, falling back to a full reset, and reinitialize the ASIC
2539 * returns 0 on success, otherwise an error code
2540*/
c41d1cf6 2541static int amdgpu_device_reset(struct amdgpu_device *adev)
a90ad3c2 2542{
5740682e
ML
2543 bool need_full_reset, vram_lost = 0;
2544 int r;
a90ad3c2 2545
06ec9070 2546 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
a90ad3c2 2547
5740682e 2548 if (!need_full_reset) {
06ec9070
AD
2549 amdgpu_device_ip_pre_soft_reset(adev);
2550 r = amdgpu_device_ip_soft_reset(adev);
2551 amdgpu_device_ip_post_soft_reset(adev);
2552 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5740682e
ML
2553 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2554 need_full_reset = true;
2555 }
5740682e 2556 }
a90ad3c2 2557
5740682e 2558 if (need_full_reset) {
cdd61df6 2559 r = amdgpu_device_ip_suspend(adev);
a90ad3c2 2560
5740682e 2561retry:
5740682e 2562 r = amdgpu_asic_reset(adev);
5740682e
ML
2563 /* post card */
2564 amdgpu_atom_asic_init(adev->mode_info.atom_context);
65781c78 2565
5740682e
ML
2566 if (!r) {
2567 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
06ec9070 2568 r = amdgpu_device_ip_resume_phase1(adev);
5740682e
ML
2569 if (r)
2570 goto out;
65781c78 2571
06ec9070 2572 vram_lost = amdgpu_device_check_vram_lost(adev);
5740682e
ML
2573 if (vram_lost) {
2574 DRM_ERROR("VRAM is lost!\n");
2575 atomic_inc(&adev->vram_lost_counter);
2576 }
2577
c1c7ce8f
CK
2578 r = amdgpu_gtt_mgr_recover(
2579 &adev->mman.bdev.man[TTM_PL_TT]);
5740682e
ML
2580 if (r)
2581 goto out;
2582
06ec9070 2583 r = amdgpu_device_ip_resume_phase2(adev);
5740682e
ML
2584 if (r)
2585 goto out;
2586
2587 if (vram_lost)
06ec9070 2588 amdgpu_device_fill_reset_magic(adev);
65781c78 2589 }
5740682e 2590 }
65781c78 2591
5740682e
ML
2592out:
2593 if (!r) {
2594 amdgpu_irq_gpu_reset_resume_helper(adev);
2595 r = amdgpu_ib_ring_tests(adev);
2596 if (r) {
2597 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
cdd61df6 2598 r = amdgpu_device_ip_suspend(adev);
5740682e
ML
2599 need_full_reset = true;
2600 goto retry;
2601 }
2602 }
65781c78 2603
c41d1cf6
ML
2604 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
2605 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2 2606
5740682e
ML
2607 return r;
2608}
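/*
 * Flow note for amdgpu_device_reset(): a per-IP soft reset is tried first;
 * only if that fails (or a block demands it) are all IPs suspended, the ASIC
 * fully reset and the vBIOS re-posted, after which the IPs are resumed in
 * two phases with the GTT manager recovered in between and the reset magic
 * refreshed if VRAM contents were lost.
 */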
a90ad3c2 2609
5740682e 2610/*
06ec9070 2611 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5740682e
ML
2612 *
2613 * @adev: amdgpu device pointer
5740682e
ML
2614 *
2615 * perform a VF FLR and reinitialize the ASIC
2616 * returns 0 on success, otherwise an error code
2617*/
c41d1cf6 2618static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor)
5740682e
ML
2619{
2620 int r;
2621
2622 if (from_hypervisor)
2623 r = amdgpu_virt_request_full_gpu(adev, true);
2624 else
2625 r = amdgpu_virt_reset_gpu(adev);
2626 if (r)
2627 return r;
a90ad3c2
ML
2628
2629 /* Resume IP prior to SMC */
06ec9070 2630 r = amdgpu_device_ip_reinit_early_sriov(adev);
5740682e
ML
2631 if (r)
2632 goto error;
a90ad3c2
ML
2633
2634 /* we need recover gart prior to run SMC/CP/SDMA resume */
c1c7ce8f 2635 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
a90ad3c2
ML
2636
2637 /* now we are okay to resume SMC/CP/SDMA */
06ec9070 2638 r = amdgpu_device_ip_reinit_late_sriov(adev);
c41d1cf6 2639 amdgpu_virt_release_full_gpu(adev, true);
5740682e
ML
2640 if (r)
2641 goto error;
a90ad3c2
ML
2642
2643 amdgpu_irq_gpu_reset_resume_helper(adev);
5740682e 2644 r = amdgpu_ib_ring_tests(adev);
a90ad3c2 2645
c41d1cf6
ML
2646 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2647 atomic_inc(&adev->vram_lost_counter);
2648 r = amdgpu_device_handle_vram_lost(adev);
a90ad3c2
ML
2649 }
2650
c41d1cf6
ML
2651error:
2652
a90ad3c2
ML
2653 return r;
2654}
2655
d38ceaf9 2656/**
5f152b5e 2657 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
d38ceaf9
AD
2658 *
2659 * @adev: amdgpu device pointer
5740682e 2660 * @job: which job trigger hang
dcebf026 2661 * @force forces reset regardless of amdgpu_gpu_recovery
d38ceaf9 2662 *
5740682e 2663 * Attempt to reset the GPU if it has hung (all asics).
d38ceaf9
AD
2664 * Returns 0 for success or an error on failure.
2665 */
5f152b5e
AD
2666int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2667 struct amdgpu_job *job, bool force)
d38ceaf9 2668{
4562236b 2669 struct drm_atomic_state *state = NULL;
5740682e 2670 int i, r, resched;
fb140b29 2671
54bc1398 2672 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
63fbf42f
CZ
2673 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2674 return 0;
2675 }
d38ceaf9 2676
dcebf026
AG
2677 if (!force && (amdgpu_gpu_recovery == 0 ||
2678 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
2679 DRM_INFO("GPU recovery disabled.\n");
2680 return 0;
2681 }
2682
5740682e
ML
2683 dev_info(adev->dev, "GPU reset begin!\n");
2684
13a752e3 2685 mutex_lock(&adev->lock_reset);
d94aed5a 2686 atomic_inc(&adev->gpu_reset_counter);
13a752e3 2687 adev->in_gpu_reset = 1;
d38ceaf9 2688
a3c47d6b
CZ
2689 /* block TTM */
2690 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
71182665 2691
4562236b
HW
2692 /* store modesetting */
2693 if (amdgpu_device_has_dc_support(adev))
2694 state = drm_atomic_helper_suspend(adev->ddev);
a3c47d6b 2695
71182665 2696 /* block all schedulers and reset given job's ring */
0875dc9e
CZ
2697 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2698 struct amdgpu_ring *ring = adev->rings[i];
2699
51687759 2700 if (!ring || !ring->sched.thread)
0875dc9e 2701 continue;
5740682e 2702
71182665
ML
2703 kthread_park(ring->sched.thread);
2704
5740682e
ML
2705 if (job && job->ring->idx != i)
2706 continue;
2707
1b1f42d8 2708 drm_sched_hw_job_reset(&ring->sched, &job->base);
5740682e 2709
2f9d4084
ML
2710 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2711 amdgpu_fence_driver_force_completion(ring);
0875dc9e 2712 }
d38ceaf9 2713
5740682e 2714 if (amdgpu_sriov_vf(adev))
c41d1cf6 2715 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5740682e 2716 else
c41d1cf6 2717 r = amdgpu_device_reset(adev);
5740682e 2718
71182665
ML
2719 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2720 struct amdgpu_ring *ring = adev->rings[i];
53cdccd5 2721
71182665
ML
2722 if (!ring || !ring->sched.thread)
2723 continue;
5740682e 2724
71182665
ML
2725 /* only need to recover the scheduler of the given job's ring,
2726 * or of all rings (in the case @job is NULL),
2727 * after the reset above has completed
2728 */
2729 if ((!job || job->ring->idx == i) && !r)
1b1f42d8 2730 drm_sched_job_recovery(&ring->sched);
5740682e 2731
71182665 2732 kthread_unpark(ring->sched.thread);
d38ceaf9
AD
2733 }
2734
4562236b 2735 if (amdgpu_device_has_dc_support(adev)) {
5740682e
ML
2736 if (drm_atomic_helper_resume(adev->ddev, state))
2737 dev_info(adev->dev, "drm resume failed:%d\n", r);
5740682e 2738 } else {
4562236b 2739 drm_helper_resume_force_mode(adev->ddev);
5740682e 2740 }
d38ceaf9
AD
2741
2742 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
5740682e 2743
89041940 2744 if (r) {
d38ceaf9 2745 /* bad news, how to tell it to userspace ? */
5740682e
ML
2746 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
2747 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
2748 } else {
2749 dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
89041940 2750 }
d38ceaf9 2751
89041940 2752 amdgpu_vf_error_trans_all(adev);
13a752e3
ML
2753 adev->in_gpu_reset = 0;
2754 mutex_unlock(&adev->lock_reset);
d38ceaf9
AD
2755 return r;
2756}
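/*
 * Note: amdgpu_device_gpu_recover() is typically reached from the GPU
 * scheduler's job timeout handler (which passes the hung @job) or from
 * paths that force a recovery with @job == NULL, such as a
 * hypervisor-triggered VF FLR.
 */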
2757
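/*
 * amdgpu_device_get_pcie_info - fill in the PCIe link speed and width masks
 *
 * Honours the amdgpu_pcie_gen_cap / amdgpu_pcie_lane_cap overrides, falls
 * back to default masks for devices on a root bus (e.g. APUs), and otherwise
 * derives the masks from the DRM PCIe speed/width helpers.
 */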
041d9d93 2758void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
d0dd7f0c
AD
2759{
2760 u32 mask;
2761 int ret;
2762
cd474ba0
AD
2763 if (amdgpu_pcie_gen_cap)
2764 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
d0dd7f0c 2765
cd474ba0
AD
2766 if (amdgpu_pcie_lane_cap)
2767 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
d0dd7f0c 2768
cd474ba0
AD
2769 /* covers APUs as well */
2770 if (pci_is_root_bus(adev->pdev->bus)) {
2771 if (adev->pm.pcie_gen_mask == 0)
2772 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2773 if (adev->pm.pcie_mlw_mask == 0)
2774 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c 2775 return;
cd474ba0 2776 }
d0dd7f0c 2777
cd474ba0
AD
2778 if (adev->pm.pcie_gen_mask == 0) {
2779 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2780 if (!ret) {
2781 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2782 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2783 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2784
2785 if (mask & DRM_PCIE_SPEED_25)
2786 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2787 if (mask & DRM_PCIE_SPEED_50)
2788 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2789 if (mask & DRM_PCIE_SPEED_80)
2790 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2791 } else {
2792 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2793 }
2794 }
2795 if (adev->pm.pcie_mlw_mask == 0) {
2796 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2797 if (!ret) {
2798 switch (mask) {
2799 case 32:
2800 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2801 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2802 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2803 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2804 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2805 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2806 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2807 break;
2808 case 16:
2809 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2810 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2811 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2812 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2813 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2814 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2815 break;
2816 case 12:
2817 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2818 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2819 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2820 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2821 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2822 break;
2823 case 8:
2824 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2825 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2826 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2827 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2828 break;
2829 case 4:
2830 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2831 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2832 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2833 break;
2834 case 2:
2835 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2836 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2837 break;
2838 case 1:
2839 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2840 break;
2841 default:
2842 break;
2843 }
2844 } else {
2845 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
d0dd7f0c
AD
2846 }
2847 }
2848}
d38ceaf9 2849