/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "umc_v6_0.h"
#include "umc_v6_1.h"
#include "umc_v6_7.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include "amdgpu_reset.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0					0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX				2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2			0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX		2

static const char *gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

static const char *mmhub_client_ids_raven[][2] = {
static const char *mmhub_client_ids_renoir[][2] = {
static const char *mmhub_client_ids_vega10[][2] = {
	[32+14][0] = "SDMA0",
	[32+4][1] = "DCEDWB",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[32+15][0] = "SDMA0",
	[32+1][1] = "DCEDWB",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
static const char *mmhub_client_ids_aldebaran[][2] = {
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[96+11][0] = "JPEG0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[96+11][1] = "JPEG0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
};

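/*
 * The mmhub_client_ids_* tables above are indexed as [cid][rw]: cid is
 * the client ID field from VM_L2_PROTECTION_FAULT_STATUS and rw selects
 * the read (0) or write (1) client name, as used by the fault handler
 * below.
 */
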
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

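/*
 * UMC MCUMC_CTRL (and matching mask) register addresses used to toggle
 * ECC error reporting: eight UMC instances spaced 0x40000 apart, each
 * with four channel instances spaced 0x800 apart, giving the 32
 * addresses in each table below.
 */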
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

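/*
 * Enable or disable the VM protection-fault interrupt sources for all
 * sixteen VM contexts on every VM hub.
 */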
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				if (j == AMDGPU_GFXHUB_0)
					tmp = RREG32_SOC15_IP(GC, reg);
				else
					tmp = RREG32_SOC15_IP(MMHUB, reg);

				tmp &= ~bits;

				if (j == AMDGPU_GFXHUB_0)
					WREG32_SOC15_IP(GC, reg, tmp);
				else
					WREG32_SOC15_IP(MMHUB, reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				if (j == AMDGPU_GFXHUB_0)
					tmp = RREG32_SOC15_IP(GC, reg);
				else
					tmp = RREG32_SOC15_IP(MMHUB, reg);

				tmp |= bits;

				if (j == AMDGPU_GFXHUB_0)
					WREG32_SOC15_IP(GC, reg, tmp);
				else
					WREG32_SOC15_IP(MMHUB, reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

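/*
 * Handle a VM protection fault: recoverable retry faults are filtered,
 * delegated to the soft IH ring or resolved by filling page tables;
 * anything left is decoded from VM_L2_PROTECTION_FAULT_STATUS and
 * reported.
 */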
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!printk_ratelimit())
		return 0;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->ip_versions[MMHUB_HWIP][0]) {
		case IP_VERSION(9, 0, 0):
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case IP_VERSION(9, 3, 0):
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case IP_VERSION(9, 4, 0):
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case IP_VERSION(9, 4, 1):
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 0):
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case IP_VERSION(1, 5, 0):
		case IP_VERSION(2, 4, 0):
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case IP_VERSION(9, 4, 2):
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);

	return 0;
}

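/* Interrupt source callbacks: .set arms the source, .process handles delivered IV entries. */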
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

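/*
 * Build a VM_INVALIDATE_ENG0_REQ value that invalidates the L1 PTE and
 * L2 PTE/PDE caches for @vmid with the given flush type.
 */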
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the
 * invalidation engine semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
		return false;

	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - flush the TLB with a certain flush type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_domain->sem);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub == AMDGPU_GFXHUB_0)
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		if (vmhub == AMDGPU_GFXHUB_0)
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
		else
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (vmhub == AMDGPU_GFXHUB_0)
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);

			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub == AMDGPU_GFXHUB_0)
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
		else
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}
		up_read(&adev->reset_domain->sem);
		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

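/*
 * Ring-command variant of the TLB flush above: the page-directory
 * update and invalidation are emitted as ring packets so they execute
 * as part of a command submission rather than through direct MMIO.
 */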
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

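/*
 * Translate a PDE's address from MC to physical space and apply the
 * per-level flags: the block fragment size on PDB1 entries and, when
 * translate_further is enabled, the TF bit on PDB0 entries.
 */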
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
	     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}

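/*
 * Determine how much VRAM the VBIOS is scanning out, so that this
 * framebuffer can stay reserved until the driver's own display stack
 * takes over.
 */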
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	default:
		break;
	}

	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no special ras_late_init function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_cb function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}

	if (adev->mmhub.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);

		strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
		adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
		adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
	amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
	adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
}

static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{
	/* is UMC the right IP to check for MCA? Maybe DF? */
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->mca.funcs = &mca_v3_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	r = amdgpu_gmc_ras_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Workaround for a performance drop on Vega10 when the VBIOS
	 * enables partial writes while HBM ECC is disabled.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);

		if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
		    adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here as it appears system reserved
	 * memory in host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0): /* DCE SG support */
		case IP_VERSION(9, 2, 2): /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

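/*
 * Set up the VMID0 (kernel) GART. When the GPU is XGMI-connected to the
 * CPU, VMID0 gets an extra root level (PDB0, allocated below) so system
 * memory can presumably be mapped alongside VRAM; otherwise a flat
 * one-level GART table is used.
 */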
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	r = amdgpu_gart_table_vram_alloc(adev);
	if (r)
		return r;

	if (adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);
	if (adev->mca.funcs)
		adev->mca.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF related registers are not readable;
		 * hardcoding is the only way to set the correct vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;
		if (adev->df.funcs &&
		    adev->df.funcs->get_hbm_channel_number) {
			numchan = adev->df.funcs->get_hbm_channel_number(adev);
			adev->gmc.vram_width = numchan * chansize;
		}
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 1):
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48 : 44;
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

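/*
 * Bring up the memory controller: golden registers first, then VGA
 * lockout, MMHUB power gating, HDP init and flush, fault-handling
 * defaults, a TLB flush of every hub, UMC registers and finally the
 * GART.
 */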
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int i, r;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP.*/
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * Pair the operations done in gmc_v9_0_hw_init and thus maintain
	 * a correct cached state for GMC. Otherwise, the "gate" again
	 * operation on S3 resuming will fail due to wrong cached state.
	 */
	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, false);

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};