/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);

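/*
 * KIQ (Kernel Interface Queue) PM4 helpers: the KIQ is a privileged
 * compute queue through which the driver asks CP firmware to map/unmap
 * user queues, query queue status and invalidate TLBs.
 */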
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask: 0, queue_type: 0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

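/* Apply per-XCC golden register settings; the TCP_UTCL1 tuning differs by ASIC rev */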
static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		/* Golden settings applied by driver for ASIC with rev_id 0 */
		if (adev->rev_id == 0) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
					      REDUCE_FIFO_DEPTH_BY_2, 2);
		} else {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
					      SPARE, 0x1);
		}
	}
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

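/*
 * Basic ring test: write a magic value to a per-XCC scratch register
 * through the ring and poll it back over MMIO to confirm the CP is
 * fetching and executing packets.
 */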
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

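/*
 * IB test: submit a small indirect buffer that writes a magic value to
 * a writeback slot, then wait on the fence and verify the value landed.
 */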
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	char fw_name[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int r;

	chip_name = "gc_9_4_3";

	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

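/*
 * Allocate one HPD EOP buffer per compute ring on every XCC and stage
 * the MEC microcode in a GTT bo that the CP can fetch from.
 */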
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

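/*
 * SQ_IND_INDEX/SQ_IND_DATA form an indirect window into per-wave SQ
 * state; these helpers select a SIMD/wave (and optionally a thread)
 * and read wave registers back, e.g. for debugfs wave dumps.
 */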
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

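/*
 * Switch the compute partition mode: when the PSP manages spatial
 * partitioning the request is routed through it, otherwise the driver
 * programs CP_HYP_XCP_CTL on every XCC directly.
 */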
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
						    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

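/*
 * Map an IH node id back to a logical XCC index: ih_node / 2 gives the
 * physical XCC id (two IH nodes per XCC), so count the enabled XCCs at
 * or below that bit in xcc_mask and return the count minus one.
 */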
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v9_4_3_sw_init(void *handle)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[xcc_id];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
		if (r)
			return r;

		/* create MQDs for all compute queues as well as the KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev))
		r = amdgpu_gfx_sysfs_init(adev);

	return r;
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
	int i, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gfx_sysfs_fini(adev);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:     0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:   0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is available since RLC v2_1,
	 * and it is needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

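/*
 * RLC safe mode: a handshake with the RLC firmware that pauses its
 * power-management state machine so the driver can safely reprogram
 * CG/PG related registers; CMD reads back as 0 once the RLC has acked.
 */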
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* carrizo: enable the CP interrupt only after the CP is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

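/* Select the VMID whose work the RLC streaming perf monitor (SPM) traces */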
static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
				       unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};

static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
			       adev->ip_map.logical_to_dev_inst(
				       adev, entry->hwip, entry->instance) :
			       entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
		      entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					   (void *)rlcg_access_gc_9_4_3,
					   ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

880f8b3f LL |
1382 | static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, |
1383 | bool enable, int xcc_id) | |
86301129 LM |
1384 | { |
1385 | if (enable) { | |
659a4ab8 | 1386 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); |
86301129 | 1387 | } else { |
659a4ab8 | 1388 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, |
86301129 | 1389 | (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
6f917fdc | 1390 | adev->gfx.kiq[xcc_id].ring.sched.ready = false; |
86301129 LM |
1391 | } |
1392 | udelay(50); | |
1393 | } | |
de7511ae | 1394 | |
880f8b3f LL |
1395 | static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev, |
1396 | int xcc_id) | |
86301129 LM |
1397 | { |
1398 | const struct gfx_firmware_header_v1_0 *mec_hdr; | |
1399 | const __le32 *fw_data; | |
1400 | unsigned i; | |
1401 | u32 tmp; | |
1402 | u32 mec_ucode_addr_offset; | |
1403 | u32 mec_ucode_data_offset; | |
1404 | ||
1405 | if (!adev->gfx.mec_fw) | |
1406 | return -EINVAL; | |
1407 | ||
880f8b3f | 1408 | gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); |
86301129 LM |
1409 | |
1410 | mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | |
1411 | amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); | |
1412 | ||
1413 | fw_data = (const __le32 *) | |
1414 | (adev->gfx.mec_fw->data + | |
1415 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); | |
1416 | tmp = 0; | |
1417 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); | |
1418 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); | |
659a4ab8 | 1419 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); |
86301129 | 1420 | |
659a4ab8 | 1421 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, |
86301129 | 1422 | adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); |
659a4ab8 | 1423 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, |
86301129 LM |
1424 | upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); |
1425 | ||
1426 | mec_ucode_addr_offset = | |
659a4ab8 | 1427 | SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR); |
86301129 | 1428 | mec_ucode_data_offset = |
659a4ab8 | 1429 | SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA); |
86301129 LM |
1430 | |
1431 | /* MEC1 */ | |
1432 | WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); | |
1433 | for (i = 0; i < mec_hdr->jt_size; i++) | |
1434 | WREG32(mec_ucode_data_offset, | |
1435 | le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); | |
1436 | ||
1437 | WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version); | |
1438 | /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ | 
1439 | ||
1440 | return 0; | |
1441 | } | |
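/*
 * Summary of the load sequence above (a sketch of the intent): halt the
 * MEC, point the CPC instruction-cache base at the firmware BO, stream
 * the jump table through the UCODE_ADDR/UCODE_DATA register pair, then
 * finish by writing the firmware version to UCODE_ADDR.
 */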
1442 | ||
1443 | /* KIQ functions */ | |
880f8b3f | 1444 | static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) |
86301129 LM |
1445 | { |
1446 | uint32_t tmp; | |
1447 | struct amdgpu_device *adev = ring->adev; | |
1448 | ||
1449 | /* tell the RLC which queue is the KIQ */ | 
659a4ab8 | 1450 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); |
86301129 LM |
1451 | tmp &= 0xffffff00; |
1452 | tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); | |
659a4ab8 | 1453 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); |
86301129 | 1454 | tmp |= 0x80; |
659a4ab8 | 1455 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); |
86301129 LM |
1456 | } |
1457 | ||
1458 | static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) | |
1459 | { | |
1460 | struct amdgpu_device *adev = ring->adev; | |
1461 | ||
1462 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { | |
1463 | if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { | |
1464 | mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; | |
1465 | mqd->cp_hqd_queue_priority = | |
1466 | AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; | |
1467 | } | |
1468 | } | |
1469 | } | |
1470 | ||
c1d3f627 | 1471 | static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) |
86301129 LM |
1472 | { |
1473 | struct amdgpu_device *adev = ring->adev; | |
1474 | struct v9_mqd *mqd = ring->mqd_ptr; | |
1475 | uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; | |
1476 | uint32_t tmp; | |
1477 | ||
1478 | mqd->header = 0xC0310800; | |
1479 | mqd->compute_pipelinestat_enable = 0x00000001; | |
1480 | mqd->compute_static_thread_mgmt_se0 = 0xffffffff; | |
1481 | mqd->compute_static_thread_mgmt_se1 = 0xffffffff; | |
1482 | mqd->compute_static_thread_mgmt_se2 = 0xffffffff; | |
1483 | mqd->compute_static_thread_mgmt_se3 = 0xffffffff; | |
1484 | mqd->compute_misc_reserved = 0x00000003; | |
1485 | ||
1486 | mqd->dynamic_cu_mask_addr_lo = | |
1487 | lower_32_bits(ring->mqd_gpu_addr | |
1488 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); | |
1489 | mqd->dynamic_cu_mask_addr_hi = | |
1490 | upper_32_bits(ring->mqd_gpu_addr | |
1491 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); | |
1492 | ||
1493 | eop_base_addr = ring->eop_gpu_addr >> 8; | |
1494 | mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; | |
1495 | mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); | |
1496 | ||
1497 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ | |
c1d3f627 | 1498 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); |
86301129 LM |
1499 | tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, |
1500 | (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); | |
1501 | ||
1502 | mqd->cp_hqd_eop_control = tmp; | |
1503 | ||
1504 | /* enable doorbell? */ | |
c1d3f627 | 1505 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); |
86301129 LM |
1506 | |
1507 | if (ring->use_doorbell) { | |
1508 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
1509 | DOORBELL_OFFSET, ring->doorbell_index); | |
1510 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
1511 | DOORBELL_EN, 1); | |
1512 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
1513 | DOORBELL_SOURCE, 0); | |
1514 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
1515 | DOORBELL_HIT, 0); | |
1516 | } else { | |
1517 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
1518 | DOORBELL_EN, 0); | |
1519 | } | |
1520 | ||
1521 | mqd->cp_hqd_pq_doorbell_control = tmp; | |
1522 | ||
1523 | /* disable the queue if it's active */ | |
1524 | ring->wptr = 0; | |
1525 | mqd->cp_hqd_dequeue_request = 0; | |
1526 | mqd->cp_hqd_pq_rptr = 0; | |
1527 | mqd->cp_hqd_pq_wptr_lo = 0; | |
1528 | mqd->cp_hqd_pq_wptr_hi = 0; | |
1529 | ||
1530 | /* set the pointer to the MQD */ | |
1531 | mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; | |
1532 | mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); | |
1533 | ||
1534 | /* set MQD vmid to 0 */ | |
c1d3f627 | 1535 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); |
86301129 LM |
1536 | tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); |
1537 | mqd->cp_mqd_control = tmp; | |
1538 | ||
1539 | /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ | 
1540 | hqd_gpu_addr = ring->gpu_addr >> 8; | |
1541 | mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; | |
1542 | mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); | |
1543 | ||
1544 | /* set up the HQD, this is similar to CP_RB0_CNTL */ | |
c1d3f627 | 1545 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); |
86301129 LM |
1546 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, |
1547 | (order_base_2(ring->ring_size / 4) - 1)); | |
1548 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, | |
1549 | ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); | |
1550 | #ifdef __BIG_ENDIAN | |
1551 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); | |
1552 | #endif | |
1553 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); | |
1554 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); | |
1555 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); | |
1556 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); | |
1557 | mqd->cp_hqd_pq_control = tmp; | |
1558 | ||
1559 | /* set the wb address whether it's enabled or not */ | |
1560 | wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); | |
1561 | mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; | |
1562 | mqd->cp_hqd_pq_rptr_report_addr_hi = | |
1563 | upper_32_bits(wb_gpu_addr) & 0xffff; | |
1564 | ||
1565 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ | |
1566 | wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | |
1567 | mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; | |
1568 | mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; | |
1569 | ||
1570 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ | |
1571 | ring->wptr = 0; | |
c1d3f627 | 1572 | mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); |
86301129 LM |
1573 | |
1574 | /* set the vmid for the queue */ | |
1575 | mqd->cp_hqd_vmid = 0; | |
1576 | ||
c1d3f627 | 1577 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); |
86301129 LM |
1578 | tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); |
1579 | mqd->cp_hqd_persistent_state = tmp; | |
1580 | ||
1581 | /* set MIN_IB_AVAIL_SIZE */ | |
c1d3f627 | 1582 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); |
86301129 LM |
1583 | tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); |
1584 | mqd->cp_hqd_ib_control = tmp; | |
1585 | ||
1586 | /* set static priority for a queue/ring */ | |
1587 | gfx_v9_4_3_mqd_set_priority(ring, mqd); | |
c1d3f627 | 1588 | mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM); |
86301129 LM |
1589 | |
1590 | /* the map_queues packet doesn't need to activate the queue, | 
1591 | * so only the KIQ needs to set this field. | 
1592 | */ | 
1593 | if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) | |
1594 | mqd->cp_hqd_active = 1; | |
1595 | ||
1596 | return 0; | |
1597 | } | |
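/*
 * Worked example for the EOP sizing above: GFX9_MEC_HPD_SIZE is 4096
 * bytes, i.e. 1024 dwords, so EOP_SIZE = order_base_2(1024) - 1 = 9 and
 * the hardware decodes the field as 2^(9+1) = 1024 dwords, matching the
 * allocation.
 */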
1598 | ||
880f8b3f LL |
1599 | static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, |
1600 | int xcc_id) | |
86301129 LM |
1601 | { |
1602 | struct amdgpu_device *adev = ring->adev; | |
1603 | struct v9_mqd *mqd = ring->mqd_ptr; | |
1604 | int j; | |
1605 | ||
1606 | /* disable wptr polling */ | |
5a8b26a8 | 1607 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); |
86301129 | 1608 | |
659a4ab8 | 1609 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, |
86301129 | 1610 | mqd->cp_hqd_eop_base_addr_lo); |
659a4ab8 | 1611 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI, |
86301129 LM |
1612 | mqd->cp_hqd_eop_base_addr_hi); |
1613 | ||
1614 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ | |
659a4ab8 | 1615 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL, |
86301129 LM |
1616 | mqd->cp_hqd_eop_control); |
1617 | ||
1618 | /* enable doorbell? */ | |
659a4ab8 | 1619 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, |
86301129 LM |
1620 | mqd->cp_hqd_pq_doorbell_control); |
1621 | ||
1622 | /* disable the queue if it's active */ | |
659a4ab8 LL |
1623 | if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { |
1624 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); | |
86301129 | 1625 | for (j = 0; j < adev->usec_timeout; j++) { |
659a4ab8 | 1626 | if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) |
86301129 LM |
1627 | break; |
1628 | udelay(1); | |
1629 | } | |
659a4ab8 | 1630 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, |
86301129 | 1631 | mqd->cp_hqd_dequeue_request); |
659a4ab8 | 1632 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, |
86301129 | 1633 | mqd->cp_hqd_pq_rptr); |
659a4ab8 | 1634 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, |
86301129 | 1635 | mqd->cp_hqd_pq_wptr_lo); |
659a4ab8 | 1636 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, |
86301129 LM |
1637 | mqd->cp_hqd_pq_wptr_hi); |
1638 | } | |
1639 | ||
1640 | /* set the pointer to the MQD */ | |
659a4ab8 | 1641 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR, |
86301129 | 1642 | mqd->cp_mqd_base_addr_lo); |
659a4ab8 | 1643 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI, |
86301129 LM |
1644 | mqd->cp_mqd_base_addr_hi); |
1645 | ||
1646 | /* set MQD vmid to 0 */ | |
659a4ab8 | 1647 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL, |
86301129 LM |
1648 | mqd->cp_mqd_control); |
1649 | ||
1650 | /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ | 
659a4ab8 | 1651 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE, |
86301129 | 1652 | mqd->cp_hqd_pq_base_lo); |
659a4ab8 | 1653 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI, |
86301129 LM |
1654 | mqd->cp_hqd_pq_base_hi); |
1655 | ||
1656 | /* set up the HQD, this is similar to CP_RB0_CNTL */ | |
659a4ab8 | 1657 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL, |
86301129 LM |
1658 | mqd->cp_hqd_pq_control); |
1659 | ||
1660 | /* set the wb address whether it's enabled or not */ | |
659a4ab8 | 1661 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR, |
86301129 | 1662 | mqd->cp_hqd_pq_rptr_report_addr_lo); |
659a4ab8 | 1663 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, |
86301129 LM |
1664 | mqd->cp_hqd_pq_rptr_report_addr_hi); |
1665 | ||
1666 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ | |
659a4ab8 | 1667 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR, |
86301129 | 1668 | mqd->cp_hqd_pq_wptr_poll_addr_lo); |
659a4ab8 | 1669 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, |
86301129 LM |
1670 | mqd->cp_hqd_pq_wptr_poll_addr_hi); |
1671 | ||
1672 | /* enable the doorbell if requested */ | |
1673 | if (ring->use_doorbell) { | |
233bb373 LL |
1674 | WREG32_SOC15( |
1675 | GC, GET_INST(GC, xcc_id), | |
1676 | regCP_MEC_DOORBELL_RANGE_LOWER, | |
1677 | ((adev->doorbell_index.kiq + | |
1678 | xcc_id * adev->doorbell_index.xcc_doorbell_range) * | |
1679 | 2) << 2); | |
1680 | WREG32_SOC15( | |
1681 | GC, GET_INST(GC, xcc_id), | |
1682 | regCP_MEC_DOORBELL_RANGE_UPPER, | |
1683 | ((adev->doorbell_index.userqueue_end + | |
1684 | xcc_id * adev->doorbell_index.xcc_doorbell_range) * | |
1685 | 2) << 2); | |
86301129 LM |
1686 | } |
1687 | ||
659a4ab8 | 1688 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, |
86301129 LM |
1689 | mqd->cp_hqd_pq_doorbell_control); |
1690 | ||
1691 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ | |
659a4ab8 | 1692 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, |
86301129 | 1693 | mqd->cp_hqd_pq_wptr_lo); |
659a4ab8 | 1694 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, |
86301129 LM |
1695 | mqd->cp_hqd_pq_wptr_hi); |
1696 | ||
1697 | /* set the vmid for the queue */ | |
659a4ab8 | 1698 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid); |
86301129 | 1699 | |
659a4ab8 | 1700 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, |
86301129 LM |
1701 | mqd->cp_hqd_persistent_state); |
1702 | ||
1703 | /* activate the queue */ | |
659a4ab8 | 1704 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, |
86301129 LM |
1705 | mqd->cp_hqd_active); |
1706 | ||
1707 | if (ring->use_doorbell) | |
5a8b26a8 | 1708 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1); |
86301129 LM |
1709 | |
1710 | return 0; | |
1711 | } | |
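/*
 * Note (added commentary) on the doorbell range math above, assuming
 * 64-bit doorbell slots: the KIQ/userqueue indices are first offset by
 * xcc_id * xcc_doorbell_range to land in this XCC's aperture, then
 * "* 2" converts the 64-bit slot index to 32-bit units and "<< 2"
 * turns that into the byte offset the registers expect.
 */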
1712 | ||
fee500fa | 1713 | static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring, |
880f8b3f | 1714 | int xcc_id) |
86301129 LM |
1715 | { |
1716 | struct amdgpu_device *adev = ring->adev; | |
1717 | int j; | |
1718 | ||
1719 | /* disable the queue if it's active */ | |
659a4ab8 | 1720 | if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { |
86301129 | 1721 | |
659a4ab8 | 1722 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); |
86301129 LM |
1723 | |
1724 | for (j = 0; j < adev->usec_timeout; j++) { | |
659a4ab8 | 1725 | if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) |
86301129 LM |
1726 | break; |
1727 | udelay(1); | |
1728 | } | |
1729 | ||
1730 | if (j == adev->usec_timeout) { | 
fee500fa | 1731 | DRM_DEBUG("%s dequeue request failed.\n", ring->name); |
86301129 LM |
1732 | |
1733 | /* Manual disable if dequeue request times out */ | |
659a4ab8 | 1734 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); |
86301129 LM |
1735 | } |
1736 | ||
659a4ab8 | 1737 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, |
86301129 LM |
1738 | 0); |
1739 | } | |
1740 | ||
659a4ab8 LL |
1741 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); |
1742 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); | |
b5ac0880 | 1743 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT); |
659a4ab8 LL |
1744 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); |
1745 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); | |
1746 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); | |
1747 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0); | |
1748 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0); | |
86301129 LM |
1749 | |
1750 | return 0; | |
1751 | } | |
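/*
 * The teardown above is: request a dequeue, poll CP_HQD_ACTIVE for up
 * to adev->usec_timeout microseconds, force CP_HQD_ACTIVE to 0 if the
 * request never completes, then park the remaining HQD registers at
 * benign defaults so a later re-init starts from a clean state.
 */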
1752 | ||
880f8b3f | 1753 | static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) |
86301129 LM |
1754 | { |
1755 | struct amdgpu_device *adev = ring->adev; | |
1756 | struct v9_mqd *mqd = ring->mqd_ptr; | |
1757 | struct v9_mqd *tmp_mqd; | |
1758 | ||
880f8b3f | 1759 | gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id); |
86301129 LM |
1760 | |
1761 | /* The GPU could be in a bad state during probe: the driver triggers | 
1762 | * the reset after loading the SMU, and in that case the MQD has not | 
1763 | * been initialized, so the driver needs to re-init it. Check | 
1764 | * mqd->cp_hqd_pq_control, since this value should not be 0. | 
1765 | */ | 
6f917fdc | 1766 | tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup; |
86301129 LM |
1767 | if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) { |
1768 | /* for GPU_RESET case , reset MQD to a clean status */ | |
6f917fdc LM |
1769 | if (adev->gfx.kiq[xcc_id].mqd_backup) |
1770 | memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation)); | |
86301129 LM |
1771 | |
1772 | /* reset ring buffer */ | |
1773 | ring->wptr = 0; | |
1774 | amdgpu_ring_clear_ring(ring); | |
86301129 | 1775 | mutex_lock(&adev->srbm_mutex); |
659a4ab8 | 1776 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); |
880f8b3f | 1777 | gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); |
659a4ab8 | 1778 | soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); |
86301129 LM |
1779 | mutex_unlock(&adev->srbm_mutex); |
1780 | } else { | |
1781 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); | |
1782 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; | |
1783 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; | |
1784 | mutex_lock(&adev->srbm_mutex); | |
2036b34d ZL |
1785 | if (amdgpu_sriov_vf(adev) && adev->in_suspend) |
1786 | amdgpu_ring_clear_ring(ring); | |
659a4ab8 | 1787 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); |
c1d3f627 | 1788 | gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); |
880f8b3f | 1789 | gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); |
659a4ab8 | 1790 | soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); |
86301129 LM |
1791 | mutex_unlock(&adev->srbm_mutex); |
1792 | ||
6f917fdc LM |
1793 | if (adev->gfx.kiq[xcc_id].mqd_backup) |
1794 | memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); | |
86301129 LM |
1795 | } |
1796 | ||
1797 | return 0; | |
1798 | } | |
1799 | ||
880f8b3f | 1800 | static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) |
86301129 LM |
1801 | { |
1802 | struct amdgpu_device *adev = ring->adev; | |
1803 | struct v9_mqd *mqd = ring->mqd_ptr; | |
1804 | int mqd_idx = ring - &adev->gfx.compute_ring[0]; | |
1805 | struct v9_mqd *tmp_mqd; | |
1806 | ||
1807 | /* Same as the KIQ init above: the driver needs to re-init the MQD if | 
1808 | * mqd->cp_hqd_pq_control was not initialized before. | 
1809 | */ | 
1810 | tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; | |
1811 | ||
1812 | if (!tmp_mqd->cp_hqd_pq_control || | |
1813 | (!amdgpu_in_reset(adev) && !adev->in_suspend)) { | |
1814 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); | |
1815 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; | |
1816 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; | |
1817 | mutex_lock(&adev->srbm_mutex); | |
659a4ab8 | 1818 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); |
c1d3f627 | 1819 | gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); |
659a4ab8 | 1820 | soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); |
86301129 LM |
1821 | mutex_unlock(&adev->srbm_mutex); |
1822 | ||
1823 | if (adev->gfx.mec.mqd_backup[mqd_idx]) | |
1824 | memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); | |
45b54a7d AD |
1825 | } else { |
1826 | /* restore MQD to a clean status */ | |
86301129 LM |
1827 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
1828 | memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); | |
86301129 LM |
1829 | /* reset ring buffer */ |
1830 | ring->wptr = 0; | |
1831 | atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); | |
1832 | amdgpu_ring_clear_ring(ring); | |
86301129 LM |
1833 | } |
1834 | ||
1835 | return 0; | |
1836 | } | |
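/*
 * Two paths above: on a fresh init the MQD is zeroed, rebuilt via
 * gfx_v9_4_3_xcc_mqd_init() and snapshotted into mqd_backup; on reset
 * or resume the snapshot is restored instead and only the ring buffer
 * state (wptr, rptr writeback) is cleared.
 */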
1837 | ||
fee500fa SZ |
1838 | static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) |
1839 | { | |
1840 | struct amdgpu_ring *ring; | |
1841 | int j; | |
1842 | ||
1843 | for (j = 0; j < adev->gfx.num_compute_rings; j++) { | |
1844 | ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings]; | |
1845 | if (!amdgpu_in_reset(adev) && !adev->in_suspend) { | |
1846 | mutex_lock(&adev->srbm_mutex); | |
1847 | soc15_grbm_select(adev, ring->me, | |
1848 | ring->pipe, | |
1849 | ring->queue, 0, GET_INST(GC, xcc_id)); | |
1850 | gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id); | |
1851 | soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); | |
1852 | mutex_unlock(&adev->srbm_mutex); | |
1853 | } | |
1854 | } | |
1855 | ||
1856 | return 0; | |
1857 | } | |
1858 | ||
880f8b3f | 1859 | static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) |
86301129 LM |
1860 | { |
1861 | struct amdgpu_ring *ring; | |
1862 | int r; | |
1863 | ||
6f917fdc | 1864 | ring = &adev->gfx.kiq[xcc_id].ring; |
86301129 LM |
1865 | |
1866 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | |
1867 | if (unlikely(r != 0)) | |
1868 | return r; | |
1869 | ||
1870 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); | |
f4409a23 DC |
1871 | if (unlikely(r != 0)) { |
1872 | amdgpu_bo_unreserve(ring->mqd_obj); | |
86301129 | 1873 | return r; |
f4409a23 | 1874 | } |
86301129 | 1875 | |
880f8b3f | 1876 | gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); |
86301129 LM |
1877 | amdgpu_bo_kunmap(ring->mqd_obj); |
1878 | ring->mqd_ptr = NULL; | |
1879 | amdgpu_bo_unreserve(ring->mqd_obj); | |
86301129 LM |
1880 | return 0; |
1881 | } | |
1882 | ||
880f8b3f | 1883 | static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) |
86301129 LM |
1884 | { |
1885 | struct amdgpu_ring *ring = NULL; | |
1886 | int r = 0, i; | |
1887 | ||
880f8b3f | 1888 | gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); |
86301129 LM |
1889 | |
1890 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | |
6f917fdc | 1891 | ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; |
86301129 LM |
1892 | |
1893 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | |
1894 | if (unlikely(r != 0)) | |
1895 | goto done; | |
1896 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); | |
1897 | if (!r) { | |
880f8b3f | 1898 | r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id); |
86301129 LM |
1899 | amdgpu_bo_kunmap(ring->mqd_obj); |
1900 | ring->mqd_ptr = NULL; | |
1901 | } | |
1902 | amdgpu_bo_unreserve(ring->mqd_obj); | |
1903 | if (r) | |
1904 | goto done; | |
1905 | } | |
1906 | ||
6f917fdc | 1907 | r = amdgpu_gfx_enable_kcq(adev, xcc_id); |
86301129 LM |
1908 | done: |
1909 | return r; | |
1910 | } | |
1911 | ||
44b5cf2e | 1912 | static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) |
86301129 | 1913 | { |
86301129 | 1914 | struct amdgpu_ring *ring; |
44b5cf2e | 1915 | int r, j; |
86301129 | 1916 | |
44b5cf2e | 1917 | gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); |
6f917fdc | 1918 | |
44b5cf2e LL |
1919 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
1920 | gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id); | |
86301129 | 1921 | |
44b5cf2e LL |
1922 | r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); |
1923 | if (r) | |
1924 | return r; | |
1925 | } | |
86301129 | 1926 | |
44b5cf2e LL |
1927 | r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); |
1928 | if (r) | |
1929 | return r; | |
1930 | ||
1931 | r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id); | |
1932 | if (r) | |
1933 | return r; | |
86301129 | 1934 | |
44b5cf2e LL |
1935 | for (j = 0; j < adev->gfx.num_compute_rings; j++) { |
1936 | ring = &adev->gfx.compute_ring | |
1937 | [j + xcc_id * adev->gfx.num_compute_rings]; | |
1938 | r = amdgpu_ring_test_helper(ring); | |
6f917fdc LM |
1939 | if (r) |
1940 | return r; | |
44b5cf2e | 1941 | } |
86301129 | 1942 | |
44b5cf2e | 1943 | gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); |
86301129 | 1944 | |
44b5cf2e LL |
1945 | return 0; |
1946 | } | |
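/*
 * Resume ordering above: GUI idle interrupts are masked first, the MEC
 * microcode is loaded directly only on non-PSP paths, the KIQ comes up
 * before the KCQs (the KCQs are mapped through KIQ packets in
 * amdgpu_gfx_enable_kcq()), and the rings are tested before interrupts
 * are unmasked again.
 */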
1947 | ||
1948 | static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) | |
1949 | { | |
d7b8e68d | 1950 | int r = 0, i, num_xcc; |
44b5cf2e | 1951 | |
ded7d99e LL |
1952 | if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, |
1953 | AMDGPU_XCP_FL_NONE) == | |
1954 | AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) | |
b6b85c8b LL |
1955 | r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, |
1956 | amdgpu_user_partt_mode); | |
1957 | ||
1958 | if (r) | |
1959 | return r; | |
8e7fd193 | 1960 | |
44b5cf2e LL |
1961 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
1962 | for (i = 0; i < num_xcc; i++) { | |
1963 | r = gfx_v9_4_3_xcc_cp_resume(adev, i); | |
1964 | if (r) | |
1965 | return r; | |
86301129 LM |
1966 | } |
1967 | ||
86301129 LM |
1968 | return 0; |
1969 | } | |
1970 | ||
880f8b3f LL |
1971 | static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, |
1972 | int xcc_id) | |
86301129 | 1973 | { |
880f8b3f | 1974 | gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); |
86301129 LM |
1975 | } |
1976 | ||
44b5cf2e LL |
1977 | static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) |
1978 | { | |
1979 | if (amdgpu_gfx_disable_kcq(adev, xcc_id)) | |
1980 | DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id); | |
1981 | ||
2036b34d ZL |
1982 | if (amdgpu_sriov_vf(adev)) { |
1983 | /* Polling must be disabled for SRIOV once the hw is done; otherwise | 
1984 | * the CPC engine may keep fetching a WB address that is already | 
1985 | * invalid after the sw teardown, triggering a DMAR read error on | 
1986 | * the hypervisor side. | 
1987 | */ | 
1988 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); | |
1989 | return; | |
1990 | } | |
1991 | ||
44b5cf2e LL |
1992 | /* Use the deinitialize sequence from CAIL when unbinding the device | 
1993 | * from the driver; otherwise the KIQ hangs when binding it back. | 
1994 | */ | 
1995 | if (!amdgpu_in_reset(adev) && !adev->in_suspend) { | |
1996 | mutex_lock(&adev->srbm_mutex); | |
1997 | soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me, | |
1998 | adev->gfx.kiq[xcc_id].ring.pipe, | |
1999 | adev->gfx.kiq[xcc_id].ring.queue, 0, | |
2000 | GET_INST(GC, xcc_id)); | |
fee500fa | 2001 | gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring, |
44b5cf2e LL |
2002 | xcc_id); |
2003 | soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); | |
2004 | mutex_unlock(&adev->srbm_mutex); | |
2005 | } | |
2006 | ||
fee500fa | 2007 | gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); |
44b5cf2e | 2008 | gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); |
44b5cf2e LL |
2009 | } |
2010 | ||
86301129 LM |
2011 | static int gfx_v9_4_3_hw_init(void *handle) |
2012 | { | |
2013 | int r; | |
2014 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
2015 | ||
2036b34d ZL |
2016 | if (!amdgpu_sriov_vf(adev)) |
2017 | gfx_v9_4_3_init_golden_registers(adev); | |
86301129 LM |
2018 | |
2019 | gfx_v9_4_3_constants_init(adev); | |
2020 | ||
2021 | r = adev->gfx.rlc.funcs->resume(adev); | |
2022 | if (r) | |
2023 | return r; | |
2024 | ||
2025 | r = gfx_v9_4_3_cp_resume(adev); | |
2026 | if (r) | |
2027 | return r; | |
2028 | ||
2029 | return r; | |
2030 | } | |
2031 | ||
2032 | static int gfx_v9_4_3_hw_fini(void *handle) | |
2033 | { | |
2034 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
8078f1c6 | 2035 | int i, num_xcc; |
86301129 LM |
2036 | |
2037 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); | |
2038 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); | |
2039 | ||
8078f1c6 LL |
2040 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2041 | for (i = 0; i < num_xcc; i++) { | |
44b5cf2e | 2042 | gfx_v9_4_3_xcc_fini(adev, i); |
86301129 LM |
2043 | } |
2044 | ||
86301129 LM |
2045 | return 0; |
2046 | } | |
2047 | ||
2048 | static int gfx_v9_4_3_suspend(void *handle) | |
2049 | { | |
2050 | return gfx_v9_4_3_hw_fini(handle); | |
2051 | } | |
2052 | ||
2053 | static int gfx_v9_4_3_resume(void *handle) | |
2054 | { | |
2055 | return gfx_v9_4_3_hw_init(handle); | |
2056 | } | |
2057 | ||
2058 | static bool gfx_v9_4_3_is_idle(void *handle) | |
2059 | { | |
2060 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
8078f1c6 | 2061 | int i, num_xcc; |
86301129 | 2062 | |
8078f1c6 LL |
2063 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2064 | for (i = 0; i < num_xcc; i++) { | |
659a4ab8 | 2065 | if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS), |
6f917fdc LM |
2066 | GRBM_STATUS, GUI_ACTIVE)) |
2067 | return false; | |
2068 | } | |
2069 | return true; | |
86301129 LM |
2070 | } |
2071 | ||
2072 | static int gfx_v9_4_3_wait_for_idle(void *handle) | |
2073 | { | |
2074 | unsigned i; | |
2075 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
2076 | ||
2077 | for (i = 0; i < adev->usec_timeout; i++) { | |
2078 | if (gfx_v9_4_3_is_idle(handle)) | |
2079 | return 0; | |
2080 | udelay(1); | |
2081 | } | |
2082 | return -ETIMEDOUT; | |
2083 | } | |
2084 | ||
2085 | static int gfx_v9_4_3_soft_reset(void *handle) | |
2086 | { | |
2087 | u32 grbm_soft_reset = 0; | |
2088 | u32 tmp; | |
2089 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
2090 | ||
2091 | /* GRBM_STATUS */ | |
659a4ab8 | 2092 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); |
86301129 LM |
2093 | if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | |
2094 | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | | |
2095 | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | | |
2096 | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | | |
2097 | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | | |
2098 | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { | |
2099 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
2100 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | |
2101 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
2102 | GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); | |
2103 | } | |
2104 | ||
2105 | if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { | |
2106 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
2107 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | |
2108 | } | |
2109 | ||
2110 | /* GRBM_STATUS2 */ | |
659a4ab8 | 2111 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2); |
86301129 LM |
2112 | if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) |
2113 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
2114 | GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); | |
2115 | ||
2116 | ||
2117 | if (grbm_soft_reset) { | |
2118 | /* stop the rlc */ | |
2119 | adev->gfx.rlc.funcs->stop(adev); | |
2120 | ||
2121 | /* Disable MEC parsing/prefetching */ | |
880f8b3f | 2122 | gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0); |
86301129 LM |
2123 | |
2124 | if (grbm_soft_reset) { | |
659a4ab8 | 2125 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); |
86301129 LM |
2126 | tmp |= grbm_soft_reset; |
2127 | dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); | |
659a4ab8 LL |
2128 | WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); |
2129 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); | |
86301129 LM |
2130 | |
2131 | udelay(50); | |
2132 | ||
2133 | tmp &= ~grbm_soft_reset; | |
659a4ab8 LL |
2134 | WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); |
2135 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); | |
86301129 LM |
2136 | } |
2137 | ||
2138 | /* Wait a little for things to settle down */ | |
2139 | udelay(50); | |
2140 | } | |
2141 | return 0; | |
2142 | } | |
2143 | ||
2144 | static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, | |
2145 | uint32_t vmid, | |
2146 | uint32_t gds_base, uint32_t gds_size, | |
2147 | uint32_t gws_base, uint32_t gws_size, | |
2148 | uint32_t oa_base, uint32_t oa_size) | |
2149 | { | |
2150 | struct amdgpu_device *adev = ring->adev; | |
2151 | ||
2152 | /* GDS Base */ | |
2153 | gfx_v9_4_3_write_data_to_reg(ring, 0, false, | |
659a4ab8 | 2154 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid, |
86301129 LM |
2155 | gds_base); |
2156 | ||
2157 | /* GDS Size */ | |
2158 | gfx_v9_4_3_write_data_to_reg(ring, 0, false, | |
659a4ab8 | 2159 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid, |
86301129 LM |
2160 | gds_size); |
2161 | ||
2162 | /* GWS */ | |
2163 | gfx_v9_4_3_write_data_to_reg(ring, 0, false, | |
659a4ab8 | 2164 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid, |
86301129 LM |
2165 | gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); |
2166 | ||
2167 | /* OA */ | |
2168 | gfx_v9_4_3_write_data_to_reg(ring, 0, false, | |
659a4ab8 | 2169 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid, |
86301129 LM |
2170 | (1 << (oa_size + oa_base)) - (1 << oa_base)); |
2171 | } | |
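/*
 * Worked example for the OA mask above: (1 << (oa_size + oa_base)) -
 * (1 << oa_base) builds a contiguous run of bits; oa_base = 4 with
 * oa_size = 2 gives 0x30, i.e. OA slots 4 and 5 owned by this vmid.
 */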
2172 | ||
2173 | static int gfx_v9_4_3_early_init(void *handle) | |
2174 | { | |
2175 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
0fa49d10 | 2176 | |
86301129 LM |
2177 | adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), |
2178 | AMDGPU_MAX_COMPUTE_RINGS); | |
2179 | gfx_v9_4_3_set_kiq_pm4_funcs(adev); | |
2180 | gfx_v9_4_3_set_ring_funcs(adev); | |
2181 | gfx_v9_4_3_set_irq_funcs(adev); | |
2182 | gfx_v9_4_3_set_gds_init(adev); | |
2183 | gfx_v9_4_3_set_rlc_funcs(adev); | |
2184 | ||
8ed49dd1 VL |
2185 | /* init rlcg reg access ctrl */ |
2186 | gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev); | |
2187 | ||
86301129 LM |
2188 | return gfx_v9_4_3_init_microcode(adev); |
2189 | } | |
2190 | ||
2191 | static int gfx_v9_4_3_late_init(void *handle) | |
2192 | { | |
2193 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
2194 | int r; | |
2195 | ||
2196 | r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); | |
2197 | if (r) | |
2198 | return r; | |
2199 | ||
2200 | r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); | |
2201 | if (r) | |
2202 | return r; | |
2203 | ||
bd974498 TZ |
2204 | if (adev->gfx.ras && |
2205 | adev->gfx.ras->enable_watchdog_timer) | |
2206 | adev->gfx.ras->enable_watchdog_timer(adev); | |
2207 | ||
86301129 LM |
2208 | return 0; |
2209 | } | |
2210 | ||
34fd9d68 LL |
2211 | static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev, |
2212 | bool enable, int xcc_id) | |
2213 | { | |
2214 | uint32_t def, data; | |
2215 | ||
2216 | if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) | |
2217 | return; | |
2218 | ||
2219 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
2220 | regRLC_CGTT_MGCG_OVERRIDE); | |
2221 | ||
2222 | if (enable) | |
2223 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; | |
2224 | else | |
2225 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; | |
2226 | ||
2227 | if (def != data) | |
2228 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
2229 | regRLC_CGTT_MGCG_OVERRIDE, data); | |
2230 | ||
34fd9d68 LL |
2231 | } |
2232 | ||
2233 | static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev, | |
2234 | bool enable, int xcc_id) | |
2235 | { | |
2236 | uint32_t def, data; | |
2237 | ||
2238 | if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) | |
2239 | return; | |
2240 | ||
2241 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
2242 | regRLC_CGTT_MGCG_OVERRIDE); | |
2243 | ||
2244 | if (enable) | |
2245 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; | |
2246 | else | |
2247 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; | |
2248 | ||
2249 | if (def != data) | |
2250 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
2251 | regRLC_CGTT_MGCG_OVERRIDE, data); | |
2252 | } | |
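/*
 * Both FGCG helpers above follow the same def/data pattern: read
 * RLC_CGTT_MGCG_OVERRIDE once, flip only the relevant override bit, and
 * write back only when the value actually changed, sparing a register
 * write on the common no-op path.
 */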
2253 | ||
880f8b3f LL |
2254 | static void |
2255 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, | |
2256 | bool enable, int xcc_id) | |
86301129 LM |
2257 | { |
2258 | uint32_t data, def; | |
2259 | ||
86301129 LM |
2260 | /* It is disabled by HW by default */ |
2261 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { | |
2262 | /* 1 - RLC_CGTT_MGCG_OVERRIDE */ | |
659a4ab8 | 2263 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
86301129 LM |
2264 | |
2265 | data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | | |
2266 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | | |
b7c7011e | 2267 | RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | |
86301129 LM |
2268 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); |
2269 | ||
86301129 | 2270 | if (def != data) |
659a4ab8 | 2271 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
86301129 LM |
2272 | |
2273 | /* MGLS is a global flag to control all MGLS in GFX */ | |
2274 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { | |
2275 | /* 2 - RLC memory Light sleep */ | |
2276 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { | |
659a4ab8 | 2277 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); |
86301129 LM |
2278 | data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; |
2279 | if (def != data) | |
659a4ab8 | 2280 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); |
86301129 LM |
2281 | } |
2282 | /* 3 - CP memory Light sleep */ | |
2283 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { | |
659a4ab8 | 2284 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); |
86301129 LM |
2285 | data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; |
2286 | if (def != data) | |
659a4ab8 | 2287 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); |
86301129 LM |
2288 | } |
2289 | } | |
2290 | } else { | |
2291 | /* 1 - MGCG_OVERRIDE */ | |
659a4ab8 | 2292 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
86301129 LM |
2293 | |
2294 | data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | | |
2295 | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | | |
2296 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | | |
2297 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); | |
2298 | ||
2299 | if (def != data) | |
659a4ab8 | 2300 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
86301129 LM |
2301 | |
2302 | /* 2 - disable MGLS in RLC */ | |
659a4ab8 | 2303 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); |
86301129 LM |
2304 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { |
2305 | data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; | |
659a4ab8 | 2306 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); |
86301129 LM |
2307 | } |
2308 | ||
2309 | /* 3 - disable MGLS in CP */ | |
659a4ab8 | 2310 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); |
86301129 LM |
2311 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { |
2312 | data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; | |
659a4ab8 | 2313 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); |
86301129 LM |
2314 | } |
2315 | } | |
2316 | ||
86301129 LM |
2317 | } |
2318 | ||
880f8b3f LL |
2319 | static void |
2320 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | |
2321 | bool enable, int xcc_id) | |
86301129 LM |
2322 | { |
2323 | uint32_t def, data; | |
2324 | ||
86301129 | 2325 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
b7c7011e | 2326 | |
659a4ab8 | 2327 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
86301129 LM |
2328 | /* unset CGCG override */ |
2329 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; | |
2330 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) | |
2331 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; | |
2332 | else | |
2333 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; | |
2334 | /* update CGCG and CGLS override bits */ | |
2335 | if (def != data) | |
659a4ab8 | 2336 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
86301129 LM |
2337 | |
2338 | /* enable cgcg FSM(0x0000363F) */ | |
659a4ab8 | 2339 | def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); |
86301129 | 2340 | |
b7c7011e LL |
2341 | data = (0x36 |
2342 | << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | | |
2343 | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; | |
86301129 LM |
2344 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) |
2345 | data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | | |
2346 | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; | |
2347 | if (def != data) | |
659a4ab8 | 2348 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); |
86301129 LM |
2349 | |
2350 | /* set IDLE_POLL_COUNT(0x00900100) */ | |
659a4ab8 | 2351 | def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); |
86301129 LM |
2352 | data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | |
2353 | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); | |
2354 | if (def != data) | |
659a4ab8 | 2355 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); |
86301129 | 2356 | } else { |
659a4ab8 | 2357 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); |
86301129 LM |
2358 | /* reset CGCG/CGLS bits */ |
2359 | data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); | |
2360 | /* disable cgcg and cgls in FSM */ | |
2361 | if (def != data) | |
659a4ab8 | 2362 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); |
86301129 LM |
2363 | } |
2364 | ||
86301129 LM |
2365 | } |
2366 | ||
880f8b3f LL |
2367 | static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, |
2368 | bool enable, int xcc_id) | |
86301129 | 2369 | { |
34fd9d68 LL |
2370 | amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); |
2371 | ||
86301129 | 2372 | if (enable) { |
34fd9d68 LL |
2373 | /* FGCG */ |
2374 | gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); | |
2375 | gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); | |
2376 | ||
86301129 LM |
2377 | /* CGCG/CGLS should be enabled after MGCG/MGLS |
2378 | * === MGCG + MGLS === | |
2379 | */ | |
880f8b3f LL |
2380 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, |
2381 | xcc_id); | |
86301129 | 2382 | /* === CGCG + CGLS === */ |
880f8b3f LL |
2383 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, |
2384 | xcc_id); | |
86301129 LM |
2385 | } else { |
2386 | /* CGCG/CGLS should be disabled before MGCG/MGLS | |
2387 | * === CGCG + CGLS === | |
2388 | */ | |
880f8b3f LL |
2389 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, |
2390 | xcc_id); | |
86301129 | 2391 | /* === MGCG + MGLS === */ |
880f8b3f LL |
2392 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, |
2393 | xcc_id); | |
34fd9d68 LL |
2394 | |
2395 | /* FGCG */ | |
2396 | gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); | |
2397 | gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); | |
86301129 | 2398 | } |
34fd9d68 LL |
2399 | |
2400 | amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); | |
2401 | ||
86301129 LM |
2402 | return 0; |
2403 | } | |
2404 | ||
2405 | static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { | |
2406 | .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, | |
880f8b3f LL |
2407 | .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode, |
2408 | .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode, | |
86301129 | 2409 | .init = gfx_v9_4_3_rlc_init, |
86301129 LM |
2410 | .resume = gfx_v9_4_3_rlc_resume, |
2411 | .stop = gfx_v9_4_3_rlc_stop, | |
2412 | .reset = gfx_v9_4_3_rlc_reset, | |
2413 | .start = gfx_v9_4_3_rlc_start, | |
2414 | .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, | |
2415 | .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, | |
2416 | }; | |
2417 | ||
2418 | static int gfx_v9_4_3_set_powergating_state(void *handle, | |
2419 | enum amd_powergating_state state) | |
2420 | { | |
2421 | return 0; | |
2422 | } | |
2423 | ||
2424 | static int gfx_v9_4_3_set_clockgating_state(void *handle, | |
2425 | enum amd_clockgating_state state) | |
2426 | { | |
2427 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
8078f1c6 | 2428 | int i, num_xcc; |
86301129 LM |
2429 | |
2430 | if (amdgpu_sriov_vf(adev)) | |
2431 | return 0; | |
2432 | ||
8078f1c6 | 2433 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
4e8303cf | 2434 | switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { |
86301129 | 2435 | case IP_VERSION(9, 4, 3): |
8078f1c6 | 2436 | for (i = 0; i < num_xcc; i++) |
880f8b3f LL |
2437 | gfx_v9_4_3_xcc_update_gfx_clock_gating( |
2438 | adev, state == AMD_CG_STATE_GATE, i); | |
86301129 LM |
2439 | break; |
2440 | default: | |
2441 | break; | |
2442 | } | |
2443 | return 0; | |
2444 | } | |
2445 | ||
2446 | static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) | |
2447 | { | |
2448 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
2449 | int data; | |
2450 | ||
2451 | if (amdgpu_sriov_vf(adev)) | |
2452 | *flags = 0; | |
2453 | ||
2454 | /* AMD_CG_SUPPORT_GFX_MGCG */ | |
659a4ab8 | 2455 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE)); |
86301129 LM |
2456 | if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) |
2457 | *flags |= AMD_CG_SUPPORT_GFX_MGCG; | |
2458 | ||
2459 | /* AMD_CG_SUPPORT_GFX_CGCG */ | |
659a4ab8 | 2460 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL)); |
86301129 LM |
2461 | if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) |
2462 | *flags |= AMD_CG_SUPPORT_GFX_CGCG; | |
2463 | ||
2464 | /* AMD_CG_SUPPORT_GFX_CGLS */ | |
2465 | if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) | |
2466 | *flags |= AMD_CG_SUPPORT_GFX_CGLS; | |
2467 | ||
2468 | /* AMD_CG_SUPPORT_GFX_RLC_LS */ | |
659a4ab8 | 2469 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL)); |
86301129 LM |
2470 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) |
2471 | *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; | |
2472 | ||
2473 | /* AMD_CG_SUPPORT_GFX_CP_LS */ | |
659a4ab8 | 2474 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL)); |
86301129 LM |
2475 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) |
2476 | *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; | |
2477 | } | |
2478 | ||
2479 | static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |
2480 | { | |
2481 | struct amdgpu_device *adev = ring->adev; | |
2482 | u32 ref_and_mask, reg_mem_engine; | |
2483 | const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; | |
2484 | ||
2485 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { | |
2486 | switch (ring->me) { | |
2487 | case 1: | |
2488 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; | |
2489 | break; | |
2490 | case 2: | |
2491 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; | |
2492 | break; | |
2493 | default: | |
2494 | return; | |
2495 | } | |
2496 | reg_mem_engine = 0; | |
2497 | } else { | |
2498 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; | |
2499 | reg_mem_engine = 1; /* pfp */ | |
2500 | } | |
2501 | ||
2502 | gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1, | |
2503 | adev->nbio.funcs->get_hdp_flush_req_offset(adev), | |
2504 | adev->nbio.funcs->get_hdp_flush_done_offset(adev), | |
2505 | ref_and_mask, ref_and_mask, 0x20); | |
2506 | } | |
2507 | ||
2508 | static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring, | |
2509 | struct amdgpu_job *job, | |
2510 | struct amdgpu_ib *ib, | |
2511 | uint32_t flags) | |
2512 | { | |
2513 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | |
2514 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); | |
2515 | ||
2516 | /* Currently, there is a high probability of a wave ID mismatch | 
2517 | * between ME and GDS, leading to a hw deadlock, because ME generates | |
2518 | * different wave IDs than the GDS expects. This situation happens | |
2519 | * randomly when at least 5 compute pipes use GDS ordered append. | |
2520 | * The wave IDs generated by ME are also wrong after suspend/resume. | |
2521 | * Those are probably bugs somewhere else in the kernel driver. | |
2522 | * | |
2523 | * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and | |
2524 | * GDS to 0 for this ring (me/pipe). | |
2525 | */ | |
2526 | if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { | |
2527 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
2528 | amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); | |
2529 | amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); | |
2530 | } | |
2531 | ||
2532 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | |
2533 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ | |
2534 | amdgpu_ring_write(ring, | |
2535 | #ifdef __BIG_ENDIAN | |
2536 | (2 << 0) | | |
2537 | #endif | |
2538 | lower_32_bits(ib->gpu_addr)); | |
2539 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
2540 | amdgpu_ring_write(ring, control); | |
2541 | } | |
2542 | ||
2543 | static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, | |
2544 | u64 seq, unsigned flags) | |
2545 | { | |
2546 | bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; | |
2547 | bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; | |
2548 | bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; | |
2549 | ||
2550 | /* RELEASE_MEM - flush caches, send int */ | |
2551 | amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); | |
2552 | amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | | |
2553 | EOP_TC_NC_ACTION_EN) : | |
2554 | (EOP_TCL1_ACTION_EN | | |
2555 | EOP_TC_ACTION_EN | | |
2556 | EOP_TC_WB_ACTION_EN | | |
2557 | EOP_TC_MD_ACTION_EN)) | | |
2558 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | | |
2559 | EVENT_INDEX(5))); | |
2560 | amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); | |
2561 | ||
2562 | /* | 
2563 | * the address should be qword-aligned for a 64-bit write, and | 
2564 | * dword-aligned when only the low 32 bits are sent (high discarded) | 
2565 | */ | 
2566 | if (write64bit) | |
2567 | BUG_ON(addr & 0x7); | |
2568 | else | |
2569 | BUG_ON(addr & 0x3); | |
2570 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
2571 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
2572 | amdgpu_ring_write(ring, lower_32_bits(seq)); | |
2573 | amdgpu_ring_write(ring, upper_32_bits(seq)); | |
2574 | amdgpu_ring_write(ring, 0); | |
2575 | } | |
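/*
 * Note (added commentary) on the RELEASE_MEM encoding above: DATA_SEL(2)
 * appears to select a 64-bit fence write and DATA_SEL(1) a 32-bit one,
 * while INT_SEL(2) requests an interrupt once the write completes; the
 * writeback variant flushes only the TC write-back/NC paths instead of
 * the full TCL1/TC/metadata set.
 */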
2576 | ||
2577 | static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |
2578 | { | |
2579 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); | |
2580 | uint32_t seq = ring->fence_drv.sync_seq; | |
2581 | uint64_t addr = ring->fence_drv.gpu_addr; | |
2582 | ||
2583 | gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0, | |
2584 | lower_32_bits(addr), upper_32_bits(addr), | |
2585 | seq, 0xffffffff, 4); | |
2586 | } | |
2587 | ||
2588 | static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring, | |
2589 | unsigned vmid, uint64_t pd_addr) | |
2590 | { | |
2591 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); | |
2592 | } | |
2593 | ||
2594 | static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring) | |
2595 | { | |
2596 | return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ | |
2597 | } | |
2598 | ||
2599 | static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring) | |
2600 | { | |
2601 | u64 wptr; | |
2602 | ||
2603 | /* XXX check if swapping is necessary on BE */ | |
2604 | if (ring->use_doorbell) | |
2605 | wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); | |
2606 | else | |
2607 | BUG(); | |
2608 | return wptr; | |
2609 | } | |
2610 | ||
2611 | static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring) | |
2612 | { | |
2613 | struct amdgpu_device *adev = ring->adev; | |
2614 | ||
2615 | /* XXX check if swapping is necessary on BE */ | |
2616 | if (ring->use_doorbell) { | |
2617 | atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); | |
2618 | WDOORBELL64(ring->doorbell_index, ring->wptr); | |
2619 | } else { | |
2620 | BUG(); /* only DOORBELL method supported on gfx9 now */ | |
2621 | } | |
2622 | } | |
2623 | ||
2624 | static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, | |
2625 | u64 seq, unsigned int flags) | |
2626 | { | |
2627 | struct amdgpu_device *adev = ring->adev; | |
2628 | ||
2629 | /* we only allocate 32bit for each seq wb address */ | |
2630 | BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); | |
2631 | ||
2632 | /* write fence seq to the "addr" */ | |
2633 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
2634 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
2635 | WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); | |
2636 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
2637 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
2638 | amdgpu_ring_write(ring, lower_32_bits(seq)); | |
2639 | ||
2640 | if (flags & AMDGPU_FENCE_FLAG_INT) { | |
2641 | /* set register to trigger INT */ | |
2642 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
2643 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
2644 | WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); | |
659a4ab8 | 2645 | amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); |
86301129 LM |
2646 | amdgpu_ring_write(ring, 0); |
2647 | amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ | |
2648 | } | |
2649 | } | |
2650 | ||
2651 | static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, | |
2652 | uint32_t reg_val_offs) | |
2653 | { | |
2654 | struct amdgpu_device *adev = ring->adev; | |
2655 | ||
2656 | amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); | |
2657 | amdgpu_ring_write(ring, 0 | /* src: register */ | 
2658 | (5 << 8) | /* dst: memory */ | |
2659 | (1 << 20)); /* write confirm */ | |
2660 | amdgpu_ring_write(ring, reg); | |
2661 | amdgpu_ring_write(ring, 0); | |
2662 | amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + | |
2663 | reg_val_offs * 4)); | |
2664 | amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + | |
2665 | reg_val_offs * 4)); | |
2666 | } | |
2667 | ||
2668 | static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, | |
2669 | uint32_t val) | |
2670 | { | |
2671 | uint32_t cmd = 0; | |
2672 | ||
2673 | switch (ring->funcs->type) { | |
2674 | case AMDGPU_RING_TYPE_GFX: | |
2675 | cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; | |
2676 | break; | |
2677 | case AMDGPU_RING_TYPE_KIQ: | |
2678 | cmd = (1 << 16); /* no inc addr */ | |
2679 | break; | |
2680 | default: | |
2681 | cmd = WR_CONFIRM; | |
2682 | break; | |
2683 | } | |
2684 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
2685 | amdgpu_ring_write(ring, cmd); | |
2686 | amdgpu_ring_write(ring, reg); | |
2687 | amdgpu_ring_write(ring, 0); | |
2688 | amdgpu_ring_write(ring, val); | |
2689 | } | |
2690 | ||
2691 | static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, | |
2692 | uint32_t val, uint32_t mask) | |
2693 | { | |
2694 | gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); | |
2695 | } | |
2696 | ||
2697 | static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, | |
2698 | uint32_t reg0, uint32_t reg1, | |
2699 | uint32_t ref, uint32_t mask) | |
2700 | { | |
2701 | amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, | |
2702 | ref, mask); | |
2703 | } | |
2704 | ||
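/* Per-pipe EOP timestamp interrupts of the first MEC are gated through
 * CP_ME1_PIPE<n>_INT_CNTL; the helper below toggles TIME_STAMP_INT_ENABLE
 * for a single pipe on a single XCC instance.
 */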
2705 | static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2706 | struct amdgpu_device *adev, int me, int pipe,
2707 | enum amdgpu_interrupt_state state, int xcc_id)
2708 | { |
2709 | u32 mec_int_cntl, mec_int_cntl_reg; | |
2710 | ||
2711 | /* | |
2712 | * amdgpu controls only the first MEC. That's why this function only | |
2713 | * handles the setting of interrupts for this specific MEC. All other | |
2714 | * pipes' interrupts are set by amdkfd. | |
2715 | */ | |
2716 | ||
2717 | if (me == 1) { | |
2718 | switch (pipe) { | |
2719 | case 0: | |
2720 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
2721 | break;
2722 | case 1:
2723 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
2724 | break;
2725 | case 2:
2726 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
2727 | break;
2728 | case 3:
2729 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
2730 | break; |
2731 | default: | |
2732 | DRM_DEBUG("invalid pipe %d\n", pipe); | |
2733 | return; | |
2734 | } | |
2735 | } else { | |
2736 | DRM_DEBUG("invalid me %d\n", me); | |
2737 | return; | |
2738 | } | |
2739 | ||
2740 | switch (state) { | |
2741 | case AMDGPU_IRQ_STATE_DISABLE: | |
2742 | mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2743 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2744 | TIME_STAMP_INT_ENABLE, 0);
2745 | WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2746 | break;
2747 | case AMDGPU_IRQ_STATE_ENABLE:
2748 | mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2749 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2750 | TIME_STAMP_INT_ENABLE, 1);
2751 | WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2752 | break; |
2753 | default: | |
2754 | break; | |
2755 | } | |
2756 | } | |
2757 | ||
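/* Privileged register/instruction faults are reported by the CP; the
 * two handlers below flip the corresponding enable bits in
 * CP_INT_CNTL_RING0 on every XCC instance.
 */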
2758 | static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, | |
2759 | struct amdgpu_irq_src *source, | |
2760 | unsigned type, | |
2761 | enum amdgpu_interrupt_state state) | |
2762 | { | |
2763 | int i, num_xcc;
2764 |
2765 | num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2766 | switch (state) { |
2767 | case AMDGPU_IRQ_STATE_DISABLE: | |
2768 | case AMDGPU_IRQ_STATE_ENABLE: | |
2769 | for (i = 0; i < num_xcc; i++)
2770 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2771 | PRIV_REG_INT_ENABLE,
2772 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2773 | break; |
2774 | default: | |
2775 | break; | |
2776 | } | |
2777 | ||
2778 | return 0; | |
2779 | } | |
2780 | ||
2781 | static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, | |
2782 | struct amdgpu_irq_src *source, | |
2783 | unsigned type, | |
2784 | enum amdgpu_interrupt_state state) | |
2785 | { | |
2786 | int i, num_xcc;
2787 |
2788 | num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2789 | switch (state) { |
2790 | case AMDGPU_IRQ_STATE_DISABLE: | |
2791 | case AMDGPU_IRQ_STATE_ENABLE: | |
2792 | for (i = 0; i < num_xcc; i++)
2793 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2794 | PRIV_INSTR_INT_ENABLE,
2795 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2796 | break; |
2797 | default: | |
2798 | break; | |
2799 | } | |
2800 | ||
2801 | return 0; | |
2802 | } | |
2803 | ||
2804 | static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, | |
2805 | struct amdgpu_irq_src *src, | |
2806 | unsigned type, | |
2807 | enum amdgpu_interrupt_state state) | |
2808 | { | |
2809 | int i, num_xcc;
2810 |
2811 | num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2812 | for (i = 0; i < num_xcc; i++) {
2813 | switch (type) {
2814 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
2815 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2816 | adev, 1, 0, state, i);
2817 | break;
2818 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
2819 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2820 | adev, 1, 1, state, i);
2821 | break;
2822 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
2823 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2824 | adev, 1, 2, state, i);
2825 | break;
2826 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
2827 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2828 | adev, 1, 3, state, i);
2829 | break;
2830 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
2831 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2832 | adev, 2, 0, state, i);
2833 | break;
2834 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
2835 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2836 | adev, 2, 1, state, i);
2837 | break;
2838 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
2839 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2840 | adev, 2, 2, state, i);
2841 | break;
2842 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
2843 | gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2844 | adev, 2, 3, state, i);
2845 | break;
2846 | default:
2847 | break;
2848 | }
2849 | }
2850 |
2851 | return 0; |
2852 | } | |
2853 | ||
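/* IV ring_id layout used below: bits [1:0] = pipe, bits [3:2] = ME,
 * bits [6:4] = queue; entry->node_id identifies the XCC that raised
 * the interrupt.
 */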
2854 | static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, | |
2855 | struct amdgpu_irq_src *source, | |
2856 | struct amdgpu_iv_entry *entry) | |
2857 | { | |
2858 | int i, xcc_id;
2859 | u8 me_id, pipe_id, queue_id; |
2860 | struct amdgpu_ring *ring; | |
2861 | ||
2862 | DRM_DEBUG("IH: CP EOP\n"); | |
2863 | me_id = (entry->ring_id & 0x0c) >> 2; | |
2864 | pipe_id = (entry->ring_id & 0x03) >> 0; | |
2865 | queue_id = (entry->ring_id & 0x70) >> 4; | |
2866 | ||
2867 | xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2868 |
2869 | if (xcc_id == -EINVAL)
2870 | return -EINVAL;
2871 |
2872 | switch (me_id) { |
2873 | case 0: | |
2874 | case 1: | |
2875 | case 2: | |
2876 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | |
2877 | ring = &adev->gfx.compute_ring
2878 | [i +
2879 | xcc_id * adev->gfx.num_compute_rings];
2880 | /* Per-queue interrupt is supported for MEC starting from VI. |
2881 | * The interrupt can only be enabled/disabled per pipe instead of per queue. | |
2882 | */ | |
2883 |
2884 | if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) |
2885 | amdgpu_fence_process(ring); | |
2886 | } | |
2887 | break; | |
2888 | } | |
2889 | return 0; | |
2890 | } | |
2891 | ||
2892 | static void gfx_v9_4_3_fault(struct amdgpu_device *adev, | |
2893 | struct amdgpu_iv_entry *entry) | |
2894 | { | |
2895 | u8 me_id, pipe_id, queue_id; | |
2896 | struct amdgpu_ring *ring; | |
2897 | int i, xcc_id;
2898 | |
2899 | me_id = (entry->ring_id & 0x0c) >> 2; | |
2900 | pipe_id = (entry->ring_id & 0x03) >> 0; | |
2901 | queue_id = (entry->ring_id & 0x70) >> 4; | |
2902 | ||
2903 | xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2904 |
2905 | if (xcc_id == -EINVAL)
2906 | return;
2907 |
2908 | switch (me_id) { |
2909 | case 0: | |
2910 | case 1: | |
2911 | case 2: | |
2912 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | |
2913 | ring = &adev->gfx.compute_ring
2914 | [i +
2915 | xcc_id * adev->gfx.num_compute_rings];
2916 | if (ring->me == me_id && ring->pipe == pipe_id && |
2917 | ring->queue == queue_id) | |
2918 | drm_sched_fault(&ring->sched); | |
2919 | } | |
2920 | break; | |
2921 | } | |
2922 | } | |
2923 | ||
2924 | static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, | |
2925 | struct amdgpu_irq_src *source, | |
2926 | struct amdgpu_iv_entry *entry) | |
2927 | { | |
2928 | DRM_ERROR("Illegal register access in command stream\n"); | |
2929 | gfx_v9_4_3_fault(adev, entry); | |
2930 | return 0; | |
2931 | } | |
2932 | ||
2933 | static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, | |
2934 | struct amdgpu_irq_src *source, | |
2935 | struct amdgpu_iv_entry *entry) | |
2936 | { | |
2937 | DRM_ERROR("Illegal instruction in command stream\n"); | |
2938 | gfx_v9_4_3_fault(adev, entry); | |
2939 | return 0; | |
2940 | } | |
2941 | ||
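/* The ACQUIRE_MEM packet below invalidates the shader instruction and
 * scalar caches and writes back/invalidates TC and TCL1 over the full
 * address range (CP_COHER_SIZE = all ones), making earlier writes
 * visible to subsequent dispatches.
 */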
2942 | static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring) | |
2943 | { | |
2944 | const unsigned int cp_coher_cntl = | |
2945 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | | |
2946 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | | |
2947 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | | |
2948 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | | |
2949 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); | |
2950 | ||
2951 | /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
2952 | amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); | |
2953 | amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */ | |
2954 | amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ | |
2955 | amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ | |
2956 | amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ | |
2957 | amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ | |
2958 | amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ | |
2959 | } | |
2960 | ||
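/* Wave limiting trades occupancy for QoS: while a high priority compute
 * job runs, the SPI_WCL_PIPE_PERCENT_* registers throttle how many waves
 * gfx and the other compute pipes may launch.
 */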
2961 | static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, | |
2962 | uint32_t pipe, bool enable) | |
2963 | { | |
2964 | struct amdgpu_device *adev = ring->adev; | |
2965 | uint32_t val; | |
2966 | uint32_t wcl_cs_reg; | |
2967 | ||
2968 | /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
2969 | val = enable ? 0x1 : 0x7f; | |
2970 | ||
2971 | switch (pipe) { | |
2972 | case 0: | |
2973 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
2974 | break;
2975 | case 1:
2976 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
2977 | break;
2978 | case 2:
2979 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
2980 | break;
2981 | case 3:
2982 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
2983 | break; |
2984 | default: | |
2985 | DRM_DEBUG("invalid pipe %d\n", pipe); | |
2986 | return; | |
2987 | } | |
2988 | ||
2989 | amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
2990 | }
2991 |
2992 | static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
2993 | { | |
2994 | struct amdgpu_device *adev = ring->adev; | |
2995 | uint32_t val; | |
2996 | int i; | |
2997 | ||
2998 | /* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
2999 | * the number of gfx waves. Setting it to 0x1f (5 bits set) ensures that gfx
3000 | * only gets around 25% of gpu resources.
3001 | */ | |
3002 | val = enable ? 0x1f : 0x07ffffff; | |
3003 | amdgpu_ring_emit_wreg(ring, | |
3004 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3005 | val); |
3006 | ||
3007 | /* Restrict waves for normal/low priority compute queues as well | |
3008 | * to get best QoS for high priority compute jobs. | |
3009 | * | |
3010 | * amdgpu controls only the 1st ME (CS pipes 0-3).
3011 | */ | |
3012 | for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { | |
3013 | if (i != ring->pipe) | |
3014 | gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable); | |
3015 | ||
3016 | } | |
3017 | } | |
3018 | ||
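/* RAS memory-ID tables: the enums below mirror the memory instance IDs
 * reported in the per-block ECC status registers, and the
 * amdgpu_ras_memory_id_entry lists map those IDs to printable names for
 * error logging.
 */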
3019 | enum amdgpu_gfx_cp_ras_mem_id { |
3020 | AMDGPU_GFX_CP_MEM1 = 1, | |
3021 | AMDGPU_GFX_CP_MEM2, | |
3022 | AMDGPU_GFX_CP_MEM3, | |
3023 | AMDGPU_GFX_CP_MEM4, | |
3024 | AMDGPU_GFX_CP_MEM5, | |
3025 | }; | |
3026 | ||
3027 | enum amdgpu_gfx_gcea_ras_mem_id { | |
3028 | AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, | |
3029 | AMDGPU_GFX_GCEA_IORD_CMDMEM, | |
3030 | AMDGPU_GFX_GCEA_GMIWR_CMDMEM, | |
3031 | AMDGPU_GFX_GCEA_GMIRD_CMDMEM, | |
3032 | AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, | |
3033 | AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, | |
3034 | AMDGPU_GFX_GCEA_MAM_DMEM0, | |
3035 | AMDGPU_GFX_GCEA_MAM_DMEM1, | |
3036 | AMDGPU_GFX_GCEA_MAM_DMEM2, | |
3037 | AMDGPU_GFX_GCEA_MAM_DMEM3, | |
3038 | AMDGPU_GFX_GCEA_MAM_AMEM0, | |
3039 | AMDGPU_GFX_GCEA_MAM_AMEM1, | |
3040 | AMDGPU_GFX_GCEA_MAM_AMEM2, | |
3041 | AMDGPU_GFX_GCEA_MAM_AMEM3, | |
3042 | AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, | |
3043 | AMDGPU_GFX_GCEA_WRET_TAGMEM, | |
3044 | AMDGPU_GFX_GCEA_RRET_TAGMEM, | |
3045 | AMDGPU_GFX_GCEA_IOWR_DATAMEM, | |
3046 | AMDGPU_GFX_GCEA_GMIWR_DATAMEM, | |
3047 | AMDGPU_GFX_GCEA_DRAM_DATAMEM, | |
3048 | }; | |
3049 | ||
3050 | enum amdgpu_gfx_gc_cane_ras_mem_id { | |
3051 | AMDGPU_GFX_GC_CANE_MEM0 = 0, | |
3052 | }; | |
3053 | ||
3054 | enum amdgpu_gfx_gcutcl2_ras_mem_id { | |
3055 | AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, | |
3056 | }; | |
3057 | ||
3058 | enum amdgpu_gfx_gds_ras_mem_id { | |
3059 | AMDGPU_GFX_GDS_MEM0 = 0, | |
3060 | }; | |
3061 | ||
3062 | enum amdgpu_gfx_lds_ras_mem_id { | |
3063 | AMDGPU_GFX_LDS_BANK0 = 0, | |
3064 | AMDGPU_GFX_LDS_BANK1, | |
3065 | AMDGPU_GFX_LDS_BANK2, | |
3066 | AMDGPU_GFX_LDS_BANK3, | |
3067 | AMDGPU_GFX_LDS_BANK4, | |
3068 | AMDGPU_GFX_LDS_BANK5, | |
3069 | AMDGPU_GFX_LDS_BANK6, | |
3070 | AMDGPU_GFX_LDS_BANK7, | |
3071 | AMDGPU_GFX_LDS_BANK8, | |
3072 | AMDGPU_GFX_LDS_BANK9, | |
3073 | AMDGPU_GFX_LDS_BANK10, | |
3074 | AMDGPU_GFX_LDS_BANK11, | |
3075 | AMDGPU_GFX_LDS_BANK12, | |
3076 | AMDGPU_GFX_LDS_BANK13, | |
3077 | AMDGPU_GFX_LDS_BANK14, | |
3078 | AMDGPU_GFX_LDS_BANK15, | |
3079 | AMDGPU_GFX_LDS_BANK16, | |
3080 | AMDGPU_GFX_LDS_BANK17, | |
3081 | AMDGPU_GFX_LDS_BANK18, | |
3082 | AMDGPU_GFX_LDS_BANK19, | |
3083 | AMDGPU_GFX_LDS_BANK20, | |
3084 | AMDGPU_GFX_LDS_BANK21, | |
3085 | AMDGPU_GFX_LDS_BANK22, | |
3086 | AMDGPU_GFX_LDS_BANK23, | |
3087 | AMDGPU_GFX_LDS_BANK24, | |
3088 | AMDGPU_GFX_LDS_BANK25, | |
3089 | AMDGPU_GFX_LDS_BANK26, | |
3090 | AMDGPU_GFX_LDS_BANK27, | |
3091 | AMDGPU_GFX_LDS_BANK28, | |
3092 | AMDGPU_GFX_LDS_BANK29, | |
3093 | AMDGPU_GFX_LDS_BANK30, | |
3094 | AMDGPU_GFX_LDS_BANK31, | |
3095 | AMDGPU_GFX_LDS_SP_BUFFER_A, | |
3096 | AMDGPU_GFX_LDS_SP_BUFFER_B, | |
3097 | }; | |
3098 | ||
3099 | enum amdgpu_gfx_rlc_ras_mem_id { | |
3100 | AMDGPU_GFX_RLC_GPMF32 = 1, | |
3101 | AMDGPU_GFX_RLC_RLCVF32, | |
3102 | AMDGPU_GFX_RLC_SCRATCH, | |
3103 | AMDGPU_GFX_RLC_SRM_ARAM, | |
3104 | AMDGPU_GFX_RLC_SRM_DRAM, | |
3105 | AMDGPU_GFX_RLC_TCTAG, | |
3106 | AMDGPU_GFX_RLC_SPM_SE, | |
3107 | AMDGPU_GFX_RLC_SPM_GRBMT, | |
3108 | }; | |
3109 | ||
3110 | enum amdgpu_gfx_sp_ras_mem_id { | |
3111 | AMDGPU_GFX_SP_SIMDID0 = 0, | |
3112 | }; | |
3113 | ||
3114 | enum amdgpu_gfx_spi_ras_mem_id { | |
3115 | AMDGPU_GFX_SPI_MEM0 = 0, | |
3116 | AMDGPU_GFX_SPI_MEM1, | |
3117 | AMDGPU_GFX_SPI_MEM2, | |
3118 | AMDGPU_GFX_SPI_MEM3, | |
3119 | }; | |
3120 | ||
3121 | enum amdgpu_gfx_sqc_ras_mem_id { | |
3122 | AMDGPU_GFX_SQC_INST_CACHE_A = 100, | |
3123 | AMDGPU_GFX_SQC_INST_CACHE_B = 101, | |
3124 | AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, | |
3125 | AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, | |
3126 | AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, | |
3127 | AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, | |
3128 | AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, | |
3129 | AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, | |
3130 | AMDGPU_GFX_SQC_DATA_CACHE_A = 200, | |
3131 | AMDGPU_GFX_SQC_DATA_CACHE_B = 201, | |
3132 | AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, | |
3133 | AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, | |
3134 | AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, | |
3135 | AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205, | |
3136 | AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, | |
3137 | AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, | |
3138 | AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, | |
3139 | AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, | |
3140 | AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, | |
3141 | AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, | |
3142 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, | |
3143 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, | |
3144 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, | |
3145 | }; | |
3146 | ||
3147 | enum amdgpu_gfx_sq_ras_mem_id { | |
3148 | AMDGPU_GFX_SQ_SGPR_MEM0 = 0, | |
3149 | AMDGPU_GFX_SQ_SGPR_MEM1, | |
3150 | AMDGPU_GFX_SQ_SGPR_MEM2, | |
3151 | AMDGPU_GFX_SQ_SGPR_MEM3, | |
3152 | }; | |
3153 | ||
3154 | enum amdgpu_gfx_ta_ras_mem_id { | |
3155 | AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, | |
3156 | AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, | |
3157 | AMDGPU_GFX_TA_FS_CFIFO_RAM, | |
3158 | AMDGPU_GFX_TA_FSX_LFIFO, | |
3159 | AMDGPU_GFX_TA_FS_DFIFO_RAM, | |
3160 | }; | |
3161 | ||
3162 | enum amdgpu_gfx_tcc_ras_mem_id { | |
3163 | AMDGPU_GFX_TCC_MEM1 = 1, | |
3164 | }; | |
3165 | ||
3166 | enum amdgpu_gfx_tca_ras_mem_id { | |
3167 | AMDGPU_GFX_TCA_MEM1 = 1, | |
3168 | }; | |
3169 | ||
3170 | enum amdgpu_gfx_tci_ras_mem_id { | |
3171 | AMDGPU_GFX_TCIW_MEM = 1, | |
3172 | }; | |
3173 | ||
3174 | enum amdgpu_gfx_tcp_ras_mem_id { | |
3175 | AMDGPU_GFX_TCP_LFIFO0 = 1, | |
3176 | AMDGPU_GFX_TCP_SET0BANK0_RAM, | |
3177 | AMDGPU_GFX_TCP_SET0BANK1_RAM, | |
3178 | AMDGPU_GFX_TCP_SET0BANK2_RAM, | |
3179 | AMDGPU_GFX_TCP_SET0BANK3_RAM, | |
3180 | AMDGPU_GFX_TCP_SET1BANK0_RAM, | |
3181 | AMDGPU_GFX_TCP_SET1BANK1_RAM, | |
3182 | AMDGPU_GFX_TCP_SET1BANK2_RAM, | |
3183 | AMDGPU_GFX_TCP_SET1BANK3_RAM, | |
3184 | AMDGPU_GFX_TCP_SET2BANK0_RAM, | |
3185 | AMDGPU_GFX_TCP_SET2BANK1_RAM, | |
3186 | AMDGPU_GFX_TCP_SET2BANK2_RAM, | |
3187 | AMDGPU_GFX_TCP_SET2BANK3_RAM, | |
3188 | AMDGPU_GFX_TCP_SET3BANK0_RAM, | |
3189 | AMDGPU_GFX_TCP_SET3BANK1_RAM, | |
3190 | AMDGPU_GFX_TCP_SET3BANK2_RAM, | |
3191 | AMDGPU_GFX_TCP_SET3BANK3_RAM, | |
3192 | AMDGPU_GFX_TCP_VM_FIFO, | |
3193 | AMDGPU_GFX_TCP_DB_TAGRAM0, | |
3194 | AMDGPU_GFX_TCP_DB_TAGRAM1, | |
3195 | AMDGPU_GFX_TCP_DB_TAGRAM2, | |
3196 | AMDGPU_GFX_TCP_DB_TAGRAM3, | |
3197 | AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, | |
3198 | AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, | |
3199 | AMDGPU_GFX_TCP_CMD_FIFO, | |
3200 | }; | |
3201 | ||
3202 | enum amdgpu_gfx_td_ras_mem_id { | |
3203 | AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, | |
3204 | AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, | |
3205 | AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, | |
3206 | }; | |
3207 | ||
3208 | enum amdgpu_gfx_tcx_ras_mem_id { | |
3209 | AMDGPU_GFX_TCX_FIFOD0 = 0, | |
3210 | AMDGPU_GFX_TCX_FIFOD1, | |
3211 | AMDGPU_GFX_TCX_FIFOD2, | |
3212 | AMDGPU_GFX_TCX_FIFOD3, | |
3213 | AMDGPU_GFX_TCX_FIFOD4, | |
3214 | AMDGPU_GFX_TCX_FIFOD5, | |
3215 | AMDGPU_GFX_TCX_FIFOD6, | |
3216 | AMDGPU_GFX_TCX_FIFOD7, | |
3217 | AMDGPU_GFX_TCX_FIFOB0, | |
3218 | AMDGPU_GFX_TCX_FIFOB1, | |
3219 | AMDGPU_GFX_TCX_FIFOB2, | |
3220 | AMDGPU_GFX_TCX_FIFOB3, | |
3221 | AMDGPU_GFX_TCX_FIFOB4, | |
3222 | AMDGPU_GFX_TCX_FIFOB5, | |
3223 | AMDGPU_GFX_TCX_FIFOB6, | |
3224 | AMDGPU_GFX_TCX_FIFOB7, | |
3225 | AMDGPU_GFX_TCX_FIFOA0, | |
3226 | AMDGPU_GFX_TCX_FIFOA1, | |
3227 | AMDGPU_GFX_TCX_FIFOA2, | |
3228 | AMDGPU_GFX_TCX_FIFOA3, | |
3229 | AMDGPU_GFX_TCX_FIFOA4, | |
3230 | AMDGPU_GFX_TCX_FIFOA5, | |
3231 | AMDGPU_GFX_TCX_FIFOA6, | |
3232 | AMDGPU_GFX_TCX_FIFOA7, | |
3233 | AMDGPU_GFX_TCX_CFIFO0, | |
3234 | AMDGPU_GFX_TCX_CFIFO1, | |
3235 | AMDGPU_GFX_TCX_CFIFO2, | |
3236 | AMDGPU_GFX_TCX_CFIFO3, | |
3237 | AMDGPU_GFX_TCX_CFIFO4, | |
3238 | AMDGPU_GFX_TCX_CFIFO5, | |
3239 | AMDGPU_GFX_TCX_CFIFO6, | |
3240 | AMDGPU_GFX_TCX_CFIFO7, | |
3241 | AMDGPU_GFX_TCX_FIFO_ACKB0, | |
3242 | AMDGPU_GFX_TCX_FIFO_ACKB1, | |
3243 | AMDGPU_GFX_TCX_FIFO_ACKB2, | |
3244 | AMDGPU_GFX_TCX_FIFO_ACKB3, | |
3245 | AMDGPU_GFX_TCX_FIFO_ACKB4, | |
3246 | AMDGPU_GFX_TCX_FIFO_ACKB5, | |
3247 | AMDGPU_GFX_TCX_FIFO_ACKB6, | |
3248 | AMDGPU_GFX_TCX_FIFO_ACKB7, | |
3249 | AMDGPU_GFX_TCX_FIFO_ACKD0, | |
3250 | AMDGPU_GFX_TCX_FIFO_ACKD1, | |
3251 | AMDGPU_GFX_TCX_FIFO_ACKD2, | |
3252 | AMDGPU_GFX_TCX_FIFO_ACKD3, | |
3253 | AMDGPU_GFX_TCX_FIFO_ACKD4, | |
3254 | AMDGPU_GFX_TCX_FIFO_ACKD5, | |
3255 | AMDGPU_GFX_TCX_FIFO_ACKD6, | |
3256 | AMDGPU_GFX_TCX_FIFO_ACKD7, | |
3257 | AMDGPU_GFX_TCX_DST_FIFOA0, | |
3258 | AMDGPU_GFX_TCX_DST_FIFOA1, | |
3259 | AMDGPU_GFX_TCX_DST_FIFOA2, | |
3260 | AMDGPU_GFX_TCX_DST_FIFOA3, | |
3261 | AMDGPU_GFX_TCX_DST_FIFOA4, | |
3262 | AMDGPU_GFX_TCX_DST_FIFOA5, | |
3263 | AMDGPU_GFX_TCX_DST_FIFOA6, | |
3264 | AMDGPU_GFX_TCX_DST_FIFOA7, | |
3265 | AMDGPU_GFX_TCX_DST_FIFOB0, | |
3266 | AMDGPU_GFX_TCX_DST_FIFOB1, | |
3267 | AMDGPU_GFX_TCX_DST_FIFOB2, | |
3268 | AMDGPU_GFX_TCX_DST_FIFOB3, | |
3269 | AMDGPU_GFX_TCX_DST_FIFOB4, | |
3270 | AMDGPU_GFX_TCX_DST_FIFOB5, | |
3271 | AMDGPU_GFX_TCX_DST_FIFOB6, | |
3272 | AMDGPU_GFX_TCX_DST_FIFOB7, | |
3273 | AMDGPU_GFX_TCX_DST_FIFOD0, | |
3274 | AMDGPU_GFX_TCX_DST_FIFOD1, | |
3275 | AMDGPU_GFX_TCX_DST_FIFOD2, | |
3276 | AMDGPU_GFX_TCX_DST_FIFOD3, | |
3277 | AMDGPU_GFX_TCX_DST_FIFOD4, | |
3278 | AMDGPU_GFX_TCX_DST_FIFOD5, | |
3279 | AMDGPU_GFX_TCX_DST_FIFOD6, | |
3280 | AMDGPU_GFX_TCX_DST_FIFOD7, | |
3281 | AMDGPU_GFX_TCX_DST_FIFO_ACKB0, | |
3282 | AMDGPU_GFX_TCX_DST_FIFO_ACKB1, | |
3283 | AMDGPU_GFX_TCX_DST_FIFO_ACKB2, | |
3284 | AMDGPU_GFX_TCX_DST_FIFO_ACKB3, | |
3285 | AMDGPU_GFX_TCX_DST_FIFO_ACKB4, | |
3286 | AMDGPU_GFX_TCX_DST_FIFO_ACKB5, | |
3287 | AMDGPU_GFX_TCX_DST_FIFO_ACKB6, | |
3288 | AMDGPU_GFX_TCX_DST_FIFO_ACKB7, | |
3289 | AMDGPU_GFX_TCX_DST_FIFO_ACKD0, | |
3290 | AMDGPU_GFX_TCX_DST_FIFO_ACKD1, | |
3291 | AMDGPU_GFX_TCX_DST_FIFO_ACKD2, | |
3292 | AMDGPU_GFX_TCX_DST_FIFO_ACKD3, | |
3293 | AMDGPU_GFX_TCX_DST_FIFO_ACKD4, | |
3294 | AMDGPU_GFX_TCX_DST_FIFO_ACKD5, | |
3295 | AMDGPU_GFX_TCX_DST_FIFO_ACKD6, | |
3296 | AMDGPU_GFX_TCX_DST_FIFO_ACKD7, | |
3297 | }; | |
3298 | ||
3299 | enum amdgpu_gfx_atc_l2_ras_mem_id { | |
3300 | AMDGPU_GFX_ATC_L2_MEM0 = 0, | |
3301 | }; | |
3302 | ||
3303 | enum amdgpu_gfx_utcl2_ras_mem_id { | |
3304 | AMDGPU_GFX_UTCL2_MEM0 = 0, | |
3305 | }; | |
3306 | ||
3307 | enum amdgpu_gfx_vml2_ras_mem_id { | |
3308 | AMDGPU_GFX_VML2_MEM0 = 0, | |
3309 | }; | |
3310 | ||
3311 | enum amdgpu_gfx_vml2_walker_ras_mem_id { | |
3312 | AMDGPU_GFX_VML2_WALKER_MEM0 = 0, | |
3313 | }; | |
3314 | ||
3315 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { | |
3316 | {AMDGPU_GFX_CP_MEM1, "CP_MEM1"}, | |
3317 | {AMDGPU_GFX_CP_MEM2, "CP_MEM2"}, | |
3318 | {AMDGPU_GFX_CP_MEM3, "CP_MEM3"}, | |
3319 | {AMDGPU_GFX_CP_MEM4, "CP_MEM4"}, | |
3320 | {AMDGPU_GFX_CP_MEM5, "CP_MEM5"}, | |
3321 | }; | |
3322 | ||
3323 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { | |
3324 | {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"}, | |
3325 | {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"}, | |
3326 | {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"}, | |
3327 | {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"}, | |
3328 | {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"}, | |
3329 | {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"}, | |
3330 | {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"}, | |
3331 | {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"}, | |
3332 | {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"}, | |
3333 | {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"}, | |
3334 | {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"}, | |
3335 | {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"}, | |
3336 | {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"}, | |
3337 | {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"}, | |
3338 | {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"}, | |
3339 | {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"}, | |
3340 | {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"}, | |
3341 | {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"}, | |
3342 | {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"}, | |
3343 | {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"}, | |
3344 | }; | |
3345 | ||
3346 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { | |
3347 | {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"}, | |
3348 | }; | |
3349 | ||
3350 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { | |
3351 | {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"}, | |
3352 | }; | |
3353 | ||
3354 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { | |
3355 | {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"}, | |
3356 | }; | |
3357 | ||
3358 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { | |
3359 | {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"}, | |
3360 | {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"}, | |
3361 | {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"}, | |
3362 | {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"}, | |
3363 | {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"}, | |
3364 | {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"}, | |
3365 | {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"}, | |
3366 | {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"}, | |
3367 | {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"}, | |
3368 | {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"}, | |
3369 | {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"}, | |
3370 | {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"}, | |
3371 | {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"}, | |
3372 | {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"}, | |
3373 | {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"}, | |
3374 | {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"}, | |
3375 | {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"}, | |
3376 | {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"}, | |
3377 | {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"}, | |
3378 | {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"}, | |
3379 | {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"}, | |
3380 | {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"}, | |
3381 | {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"}, | |
3382 | {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"}, | |
3383 | {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"}, | |
3384 | {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"}, | |
3385 | {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"}, | |
3386 | {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"}, | |
3387 | {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"}, | |
3388 | {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"}, | |
3389 | {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"}, | |
3390 | {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"}, | |
3391 | {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"}, | |
3392 | {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"}, | |
3393 | }; | |
3394 | ||
3395 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { | |
3396 | {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"}, | |
3397 | {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"}, | |
3398 | {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"}, | |
3399 | {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"}, | |
3400 | {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"}, | |
3401 | {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"}, | |
3402 | {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"}, | |
3403 | {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"}, | |
3404 | }; | |
3405 | ||
3406 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { | |
3407 | {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"}, | |
3408 | }; | |
3409 | ||
3410 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { | |
3411 | {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"}, | |
3412 | {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"}, | |
3413 | {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"}, | |
3414 | {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"}, | |
3415 | }; | |
3416 | ||
3417 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { | |
3418 | {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"}, | |
3419 | {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"}, | |
3420 | {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"}, | |
3421 | {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"}, | |
3422 | {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"}, | |
3423 | {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"}, | |
3424 | {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"}, | |
3425 | {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"}, | |
3426 | {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"}, | |
3427 | {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"}, | |
3428 | {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"}, | |
3429 | {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"}, | |
3430 | {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"}, | |
3431 | {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"}, | |
3432 | {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"}, | |
3433 | {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"}, | |
3434 | {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"}, | |
3435 | {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"}, | |
3436 | {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"}, | |
3437 | {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"}, | |
3438 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"}, | |
3439 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"}, | |
3440 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"}, | |
3441 | }; | |
3442 | ||
3443 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { | |
3444 | {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"}, | |
3445 | {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"}, | |
3446 | {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"}, | |
3447 | {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"}, | |
3448 | }; | |
3449 | ||
3450 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { | |
3451 | {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"}, | |
3452 | {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"}, | |
3453 | {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"}, | |
3454 | {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"}, | |
3455 | {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"}, | |
3456 | }; | |
3457 | ||
3458 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { | |
3459 | {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"}, | |
3460 | }; | |
3461 | ||
3462 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { | |
3463 | {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"}, | |
3464 | }; | |
3465 | ||
3466 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { | |
3467 | {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"}, | |
3468 | }; | |
3469 | ||
3470 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { | |
3471 | {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"}, | |
3472 | {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"}, | |
3473 | {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"}, | |
3474 | {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"}, | |
3475 | {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"}, | |
3476 | {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"}, | |
3477 | {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"}, | |
3478 | {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"}, | |
3479 | {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"}, | |
3480 | {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"}, | |
3481 | {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"}, | |
3482 | {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"}, | |
3483 | {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"}, | |
3484 | {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"}, | |
3485 | {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"}, | |
3486 | {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"}, | |
3487 | {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"}, | |
3488 | {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"}, | |
3489 | {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"}, | |
3490 | {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"}, | |
3491 | {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"}, | |
3492 | {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"}, | |
3493 | {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"}, | |
3494 | {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"}, | |
3495 | {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"}, | |
3496 | }; | |
3497 | ||
3498 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { | |
3499 | {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"}, | |
3500 | {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"}, | |
3501 | {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"}, | |
3502 | }; | |
3503 | ||
3504 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { | |
3505 | {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"}, | |
3506 | {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"}, | |
3507 | {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"}, | |
3508 | {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"}, | |
3509 | {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"}, | |
3510 | {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"}, | |
3511 | {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"}, | |
3512 | {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"}, | |
3513 | {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"}, | |
3514 | {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"}, | |
3515 | {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"}, | |
3516 | {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"}, | |
3517 | {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"}, | |
3518 | {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"}, | |
3519 | {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"}, | |
3520 | {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"}, | |
3521 | {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"}, | |
3522 | {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"}, | |
3523 | {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"}, | |
3524 | {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"}, | |
3525 | {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"}, | |
3526 | {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"}, | |
3527 | {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"}, | |
3528 | {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"}, | |
3529 | {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"}, | |
3530 | {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"}, | |
3531 | {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"}, | |
3532 | {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"}, | |
3533 | {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"}, | |
3534 | {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"}, | |
3535 | {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"}, | |
3536 | {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"}, | |
3537 | {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"}, | |
3538 | {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"}, | |
3539 | {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"}, | |
3540 | {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"}, | |
3541 | {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"}, | |
3542 | {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"}, | |
3543 | {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"}, | |
3544 | {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"}, | |
3545 | {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"}, | |
3546 | {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"}, | |
3547 | {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"}, | |
3548 | {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"}, | |
3549 | {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"}, | |
3550 | {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"}, | |
3551 | {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"}, | |
3552 | {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"}, | |
3553 | {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"}, | |
3554 | {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"}, | |
3555 | {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"}, | |
3556 | {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"}, | |
3557 | {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"}, | |
3558 | {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"}, | |
3559 | {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"}, | |
3560 | {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"}, | |
3561 | {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"}, | |
3562 | {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"}, | |
3563 | {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"}, | |
3564 | {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"}, | |
3565 | {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"}, | |
3566 | {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"}, | |
3567 | {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"}, | |
3568 | {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"}, | |
3569 | {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"}, | |
3570 | {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"}, | |
3571 | {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"}, | |
3572 | {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"}, | |
3573 | {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"}, | |
3574 | {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"}, | |
3575 | {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"}, | |
3576 | {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"}, | |
3577 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"}, | |
3578 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"}, | |
3579 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"}, | |
3580 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"}, | |
3581 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"}, | |
3582 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"}, | |
3583 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"}, | |
3584 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"}, | |
3585 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"}, | |
3586 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"}, | |
3587 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"}, | |
3588 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"}, | |
3589 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"}, | |
3590 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"}, | |
3591 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"}, | |
3592 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"}, | |
3593 | }; | |
3594 | ||
3595 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = { | |
3596 | {AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
3597 | }; | |
3598 | ||
3599 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = { | |
3600 | {AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
3601 | }; | |
3602 | ||
3603 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = { | |
3604 | {AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
3605 | }; | |
3606 | ||
3607 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = { | |
3608 | {AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
3609 | }; | |
3610 | ||
3611 | static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = { | |
3612 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list) | |
3613 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list) | |
3614 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list) | |
3615 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list) | |
3616 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list) | |
3617 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list) | |
3618 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list) | |
3619 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list) | |
3620 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list) | |
3621 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list) | |
3622 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list) | |
3623 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list) | |
3624 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) | |
3625 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) | |
3626 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) | |
3627 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) | |
3628 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) | |
3629 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) | |
3630 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) | |
3631 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) | |
3632 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) | |
3633 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) | |
3634 | }; | |
3635 | ||
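/* Correctable-error register list. Each entry pairs a block's LO/HI
 * error status registers with the number of register instances to walk
 * (the count following the register pair), validity flags, a printable
 * block name, the memory-ID table used to decode reported memory IDs,
 * and (trailing value) the number of shader engines to iterate.
 */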
3636 | static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { | |
3637 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), | |
3638 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, | |
3639 | AMDGPU_GFX_RLC_MEM, 1}, | |
3640 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), | |
3641 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, | |
3642 | AMDGPU_GFX_CP_MEM, 1}, | |
3643 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), | |
3644 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, | |
3645 | AMDGPU_GFX_CP_MEM, 1}, | |
3646 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), | |
3647 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, | |
3648 | AMDGPU_GFX_CP_MEM, 1}, | |
3649 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), | |
3650 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, | |
3651 | AMDGPU_GFX_GDS_MEM, 1}, | |
3652 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), | |
3653 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, | |
3654 | AMDGPU_GFX_GC_CANE_MEM, 1}, | |
3655 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), | |
3656 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, | |
3657 | AMDGPU_GFX_SPI_MEM, 1},
3658 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), |
3659 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, | |
3660 | AMDGPU_GFX_SP_MEM, 4},
3661 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), |
3662 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, | |
3663 | AMDGPU_GFX_SP_MEM, 4},
3664 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), |
3665 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, | |
3666 | AMDGPU_GFX_SQ_MEM, 4},
3667 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), |
3668 | 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, | |
3669 | AMDGPU_GFX_SQC_MEM, 4},
3670 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), |
3671 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, | |
3672 | AMDGPU_GFX_TCX_MEM, 1}, | |
3673 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), | |
3674 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, | |
3675 | AMDGPU_GFX_TCC_MEM, 1}, | |
3676 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), | |
3677 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, | |
3678 | AMDGPU_GFX_TA_MEM, 4},
3679 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
3680 | 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3681 | AMDGPU_GFX_TCI_MEM, 1}, |
3682 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), | |
3683 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, | |
3684 | AMDGPU_GFX_TCP_MEM, 4},
3685 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), |
3686 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, | |
3687 | AMDGPU_GFX_TD_MEM, 4},
3688 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), |
3689 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, | |
3690 | AMDGPU_GFX_GCEA_MEM, 1}, | |
3691 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), | |
3692 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, | |
3693 | AMDGPU_GFX_LDS_MEM, 4},
3694 | }; |
3695 | ||
3696 | static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { | |
3697 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), | |
3698 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, | |
3699 | AMDGPU_GFX_RLC_MEM, 1}, | |
3700 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), | |
3701 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, | |
3702 | AMDGPU_GFX_CP_MEM, 1}, | |
3703 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), | |
3704 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, | |
3705 | AMDGPU_GFX_CP_MEM, 1}, | |
3706 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), | |
3707 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, | |
3708 | AMDGPU_GFX_CP_MEM, 1}, | |
3709 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), | |
3710 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, | |
3711 | AMDGPU_GFX_GDS_MEM, 1}, | |
3712 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), | |
3713 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, | |
3714 | AMDGPU_GFX_GC_CANE_MEM, 1}, | |
3715 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), | |
3716 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, | |
3717 | AMDGPU_GFX_SPI_MEM, 1},
3718 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), |
3719 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, | |
3720 | AMDGPU_GFX_SP_MEM, 4},
3721 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), |
3722 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, | |
3723 | AMDGPU_GFX_SP_MEM, 4},
3724 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), |
3725 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, | |
3726 | AMDGPU_GFX_SQ_MEM, 4},
3727 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), |
3728 | 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, | |
3729 | AMDGPU_GFX_SQC_MEM, 4},
3730 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), |
3731 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, | |
3732 | AMDGPU_GFX_TCX_MEM, 1}, | |
3733 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), | |
3734 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, | |
3735 | AMDGPU_GFX_TCC_MEM, 1}, | |
3736 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), | |
3737 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, | |
3738 | AMDGPU_GFX_TA_MEM, 4},
3739 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
3740 | 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3741 | AMDGPU_GFX_TCI_MEM, 1}, |
3742 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), | |
3743 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, | |
3744 | AMDGPU_GFX_TCP_MEM, 4},
3745 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), |
3746 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, | |
3747 | AMDGPU_GFX_TD_MEM, 4},
3748 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), |
3749 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, | |
3750 | AMDGPU_GFX_TCA_MEM, 1}, | |
3751 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), | |
3752 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, | |
3753 | AMDGPU_GFX_GCEA_MEM, 1}, | |
3754 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), | |
3755 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, | |
3756 | AMDGPU_GFX_LDS_MEM, 4},
3757 | }; |
3758 | ||
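/* Error counting walks every (shader engine, register instance) pair of
 * each block under grbm_idx_mutex, selecting the instance via GRBM
 * before reading the CE and UE status registers, then folds the totals
 * into per-socket/per-die (XCD) statistics.
 */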
3759 | static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, |
3760 | void *ras_error_status, int xcc_id) | |
3761 | { | |
3762 | struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; | |
3763 | unsigned long ce_count = 0, ue_count = 0; | |
3764 | uint32_t i, j, k; | |
3765 | ||
3766 | /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ |
3767 | struct amdgpu_smuio_mcm_config_info mcm_info = { | |
3768 | .socket_id = adev->smuio.funcs->get_socket_id(adev), | |
3769 | .die_id = xcc_id & 0x01 ? 1 : 0, | |
3770 | }; | |
3771 | ||
3772 | mutex_lock(&adev->grbm_idx_mutex); |
3773 | ||
3774 | for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { | |
3775 | for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { | |
3776 | for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { | |
3777 | /* no need to select if instance number is 1 */ | |
3778 | if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || | |
3779 | gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) | |
3780 | gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); | |
3781 | ||
3782 | amdgpu_ras_inst_query_ras_error_count(adev, | |
3783 | &(gfx_v9_4_3_ce_reg_list[i].reg_entry), | |
3784 | 1, | |
3785 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent, | |
3786 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size, | |
3787 | GET_INST(GC, xcc_id), | |
3788 | AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, | |
3789 | &ce_count); | |
3790 | ||
3791 | amdgpu_ras_inst_query_ras_error_count(adev, | |
3792 | &(gfx_v9_4_3_ue_reg_list[i].reg_entry), | |
3793 | 1, | |
3794 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, | |
3795 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, | |
3796 | GET_INST(GC, xcc_id), | |
3797 | AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, | |
3798 | &ue_count); | |
3799 | } | |
3800 | } | |
3801 | } | |
3802 | ||
3803 | /* handle extra register entries of UE */ |
3804 | for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { | |
3805 | for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { | |
3806 | for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { | |
3807 | /* no need to select if instance number is 1 */ | |
3808 | if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || | |
3809 | gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) | |
3810 | gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); | |
3811 | ||
3812 | amdgpu_ras_inst_query_ras_error_count(adev, | |
3813 | &(gfx_v9_4_3_ue_reg_list[i].reg_entry), | |
3814 | 1, | |
3815 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, | |
3816 | gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, | |
3817 | GET_INST(GC, xcc_id), | |
3818 | AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, | |
3819 | &ue_count); | |
3820 | } | |
3821 | } | |
3822 | } | |
3823 | ||
3824 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, |
3825 | xcc_id); | |
3826 | mutex_unlock(&adev->grbm_idx_mutex); | |
3827 | ||
3828 | /* the caller should make sure that the values of
3829 | * err_data->ue_count and err_data->ce_count are initialized
3830 | */
3831 | amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
3832 | amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
3833 | } |
3834 | ||
3835 | static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, |
3836 | void *ras_error_status, int xcc_id) | |
3837 | { | |
3838 | uint32_t i, j, k; | |
3839 | ||
3840 | mutex_lock(&adev->grbm_idx_mutex); | |
3841 | ||
3842 | for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { | |
3843 | for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { | |
3844 | for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { | |
3845 | /* no need to select if instance number is 1 */ | |
3846 | if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || | |
3847 | gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) | |
3848 | gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); | |
3849 | ||
3850 | amdgpu_ras_inst_reset_ras_error_count(adev, | |
3851 | &(gfx_v9_4_3_ce_reg_list[i].reg_entry), | |
3852 | 1, | |
3853 | GET_INST(GC, xcc_id)); | |
3854 | ||
3855 | amdgpu_ras_inst_reset_ras_error_count(adev, | |
3856 | &(gfx_v9_4_3_ue_reg_list[i].reg_entry), | |
3857 | 1, | |
3858 | GET_INST(GC, xcc_id)); | |
3859 | } | |
3860 | } | |
3861 | } | |
3862 | ||
3863 | /* handle extra register entries of UE */ |
3864 | for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { | |
3865 | for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { | |
3866 | for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { | |
3867 | /* no need to select if instance number is 1 */ | |
3868 | if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || | |
3869 | gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) | |
3870 | gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); | |
3871 | ||
3872 | amdgpu_ras_inst_reset_ras_error_count(adev, | |
3873 | &(gfx_v9_4_3_ue_reg_list[i].reg_entry), | |
3874 | 1, | |
3875 | GET_INST(GC, xcc_id)); | |
3876 | } | |
3877 | } | |
3878 | } | |
3879 | ||
3880 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, |
3881 | xcc_id); | |
3882 | mutex_unlock(&adev->grbm_idx_mutex); | |
3883 | } | |
3884 | ||
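/* The UTCL2/VML2 ECC status registers below follow a read-log-clear
 * pattern: a non-zero status is printed and then cleared by writing 0x3
 * to the status fields.
 */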
3885 | static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev, |
3886 | int xcc_id) | |
3887 | { | |
3888 | uint32_t data; | |
3889 | ||
3890 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS); | |
3891 | if (data) { | |
3892 | dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); | |
3893 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); | |
3894 | } | |
3895 | ||
3896 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS); | |
3897 | if (data) { | |
3898 | dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); | |
3899 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); | |
3900 | } | |
3901 | ||
3902 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
3903 | regVML2_WALKER_MEM_ECC_STATUS); | |
3904 | if (data) { | |
3905 | dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); | |
3906 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, | |
3907 | 0x3); | |
3908 | } | |
3909 | } | |
3910 | ||
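/* On an SQ watchdog timeout, dump per-wave state (PC, EXEC mask, current
 * instruction dwords, IB status) for every wave flagged in the timeout
 * status mask, via the indexed SQ wave-register read interface.
 */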
3911 | static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev, |
3912 | uint32_t status, int xcc_id) | |
3913 | { | |
3914 | struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; | |
3915 | uint32_t i, simd, wave; | |
3916 | uint32_t wave_status; | |
3917 | uint32_t wave_pc_lo, wave_pc_hi; | |
3918 | uint32_t wave_exec_lo, wave_exec_hi; | |
3919 | uint32_t wave_inst_dw0, wave_inst_dw1; | |
3920 | uint32_t wave_ib_sts; | |
3921 | ||
3922 | for (i = 0; i < 32; i++) { | |
3923 | if (!((1 << i) & status)) |
3924 | continue; | |
3925 | ||
3926 | simd = i / cu_info->max_waves_per_simd; | |
3927 | wave = i % cu_info->max_waves_per_simd; | |
3928 | ||
3929 | wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); | |
3930 | wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); | |
3931 | wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); | |
3932 | wave_exec_lo = | |
3933 | wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); | |
3934 | wave_exec_hi = | |
3935 | wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); | |
3936 | wave_inst_dw0 = | |
3937 | wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); | |
3938 | wave_inst_dw1 = | |
3939 | wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); | |
3940 | wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); | |
3941 | ||
3942 | dev_info( | |
3943 | adev->dev, | |
3944 | "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n", | |
3945 | simd, wave, wave_status, | |
3946 | ((uint64_t)wave_pc_hi << 32 | wave_pc_lo), | |
3947 | ((uint64_t)wave_exec_hi << 32 | wave_exec_lo), | |
3948 | ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0), | |
3949 | wave_ib_sts); | |
3950 | } | |
3951 | } | |
3952 | ||
3953 | static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev, | |
3954 | int xcc_id) | |
3955 | { | |
3956 | uint32_t se_idx, sh_idx, cu_idx; | |
3957 | uint32_t status; | |
3958 | ||
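| /* GRBM indexing is per XCC: select each SE/SH/CU in turn to read its |
|  * private copy of regSQ_TIMEOUT_STATUS, then restore broadcast mode |
|  * once the walk completes. */ |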
3959 | mutex_lock(&adev->grbm_idx_mutex); | |
3960 | for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { | |
3961 | for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { | |
3962 | for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { | |
3963 | gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, | |
3964 | cu_idx, xcc_id); | |
3965 | status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
3966 | regSQ_TIMEOUT_STATUS); | |
3967 | if (status != 0) { | |
3968 | dev_info( | |
3969 | adev->dev, | |
3970 | "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n", | |
3971 | se_idx, sh_idx, cu_idx); | |
3972 | gfx_v9_4_3_log_cu_timeout_status( | |
3973 | adev, status, xcc_id); | |
3974 | } | |
3975 | /* clear old status */ | |
3976 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
3977 | regSQ_TIMEOUT_STATUS, 0); | |
3978 | } | |
3979 | } | |
3980 | } | |
3981 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, | |
3982 | xcc_id); | |
3983 | mutex_unlock(&adev->grbm_idx_mutex); | |
3984 | } | |
3985 | ||
bf16235b TZ |
3986 | static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, |
3987 | void *ras_error_status, int xcc_id) | |
3988 | { | |
bf16235b | 3989 | gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); |
0386d52d | 3990 | gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id); |
bf16235b TZ |
3991 | } |
3992 | ||
47e7f527 TZ |
3993 | static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, |
3994 | int xcc_id) | |
3995 | { | |
3996 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); | |
3997 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); | |
3998 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3); | |
3999 | } | |
4000 | ||
0386d52d TZ |
4001 | static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev, |
4002 | int xcc_id) | |
4003 | { | |
4004 | uint32_t se_idx, sh_idx, cu_idx; | |
4005 | ||
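| /* Same per-CU GRBM walk as the query path above, but write-only: |
|  * every instance of regSQ_TIMEOUT_STATUS is simply cleared. */ |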
4006 | mutex_lock(&adev->grbm_idx_mutex); | |
4007 | for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { | |
4008 | for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { | |
4009 | for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { | |
4010 | gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, | |
4011 | cu_idx, xcc_id); | |
4012 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), | |
4013 | regSQ_TIMEOUT_STATUS, 0); | |
4014 | } | |
4015 | } | |
4016 | } | |
4017 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, | |
4018 | xcc_id); | |
4019 | mutex_unlock(&adev->grbm_idx_mutex); | |
4020 | } | |
4021 | ||
47e7f527 TZ |
4022 | static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, |
4023 | void *ras_error_status, int xcc_id) | |
4024 | { | |
4025 | gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); | |
0386d52d | 4026 | gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); |
47e7f527 TZ |
4027 | } |
4028 | ||
bd974498 TZ |
4029 | static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, |
4030 | void *ras_error_status, int xcc_id) | |
4031 | { | |
4032 | uint32_t i; | |
4033 | uint32_t data; | |
4034 | ||
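| /* The current config is read back from instance 0 and the same |
|  * programmed value is then broadcast to every shader engine of this |
|  * XCC in the loop below. */ |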
ac3343c7 TZ |
4035 | data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG); |
4036 | data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, | |
bd974498 TZ |
4037 | amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0); |
4038 | ||
4039 | if (amdgpu_watchdog_timer.timeout_fatal_disable && | |
4040 | (amdgpu_watchdog_timer.period < 1 || | |
4041 | amdgpu_watchdog_timer.period > 0x23)) { | |
4042 | dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n"); | |
4043 | amdgpu_watchdog_timer.period = 0x23; | |
4044 | } | |
4045 | data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, | |
4046 | amdgpu_watchdog_timer.period); | |
4047 | ||
4048 | mutex_lock(&adev->grbm_idx_mutex); | |
4049 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
4050 | gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id); | |
4051 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data); | |
4052 | } | |
4053 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, | |
4054 | xcc_id); | |
4055 | mutex_unlock(&adev->grbm_idx_mutex); | |
4056 | } | |
4057 | ||
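| /* Thin wrappers for the common RAS code: amdgpu_gfx_ras_error_func() |
|  * fans each per-instance handler above out across every XCC, so the |
|  * RAS framework only sees one entry point per operation. */ |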
bfa84da6 TZ |
4058 | static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, |
4059 | void *ras_error_status) | |
4060 | { | |
4061 | amdgpu_gfx_ras_error_func(adev, ras_error_status, | |
4062 | gfx_v9_4_3_inst_query_ras_err_count); | |
4063 | } | |
4064 | ||
30feef06 TZ |
4065 | static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) |
4066 | { | |
4067 | amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); | |
4068 | } | |
4069 | ||
bf16235b TZ |
4070 | static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) |
4071 | { | |
4072 | amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); | |
4073 | } | |
4074 | ||
47e7f527 TZ |
4075 | static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev) |
4076 | { | |
4077 | amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status); | |
4078 | } | |
4079 | ||
bd974498 TZ |
4080 | static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) |
4081 | { | |
4082 | amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); | |
4083 | } | |
4084 | ||
86301129 LM |
4085 | static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { |
4086 | .name = "gfx_v9_4_3", | |
4087 | .early_init = gfx_v9_4_3_early_init, | |
4088 | .late_init = gfx_v9_4_3_late_init, | |
4089 | .sw_init = gfx_v9_4_3_sw_init, | |
4090 | .sw_fini = gfx_v9_4_3_sw_fini, | |
4091 | .hw_init = gfx_v9_4_3_hw_init, | |
4092 | .hw_fini = gfx_v9_4_3_hw_fini, | |
4093 | .suspend = gfx_v9_4_3_suspend, | |
4094 | .resume = gfx_v9_4_3_resume, | |
4095 | .is_idle = gfx_v9_4_3_is_idle, | |
4096 | .wait_for_idle = gfx_v9_4_3_wait_for_idle, | |
4097 | .soft_reset = gfx_v9_4_3_soft_reset, | |
4098 | .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, | |
4099 | .set_powergating_state = gfx_v9_4_3_set_powergating_state, | |
4100 | .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, | |
4101 | }; | |
4102 | ||
4103 | static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { | |
4104 | .type = AMDGPU_RING_TYPE_COMPUTE, | |
4105 | .align_mask = 0xff, | |
4106 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | |
4107 | .support_64bit_ptrs = true, | |
4108 | .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, | |
4109 | .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, | |
4110 | .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, | |
4111 | .emit_frame_size = | |
4112 | 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ | |
4113 | 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ | |
4114 | 5 + /* hdp invalidate */ | |
4115 | 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ | |
4116 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + | |
4117 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + | |
4118 | 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ | |
4119 | 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ | |
4120 | 7 + /* gfx_v9_4_3_emit_mem_sync */ | |
4121 | 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ | |
4122 | 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ | |
4123 | .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ | |
4124 | .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, | |
4125 | .emit_fence = gfx_v9_4_3_ring_emit_fence, | |
4126 | .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, | |
4127 | .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, | |
4128 | .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, | |
4129 | .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, | |
4130 | .test_ring = gfx_v9_4_3_ring_test_ring, | |
4131 | .test_ib = gfx_v9_4_3_ring_test_ib, | |
4132 | .insert_nop = amdgpu_ring_insert_nop, | |
4133 | .pad_ib = amdgpu_ring_generic_pad_ib, | |
4134 | .emit_wreg = gfx_v9_4_3_ring_emit_wreg, | |
4135 | .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, | |
4136 | .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, | |
4137 | .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, | |
4138 | .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, | |
4139 | }; | |
4140 | ||
4141 | static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { | |
4142 | .type = AMDGPU_RING_TYPE_KIQ, | |
4143 | .align_mask = 0xff, | |
4144 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | |
4145 | .support_64bit_ptrs = true, | |
4146 | .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, | |
4147 | .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, | |
4148 | .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, | |
4149 | .emit_frame_size = | |
4150 | 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ | |
4151 | 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ | |
4152 | 5 + /* hdp invalidate */ | |
4153 | 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ | |
4154 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + | |
4155 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + | |
4156 | 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ | |
4157 | 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ | |
4158 | .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ | |
4159 | .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, | |
4160 | .test_ring = gfx_v9_4_3_ring_test_ring, | |
4161 | .insert_nop = amdgpu_ring_insert_nop, | |
4162 | .pad_ib = amdgpu_ring_generic_pad_ib, | |
4163 | .emit_rreg = gfx_v9_4_3_ring_emit_rreg, | |
4164 | .emit_wreg = gfx_v9_4_3_ring_emit_wreg, | |
4165 | .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, | |
4166 | .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, | |
4167 | }; | |
4168 | ||
4169 | static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) | |
4170 | { | |
8078f1c6 | 4171 | int i, j, num_xcc; |
86301129 | 4172 | |
8078f1c6 LL |
4173 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
4174 | for (i = 0; i < num_xcc; i++) { | |
6f917fdc | 4175 | adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; |
86301129 | 4176 | |
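| /* compute_ring[] is laid out XCC-major: each XCC owns a contiguous |
|  * block of num_compute_rings entries. */ |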
6f917fdc LM |
4177 | for (j = 0; j < adev->gfx.num_compute_rings; j++) |
4178 | adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs | |
4179 | = &gfx_v9_4_3_ring_funcs_compute; | |
4180 | } | |
86301129 LM |
4181 | } |
4182 | ||
4183 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = { | |
4184 | .set = gfx_v9_4_3_set_eop_interrupt_state, | |
4185 | .process = gfx_v9_4_3_eop_irq, | |
4186 | }; | |
4187 | ||
4188 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = { | |
4189 | .set = gfx_v9_4_3_set_priv_reg_fault_state, | |
4190 | .process = gfx_v9_4_3_priv_reg_irq, | |
4191 | }; | |
4192 | ||
4193 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = { | |
4194 | .set = gfx_v9_4_3_set_priv_inst_fault_state, | |
4195 | .process = gfx_v9_4_3_priv_inst_irq, | |
4196 | }; | |
4197 | ||
4198 | static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev) | |
4199 | { | |
4200 | adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; | |
4201 | adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs; | |
4202 | ||
4203 | adev->gfx.priv_reg_irq.num_types = 1; | |
4204 | adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs; | |
4205 | ||
4206 | adev->gfx.priv_inst_irq.num_types = 1; | |
4207 | adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs; | |
4208 | } | |
4209 | ||
4210 | static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev) | |
4211 | { | |
4212 | adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs; | |
4213 | } | |
4214 | ||
4215 | ||
4216 | static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) | |
4217 | { | |
4218 | /* init ASIC GDS info */ |
4e8303cf | 4219 | switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { |
86301129 LM |
4220 | case IP_VERSION(9, 4, 3): |
4221 | /* GC 9.4.3 removed all of the GDS internal memory; |
4222 | * only GWS opcodes (e.g. barrier, semaphore) are |
4223 | * supported in the kernel. */ |
4224 | adev->gds.gds_size = 0; | |
4225 | break; | |
4226 | default: | |
4227 | adev->gds.gds_size = 0x10000; | |
4228 | break; | |
4229 | } | |
4230 | ||
4e8303cf | 4231 | switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { |
86301129 LM |
4232 | case IP_VERSION(9, 4, 3): |
4233 | /* deprecated for 9.4.3, no usage at all */ | |
4234 | adev->gds.gds_compute_max_wave_id = 0; | |
4235 | break; | |
4236 | default: | |
4237 | /* this really depends on the chip */ | |
4238 | adev->gds.gds_compute_max_wave_id = 0x7ff; | |
4239 | break; | |
4240 | } | |
4241 | ||
4242 | adev->gds.gws_size = 64; | |
4243 | adev->gds.oa_size = 16; | |
4244 | } | |
4245 | ||
4246 | static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, | |
f705a6f0 | 4247 | u32 bitmap, int xcc_id) |
86301129 LM |
4248 | { |
4249 | u32 data; | |
4250 | ||
4251 | if (!bitmap) | |
4252 | return; | |
4253 | ||
4254 | data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; | |
4255 | data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; | |
4256 | ||
f705a6f0 | 4257 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data); |
86301129 LM |
4258 | } |
4259 | ||
f705a6f0 | 4260 | static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id) |
86301129 LM |
4261 | { |
4262 | u32 data, mask; | |
4263 | ||
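| /* Merge the hardware (CC) and user (GC_USER) inactive-CU masks, then |
|  * invert within max_cu_per_sh to obtain the active-CU bitmap. */ |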
f705a6f0 MJ |
4264 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG); |
4265 | data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG); | |
86301129 LM |
4266 | |
4267 | data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; | |
4268 | data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; | |
4269 | ||
4270 | mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); | |
4271 | ||
4272 | return (~data) & mask; | |
4273 | } | |
4274 | ||
4275 | static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, | |
4276 | struct amdgpu_cu_info *cu_info) | |
4277 | { | |
f705a6f0 | 4278 | int i, j, k, counter, xcc_id, active_cu_number = 0; |
86301129 LM |
4279 | u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; |
4280 | unsigned disable_masks[4 * 4]; | |
4281 | ||
4282 | if (!adev || !cu_info) | |
4283 | return -EINVAL; | |
4284 | ||
4285 | /* | |
4286 | * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs | |
4287 | */ | |
4288 | if (adev->gfx.config.max_shader_engines * | |
4289 | adev->gfx.config.max_sh_per_se > 16) | |
4290 | return -EINVAL; | |
4291 | ||
4292 | amdgpu_gfx_parse_disable_cu(disable_masks, | |
4293 | adev->gfx.config.max_shader_engines, | |
4294 | adev->gfx.config.max_sh_per_se); | |
4295 | ||
4296 | mutex_lock(&adev->grbm_idx_mutex); | |
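| /* Walk every XCC/SE/SH: apply any user-requested CU disables, then |
|  * record the resulting active bitmap. The always-on (AO) mask only |
|  * covers the first 2 SEs x 2 SHs, packed 8 bits per SH. */ |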
f705a6f0 MJ |
4297 | for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { |
4298 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
4299 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | |
4300 | mask = 1; | |
4301 | ao_bitmap = 0; | |
4302 | counter = 0; | |
4303 | gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id); | |
4304 | gfx_v9_4_3_set_user_cu_inactive_bitmap( | |
4305 | adev, | |
4306 | disable_masks[i * adev->gfx.config.max_sh_per_se + j], | |
4307 | xcc_id); | |
4308 | bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id); | |
4309 | ||
4310 | cu_info->bitmap[xcc_id][i][j] = bitmap; | |
4311 | ||
4312 | for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { | |
4313 | if (bitmap & mask) { | |
4314 | if (counter < adev->gfx.config.max_cu_per_sh) | |
4315 | ao_bitmap |= mask; | |
4316 | counter++; | |
4317 | } | |
4318 | mask <<= 1; | |
86301129 | 4319 | } |
f705a6f0 MJ |
4320 | active_cu_number += counter; |
4321 | if (i < 2 && j < 2) | |
4322 | ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); | |
4323 | cu_info->ao_cu_bitmap[i][j] = ao_bitmap; | |
86301129 | 4324 | } |
86301129 | 4325 | } |
f705a6f0 MJ |
4326 | gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, |
4327 | xcc_id); | |
86301129 | 4328 | } |
86301129 LM |
4329 | mutex_unlock(&adev->grbm_idx_mutex); |
4330 | ||
4331 | cu_info->number = active_cu_number; | |
4332 | cu_info->ao_cu_mask = ao_cu_mask; | |
4333 | cu_info->simd_per_cu = NUM_SIMD_PER_CU; | |
4334 | ||
4335 | return 0; | |
4336 | } | |
4337 | ||
4338 | const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { | |
4339 | .type = AMD_IP_BLOCK_TYPE_GFX, | |
4340 | .major = 9, | |
4341 | .minor = 4, | |
4eaa007c | 4342 | .rev = 3, |
86301129 | 4343 | .funcs = &gfx_v9_4_3_ip_funcs, |
7c0f7ee0 | 4344 | }; |
73c84f7c LL |
4345 | |
4346 | static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask) | |
4347 | { | |
4348 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
4349 | uint32_t tmp_mask; | |
4350 | int i, r; | |
4351 | ||
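| /* Bring each XCC in inst_mask back up in order: constants, then RLC |
|  * (skipped under SR-IOV, where the host is expected to own RLC |
|  * setup), then CP. */ |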
4352 | /* TODO : Initialize golden regs */ | |
4353 | /* gfx_v9_4_3_init_golden_registers(adev); */ | |
4354 | ||
4355 | tmp_mask = inst_mask; | |
4356 | for_each_inst(i, tmp_mask) | |
4357 | gfx_v9_4_3_xcc_constants_init(adev, i); | |
4358 | ||
cab69d36 YW |
4359 | if (!amdgpu_sriov_vf(adev)) { |
4360 | tmp_mask = inst_mask; | |
4361 | for_each_inst(i, tmp_mask) { | |
4362 | r = gfx_v9_4_3_xcc_rlc_resume(adev, i); | |
4363 | if (r) | |
4364 | return r; | |
4365 | } | |
73c84f7c LL |
4366 | } |
4367 | ||
4368 | tmp_mask = inst_mask; | |
4369 | for_each_inst(i, tmp_mask) { | |
4370 | r = gfx_v9_4_3_xcc_cp_resume(adev, i); | |
4371 | if (r) | |
4372 | return r; | |
4373 | } | |
4374 | ||
4375 | return 0; | |
4376 | } | |
4377 | ||
4378 | static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask) | |
4379 | { | |
4380 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
4381 | int i; | |
4382 | ||
4383 | for_each_inst(i, inst_mask) | |
4384 | gfx_v9_4_3_xcc_fini(adev, i); | |
4385 | ||
4386 | return 0; | |
4387 | } | |
4388 | ||
4389 | struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { | |
4390 | .suspend = &gfx_v9_4_3_xcp_suspend, | |
4391 | .resume = &gfx_v9_4_3_xcp_resume | |
4392 | }; | |
92ecb92c TZ |
4393 | |
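| /* RAS hooks exported via gfx_v9_4_3_ras (declared near the top of |
|  * the file); enable_watchdog_timer sits outside hw_ops and is |
|  * presumably invoked from the GFX RAS late-init path. */ |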
4394 | struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { | |
4395 | .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, | |
4396 | .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count, | |
4397 | .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status, | |
4398 | .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status, | |
4399 | }; | |
4400 | ||
4401 | struct amdgpu_gfx_ras gfx_v9_4_3_ras = { | |
4402 | .ras_block = { | |
4403 | .hw_ops = &gfx_v9_4_3_ras_ops, | |
4404 | }, | |
bd974498 | 4405 | .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer, |
92ecb92c | 4406 | }; |