]>
Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | ||
24 | #ifndef __AMDGPU_GFX_H__ | |
25 | #define __AMDGPU_GFX_H__ | |
26 | ||
448fe192 HR |
27 | /* |
28 | * GFX stuff | |
29 | */ | |
30 | #include "clearstate_defs.h" | |
31 | #include "amdgpu_ring.h" | |
88dfc9a3 | 32 | #include "amdgpu_rlc.h" |
289bcffb | 33 | #include "amdgpu_imu.h" |
22616eb5 | 34 | #include "soc15.h" |
8b0fb0e9 | 35 | #include "amdgpu_ras.h" |
ded946f3 | 36 | #include "amdgpu_ring_mux.h" |
d38ceaf9 | 37 | |
448fe192 HR |
38 | /* GFX current status */ |
39 | #define AMDGPU_GFX_NORMAL_MODE 0x00000000L | |
40 | #define AMDGPU_GFX_SAFE_MODE 0x00000001L | |
41 | #define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L | |
42 | #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L | |
43 | #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L | |
6f8941a2 | 44 | |
541372bb | 45 | #define AMDGPU_MAX_GC_INSTANCES 8 |
68fa72a4 | 46 | #define AMDGPU_MAX_QUEUES 128 |
541372bb | 47 | |
68fa72a4 MJ |
48 | #define AMDGPU_MAX_GFX_QUEUES AMDGPU_MAX_QUEUES |
49 | #define AMDGPU_MAX_COMPUTE_QUEUES AMDGPU_MAX_QUEUES | |
448fe192 | 50 | |
34eaf30f ND |
/* GFX pipe priority levels, expressed via the generic ring priorities. */
enum amdgpu_gfx_pipe_priority {
	AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
	AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};
55 | ||
56 | #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 | |
57 | #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 | |
58 | ||
541372bb LM |
/*
 * Compute partition modes (SPX..CPX) for multi-XCC parts, plus two
 * negative sentinels: UNKNOWN (not yet determined) and AUTO (let the
 * driver pick).  amdgpu_gfx_compute_mode_desc() maps these to names.
 */
enum amdgpu_gfx_partition {
	AMDGPU_SPX_PARTITION_MODE = 0,
	AMDGPU_DPX_PARTITION_MODE = 1,
	AMDGPU_TPX_PARTITION_MODE = 2,
	AMDGPU_QPX_PARTITION_MODE = 3,
	AMDGPU_CPX_PARTITION_MODE = 4,
	AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1,
	/* Automatically choose the right mode */
	AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
};
69 | ||
8078f1c6 LL |
70 | #define NUM_XCC(x) hweight16(x) |
71 | ||
77462ab8 TZ |
/*
 * GFX memory block identifiers used by RAS error reporting
 * (see struct amdgpu_gfx_ras_reg_entry).  AMDGPU_GFX_MEM_TYPE_NUM
 * is the number of valid entries, not a real block.
 */
enum amdgpu_gfx_ras_mem_id_type {
	AMDGPU_GFX_CP_MEM = 0,
	AMDGPU_GFX_GCEA_MEM,
	AMDGPU_GFX_GC_CANE_MEM,
	AMDGPU_GFX_GCUTCL2_MEM,
	AMDGPU_GFX_GDS_MEM,
	AMDGPU_GFX_LDS_MEM,
	AMDGPU_GFX_RLC_MEM,
	AMDGPU_GFX_SP_MEM,
	AMDGPU_GFX_SPI_MEM,
	AMDGPU_GFX_SQC_MEM,
	AMDGPU_GFX_SQ_MEM,
	AMDGPU_GFX_TA_MEM,
	AMDGPU_GFX_TCC_MEM,
	AMDGPU_GFX_TCA_MEM,
	AMDGPU_GFX_TCI_MEM,
	AMDGPU_GFX_TCP_MEM,
	AMDGPU_GFX_TD_MEM,
	AMDGPU_GFX_TCX_MEM,
	AMDGPU_GFX_ATC_L2_MEM,
	AMDGPU_GFX_UTCL2_MEM,
	AMDGPU_GFX_VML2_MEM,
	AMDGPU_GFX_VML2_WALKER_MEM,
	AMDGPU_GFX_MEM_TYPE_NUM
};
97 | ||
448fe192 HR |
/*
 * MEC (compute micro engine) state: firmware buffer objects, queue
 * topology, and per-queue MQD backup storage.
 */
struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;		/* BO backing the HPD EOP area */
	u64 hpd_eop_gpu_addr;			/* GPU VA of hpd_eop_obj */
	struct amdgpu_bo *mec_fw_obj;		/* BO holding MEC firmware */
	u64 mec_fw_gpu_addr;
	struct amdgpu_bo *mec_fw_data_obj;	/* separate firmware data BO */
	u64 mec_fw_data_gpu_addr;

	u32 num_mec;				/* number of MEC engines */
	u32 num_pipe_per_mec;
	u32 num_queue_per_pipe;
	/* one MQD backup slot per possible compute ring, across all GC instances */
	void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
};
448fe192 | 111 | |
be697aa3 | 112 | struct amdgpu_mec_bitmap { |
448fe192 HR |
113 | /* These are the resources for which amdgpu takes ownership */ |
114 | DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); | |
115 | }; | |
116 | ||
19191961 JX |
/* Action selector for the KIQ UNMAP_QUEUES packet (see kiq_pm4_funcs). */
enum amdgpu_unmap_queues_action {
	PREEMPT_QUEUES = 0,
	RESET_QUEUES,
	DISABLE_PROCESS_QUEUES,
	PREEMPT_QUEUES_NO_UNMAP,
};
123 | ||
bc4a6f71 RZ |
/*
 * ASIC-specific emitters for KIQ PM4 packets, plus the size (in dwords)
 * of each packet so callers can reserve ring space before emitting.
 */
struct kiq_pm4_funcs {
	/* Support ASIC-specific kiq pm4 packets*/
	void (*kiq_set_resources)(struct amdgpu_ring *kiq_ring,
				  uint64_t queue_mask);
	void (*kiq_map_queues)(struct amdgpu_ring *kiq_ring,
			       struct amdgpu_ring *ring);
	void (*kiq_unmap_queues)(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring,
				 enum amdgpu_unmap_queues_action action,
				 u64 gpu_addr, u64 seq);
	void (*kiq_query_status)(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring,
				 u64 addr,
				 u64 seq);
	void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
				    uint16_t pasid, uint32_t flush_type,
				    bool all_hub);
	/* Packet sizes */
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int invalidate_tlbs_size;
};
148 | ||
448fe192 HR |
/* Per-instance KIQ state: EOP buffer, the KIQ ring and its packet helpers. */
struct amdgpu_kiq {
	u64			eop_gpu_addr;	/* GPU VA of eop_obj */
	struct amdgpu_bo	*eop_obj;
	spinlock_t		ring_lock;	/* NOTE(review): presumably serializes KIQ submissions — confirm at call sites */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	const struct kiq_pm4_funcs *pmf;	/* ASIC-specific PM4 emitters */
	void			*mqd_backup;
};
158 | ||
448fe192 HR |
159 | /* |
160 | * GFX configurations | |
161 | */ | |
162 | #define AMDGPU_GFX_MAX_SE 4 | |
163 | #define AMDGPU_GFX_MAX_SH_PER_SE 2 | |
164 | ||
/* Per-SE/SH render backend configuration (raster config register values). */
struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};
171 | ||
/* Unpacked view of the gb_addr_config value (see amdgpu_gfx_config). */
struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;		/* shader engines */
	uint8_t num_rb_per_se;	/* render backends per shader engine */
	uint8_t num_pkrs;	/* packers (gfx10.3+ only) */
};
181 | ||
/*
 * Static GFX hardware configuration: limits, topology counts, and a few
 * cached register values, filled in during ASIC init.
 */
struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned num_banks;
	unsigned num_ranks;
	unsigned gb_addr_config;
	unsigned num_rbs;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	/* decoded form of gb_addr_config above */
	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configure feature */
	uint32_t double_offchip_lds_buf;
	/* cached value of DB_DEBUG2 */
	uint32_t db_debug2;
	/* gfx10 specific config */
	uint32_t num_sc_per_sh;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;
	/* Whether texture coordinate truncation is conformant. */
	bool ta_cntl2_truncate_coord_mode;
	uint64_t tcc_disabled_mask;
	/* GC cache/interface sizing, presumably from IP discovery — TODO confirm */
	uint32_t gc_num_tcp_per_sa;
	uint32_t gc_num_sdp_interface;
	uint32_t gc_num_tcps;
	uint32_t gc_num_tcp_per_wpg;
	uint32_t gc_tcp_l1_size;
	uint32_t gc_num_sqc_per_wgp;
	uint32_t gc_l1_instruction_cache_size_per_sqc;
	uint32_t gc_l1_data_cache_size_per_sqc;
	uint32_t gc_gl1c_per_sa;
	uint32_t gc_gl1c_size_per_instance;
	uint32_t gc_gl2c_per_gpu;
	uint32_t gc_tcp_size_per_cu;
	uint32_t gc_num_cu_per_sqc;
	uint32_t gc_tcc_size;
};
244 | ||
/* Cached compute unit (CU) topology and capability information. */
struct amdgpu_cu_info {
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;

	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_mask;
	uint32_t ao_cu_bitmap[4][4];
	/* active-CU bitmap per GC instance; inner [4][4] presumably [se][sh] — TODO confirm */
	uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
258 | ||
8b0fb0e9 | 259 | struct amdgpu_gfx_ras { |
260 | struct amdgpu_ras_block_object ras_block; | |
719a9b33 | 261 | void (*enable_watchdog_timer)(struct amdgpu_device *adev); |
6475ae2b | 262 | bool (*query_utcl2_poison_status)(struct amdgpu_device *adev); |
ae6f2db4 YC |
263 | int (*rlc_gc_fed_irq)(struct amdgpu_device *adev, |
264 | struct amdgpu_irq_src *source, | |
265 | struct amdgpu_iv_entry *entry); | |
ac7b25d9 YC |
266 | int (*poison_consumption_handler)(struct amdgpu_device *adev, |
267 | struct amdgpu_iv_entry *entry); | |
719a9b33 HZ |
268 | }; |
269 | ||
0db0c037 AD |
/*
 * Size/alignment requirements for the gfx shadow and CSA buffers,
 * reported by amdgpu_gfx_funcs.get_gfx_shadow_info.
 */
struct amdgpu_gfx_shadow_info {
	u32 shadow_size;	/* bytes required for the shadow buffer */
	u32 shadow_alignment;
	u32 csa_size;		/* bytes required for the CSA */
	u32 csa_alignment;
};
276 | ||
448fe192 HR |
/* ASIC-dispatched GFX operations (clock query, wave debug, partitioning). */
struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	/* select SE/SH/instance (and XCC) targeted by subsequent accesses */
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 instance, int xcc_id);
	/* wave debug readback helpers */
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			       uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				uint32_t wave, uint32_t thread, uint32_t start,
				uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				uint32_t wave, uint32_t start, uint32_t size,
				uint32_t *dst);
	void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
				 u32 queue, u32 vmid, u32 xcc_id);
	void (*init_spm_golden)(struct amdgpu_device *adev);
	void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
	/* query size/alignment for the gfx shadow buffers (gfx11 shadow feature) */
	int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
				   struct amdgpu_gfx_shadow_info *shadow_info);
	/* query/switch the compute partition mode (enum amdgpu_gfx_partition) */
	enum amdgpu_gfx_partition
			(*query_partition_mode)(struct amdgpu_device *adev);
	int (*switch_partition_mode)(struct amdgpu_device *adev,
				     int num_xccs_per_xcp);
	int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
};
302 | ||
448fe192 HR |
/*
 * Work item for deferred SQ interrupt handling; ih_data carries the
 * interrupt payload (presumably from the IH ring — verify in the handler).
 */
struct sq_work {
	struct work_struct work;
	unsigned ih_data;
};
307 | ||
068ed934 HZ |
/* PFP (prefetch parser) firmware buffer objects and CPU mappings. */
struct amdgpu_pfp {
	struct amdgpu_bo *pfp_fw_obj;	/* BO holding PFP firmware */
	uint64_t pfp_fw_gpu_addr;
	uint32_t *pfp_fw_ptr;		/* CPU mapping of pfp_fw_obj */

	struct amdgpu_bo *pfp_fw_data_obj;	/* separate firmware data BO */
	uint64_t pfp_fw_data_gpu_addr;
	uint32_t *pfp_fw_data_ptr;
};
317 | ||
2a00bb13 HZ |
/* CE (constant engine) firmware buffer object and CPU mapping. */
struct amdgpu_ce {
	struct amdgpu_bo *ce_fw_obj;	/* BO holding CE firmware */
	uint64_t ce_fw_gpu_addr;
	uint32_t *ce_fw_ptr;		/* CPU mapping of ce_fw_obj */
};
323 | ||
8825af65 HZ |
/*
 * ME (micro engine) firmware objects plus the GFX queue topology and
 * per-ring MQD backups.
 */
struct amdgpu_me {
	struct amdgpu_bo *me_fw_obj;	/* BO holding ME firmware */
	uint64_t me_fw_gpu_addr;
	uint32_t *me_fw_ptr;		/* CPU mapping of me_fw_obj */

	struct amdgpu_bo *me_fw_data_obj;	/* separate firmware data BO */
	uint64_t me_fw_data_gpu_addr;
	uint32_t *me_fw_data_ptr;

	uint32_t num_me;		/* number of ME engines */
	uint32_t num_pipe_per_me;
	uint32_t num_queue_per_pipe;
	void *mqd_backup[AMDGPU_MAX_GFX_RINGS];

	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};
341 | ||
448fe192 HR |
/*
 * Top-level GFX block state embedded in struct amdgpu_device: firmware
 * handles/versions, rings, interrupt sources, GFXOFF bookkeeping, RAS
 * state, and multi-XCC partitioning state.
 */
struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;	/* guards GPU clock counter reads — TODO confirm at call sites */
	struct amdgpu_gfx_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_pfp pfp;
	struct amdgpu_ce ce;
	struct amdgpu_me me;
	struct amdgpu_mec mec;
	struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
	struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];	/* one KIQ per GC instance */
	struct amdgpu_imu imu;
	bool rs64_enable; /* firmware format */
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	const struct firmware *imu_fw; /* IMU firmware */
	uint32_t imu_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t rlc_srlc_fw_version;
	uint32_t rlc_srlc_feature_version;
	uint32_t rlc_srlg_fw_version;
	uint32_t rlc_srlg_feature_version;
	uint32_t rlc_srls_fw_version;
	uint32_t rlc_srls_feature_version;
	uint32_t rlcp_ucode_version;
	uint32_t rlcp_ucode_feature_version;
	uint32_t rlcv_ucode_version;
	uint32_t rlcv_ucode_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	bool mec_fw_write_wait;
	bool me_fw_write_wait;
	bool cp_fw_write_wait;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	/* compute rings are flattened across all GC instances */
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	struct amdgpu_irq_src cp_ecc_error_irq;
	struct amdgpu_irq_src sq_irq;
	struct amdgpu_irq_src rlc_gc_fed_irq;
	struct sq_work sq_work;

	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;

	/* gfx off */
	bool gfx_off_state;			/* true: enabled, false: disabled */
	struct mutex gfx_off_mutex;		/* mutex to change gfxoff state */
	uint32_t gfx_off_req_count;		/* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
	struct delayed_work gfx_off_delay_work;	/* async work to set gfx block off */
	uint32_t gfx_off_residency;		/* last logged residency */
	uint64_t gfx_off_entrycount;		/* count of times GPU has got into GFXOFF state */

	/* pipe reservation */
	struct mutex pipe_reserve_mutex;
	DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* ras */
	struct ras_common_if *ras_if;
	struct amdgpu_gfx_ras *ras;

	bool is_poweron;

	/* software rings multiplexed onto a hardware gfx ring */
	struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
	struct amdgpu_ring_mux muxer;

	bool cp_gfx_shadow; /* for gfx11 */

	uint16_t xcc_mask;		/* bitmask of present XCCs; count via NUM_XCC() */
	uint32_t num_xcc_per_xcp;
	struct mutex partition_mutex;	/* guards partition mode changes — TODO confirm */
	bool mcbp; /* mid command buffer preemption */
};
438 | ||
77462ab8 TZ |
/* RAS error-status register entry tagged with its memory block and SE count. */
struct amdgpu_gfx_ras_reg_entry {
	struct amdgpu_ras_err_status_reg_entry reg_entry;
	enum amdgpu_gfx_ras_mem_id_type mem_id_type;
	uint32_t se_num;
};
444 | ||
/* A table of RAS memory-id entries plus its length (see AMDGPU_GFX_MEMID_ENT). */
struct amdgpu_gfx_ras_mem_id_entry {
	const struct amdgpu_ras_memory_id_entry *mem_id_ent;
	uint32_t size;	/* number of entries in mem_id_ent */
};
449 | ||
450 | #define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)}, | |
451 | ||
448fe192 | 452 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) |
d51ac6d0 | 453 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) |
553f973a | 454 | #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) |
d58fe3cf | 455 | #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) |
0db0c037 | 456 | #define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) |
b9683c21 | 457 | |
378506a7 AD |
458 | /** |
459 | * amdgpu_gfx_create_bitmask - create a bitmask | |
460 | * | |
461 | * @bit_width: length of the mask | |
462 | * | |
463 | * create a variable length bit mask. | |
464 | * Returns the bitmask. | |
465 | */ | |
466 | static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width) | |
467 | { | |
468 | return (u32)((1ULL << bit_width) - 1); | |
469 | } | |
470 | ||
448fe192 HR |
471 | void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, |
472 | unsigned max_sh); | |
2db0cdbe | 473 | |
448fe192 HR |
474 | int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, |
475 | struct amdgpu_ring *ring, | |
def799c6 | 476 | struct amdgpu_irq_src *irq, int xcc_id); |
2db0cdbe | 477 | |
9f0256da | 478 | void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); |
2db0cdbe | 479 | |
def799c6 | 480 | void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id); |
448fe192 | 481 | int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, |
def799c6 | 482 | unsigned hpd_size, int xcc_id); |
448fe192 | 483 | |
4fc6a88f | 484 | int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, |
def799c6 LM |
485 | unsigned mqd_size, int xcc_id); |
486 | void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); | |
487 | int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); | |
488 | int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); | |
1156e1a6 AD |
489 | int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id); |
490 | int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id); | |
448fe192 HR |
491 | |
492 | void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); | |
e537c994 HZ |
493 | void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); |
494 | ||
7470bfcf HZ |
495 | int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, |
496 | int pipe, int queue); | |
5c180eb9 | 497 | void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, |
7470bfcf | 498 | int *mec, int *pipe, int *queue); |
def799c6 | 499 | bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, |
be697aa3 | 500 | int mec, int pipe, int queue); |
33abcb1f | 501 | bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, |
8c0225d7 | 502 | struct amdgpu_ring *ring); |
b07d1d73 APS |
503 | bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, |
504 | struct amdgpu_ring *ring); | |
7470bfcf HZ |
505 | int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, |
506 | int pipe, int queue); | |
507 | void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, | |
508 | int *me, int *pipe, int *queue); | |
509 | bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, | |
510 | int pipe, int queue); | |
c2d358d7 | 511 | void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); |
443c7f3c | 512 | int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); |
4e9b1fa5 | 513 | int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); |
0ad7347a AA |
514 | void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); |
515 | int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value); | |
516 | int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency); | |
517 | int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value); | |
725253ab TZ |
518 | int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, |
519 | void *err_data, | |
520 | struct amdgpu_iv_entry *entry); | |
521 | int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, | |
522 | struct amdgpu_irq_src *source, | |
523 | struct amdgpu_iv_entry *entry); | |
85150626 VL |
524 | uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); |
525 | void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); | |
a3bab325 | 526 | int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); |
2d89e2dd | 527 | void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); |
ec71b250 | 528 | |
89e4c448 | 529 | int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev); |
ac7b25d9 YC |
530 | int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, |
531 | struct amdgpu_iv_entry *entry); | |
66daccde LM |
532 | |
533 | bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); | |
98a54e88 | 534 | int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); |
993d218f | 535 | void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); |
d78c7132 TZ |
536 | void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, |
537 | void *ras_error_status, | |
538 | void (*func)(struct amdgpu_device *adev, void *ras_error_status, | |
539 | int xcc_id)); | |
f9632096 LL |
540 | |
541 | static inline const char *amdgpu_gfx_compute_mode_desc(int mode) | |
542 | { | |
543 | switch (mode) { | |
544 | case AMDGPU_SPX_PARTITION_MODE: | |
545 | return "SPX"; | |
546 | case AMDGPU_DPX_PARTITION_MODE: | |
547 | return "DPX"; | |
548 | case AMDGPU_TPX_PARTITION_MODE: | |
549 | return "TPX"; | |
550 | case AMDGPU_QPX_PARTITION_MODE: | |
551 | return "QPX"; | |
552 | case AMDGPU_CPX_PARTITION_MODE: | |
553 | return "CPX"; | |
554 | default: | |
555 | return "UNKNOWN"; | |
556 | } | |
557 | ||
558 | return "UNKNOWN"; | |
559 | } | |
560 | ||
d38ceaf9 | 561 | #endif |