j = i + xcc_id * adev->gfx.num_compute_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.compute_ring[j],
- RESET_QUEUES, 0, 0);
+ RESET_QUEUES, 0, 0, xcc_id);
}
return 0;
}
j = i + xcc_id * adev->gfx.num_gfx_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.gfx_ring[j],
- PREEMPT_QUEUES, 0, 0);
+ PREEMPT_QUEUES, 0, 0, xcc_id);
}
}
return 0;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
r = amdgpu_mes_map_legacy_queue(adev,
- &adev->gfx.compute_ring[j]);
+ &adev->gfx.compute_ring[j],
+ xcc_id);
if (r) {
dev_err(adev->dev, "failed to map compute queue\n");
return r;
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
- &adev->gfx.gfx_ring[j]);
+ &adev->gfx.gfx_ring[j],
+ xcc_id);
if (r) {
dev_err(adev->dev, "failed to map gfx queue\n");
return r;
return 0;
if (adev->mes.ring[0].sched.ready)
- return amdgpu_mes_rreg(adev, reg);
+ return amdgpu_mes_rreg(adev, reg, xcc_id);
BUG_ON(!ring->funcs->emit_rreg);
return;
if (adev->mes.ring[0].sched.ready) {
- amdgpu_mes_wreg(adev, reg, v);
+ amdgpu_mes_wreg(adev, reg, v, xcc_id);
return;
}
if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
- ref, mask);
+ ref, mask, xcc_inst);
return;
}
int amdgpu_mes_init(struct amdgpu_device *adev)
{
int i, r, num_pipes;
+ int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
adev->mes.adev = adev;
spin_lock_init(&adev->mes.queue_id_lock);
mutex_init(&adev->mes.mutex_hidden);
- for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
- for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
if (r) {
dev_err(adev->dev,
goto error_doorbell;
if (adev->mes.hung_queue_db_array_size) {
- r = amdgpu_bo_create_kernel(adev,
- adev->mes.hung_queue_db_array_size * sizeof(u32),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->mes.hung_queue_db_array_gpu_obj,
- &adev->mes.hung_queue_db_array_gpu_addr,
- &adev->mes.hung_queue_db_array_cpu_addr);
- if (r) {
- dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
- goto error_doorbell;
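+ /* one hung-queue doorbell array per MES pipe on each XCC instance (sketch of intent, inferred from the loop bound below) */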
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj[i],
+ &adev->mes.hung_queue_db_array_gpu_addr[i],
+ &adev->mes.hung_queue_db_array_cpu_addr[i]);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)\n", r);
+ goto error_doorbell;
+ }
}
}
error_doorbell:
amdgpu_mes_doorbell_free(adev);
error:
- for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
if (adev->mes.sch_ctx_ptr[i])
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
if (adev->mes.query_status_fence_ptr[i])
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
+ if (adev->mes.hung_queue_db_array_gpu_obj[i])
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj[i],
+ &adev->mes.hung_queue_db_array_gpu_addr[i],
+ &adev->mes.hung_queue_db_array_cpu_addr[i]);
}
idr_destroy(&adev->mes.pasid_idr);
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
- int i;
-
- amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
- &adev->mes.hung_queue_db_array_gpu_addr,
- &adev->mes.hung_queue_db_array_cpu_addr);
+ int i, num_xcc = NUM_XCC(adev->gfx.xcc_mask);
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
- for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES * num_xcc; i++) {
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj[i],
+ &adev->mes.hung_queue_db_array_gpu_addr[i],
+ &adev->mes.hung_queue_db_array_cpu_addr[i]);
+
if (adev->mes.sch_ctx_ptr[i])
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
if (adev->mes.query_status_fence_ptr[i])
}
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+ struct amdgpu_ring *ring, uint32_t xcc_id)
{
struct mes_map_legacy_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
+ queue_input.xcc_id = xcc_id;
queue_input.queue_type = ring->funcs->type;
queue_input.doorbell_offset = ring->doorbell_index;
queue_input.pipe_id = ring->pipe;
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
- u64 gpu_addr, u64 seq)
+ u64 gpu_addr, u64 seq, uint32_t xcc_id)
{
struct mes_unmap_legacy_queue_input queue_input;
int r;
+ queue_input.xcc_id = xcc_id;
queue_input.action = action;
queue_input.queue_type = ring->funcs->type;
queue_input.doorbell_offset = ring->doorbell_index;
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
unsigned int vmid,
- bool use_mmio)
+ bool use_mmio,
+ uint32_t xcc_id)
{
struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
+ queue_input.xcc_id = xcc_id;
queue_input.queue_type = ring->funcs->type;
queue_input.doorbell_offset = ring->doorbell_index;
queue_input.me_id = ring->me;
int queue_type,
bool detect_only,
unsigned int *hung_db_num,
- u32 *hung_db_array)
-
+ u32 *hung_db_array,
+ uint32_t xcc_id)
{
struct mes_detect_and_reset_queue_input input;
- u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr[xcc_id];
int r, i;
if (!hung_db_num || !hung_db_array)
return -EINVAL;
/* Clear the doorbell array before detection */
- memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
+ memset(adev->mes.hung_queue_db_array_cpu_addr[xcc_id], AMDGPU_MES_INVALID_DB_OFFSET,
adev->mes.hung_queue_db_array_size * sizeof(u32));
input.queue_type = queue_type;
input.detect_only = detect_only;
return r;
}
-uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
+uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t xcc_id)
{
struct mes_misc_op_input op_input;
int r, val = 0;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
+ op_input.xcc_id = xcc_id;
op_input.op = MES_MISC_OP_READ_REG;
op_input.read_reg.reg_offset = reg;
op_input.read_reg.buffer_addr = read_val_gpu_addr;
return val;
}
-int amdgpu_mes_wreg(struct amdgpu_device *adev,
- uint32_t reg, uint32_t val)
+int amdgpu_mes_wreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t val, uint32_t xcc_id)
{
struct mes_misc_op_input op_input;
int r;
+ op_input.xcc_id = xcc_id;
op_input.op = MES_MISC_OP_WRITE_REG;
op_input.write_reg.reg_offset = reg;
op_input.write_reg.reg_value = val;
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask)
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_id)
{
struct mes_misc_op_input op_input;
int r;
+ op_input.xcc_id = xcc_id;
op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
op_input.wrm_reg.reg0 = reg0;
op_input.wrm_reg.reg1 = reg1;
ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
- ref_and_mask, ref_and_mask);
+ ref_and_mask, ref_and_mask, 0);
}
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint32_t spi_gdbg_per_vmid_cntl,
const uint32_t *tcp_watch_cntl,
uint32_t flags,
- bool trap_en)
+ bool trap_en,
+ uint32_t xcc_id)
{
struct mes_misc_op_input op_input = {0};
int r;
return -EINVAL;
}
+ op_input.xcc_id = xcc_id;
op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
op_input.set_shader_debugger.process_context_addr = process_context_addr;
op_input.set_shader_debugger.flags.u32all = flags;
}
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
- uint64_t process_context_addr)
+ uint64_t process_context_addr,
+ uint32_t xcc_id)
{
struct mes_misc_op_input op_input = {0};
int r;
return -EINVAL;
}
+ op_input.xcc_id = xcc_id;
op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
op_input.set_shader_debugger.process_context_addr = process_context_addr;
op_input.set_shader_debugger.flags.process_ctx_flush = true;
struct amdgpu_mes_funcs;
enum amdgpu_mes_pipe {
- AMDGPU_MES_SCHED_PIPE = 0,
- AMDGPU_MES_KIQ_PIPE,
+ AMDGPU_MES_PIPE_0 = 0,
+ AMDGPU_MES_PIPE_1,
AMDGPU_MAX_MES_PIPES = 2,
};
+#define AMDGPU_MES_SCHED_PIPE AMDGPU_MES_PIPE_0
+#define AMDGPU_MES_KIQ_PIPE AMDGPU_MES_PIPE_1
+
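+/*
+ * Multi-XCC parts expose one pair of MES pipes per GC instance (XCC), so
+ * per-pipe resources are sized for every (XCC, pipe) combination and are
+ * addressed through MES_PIPE_INST().
+ */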
+#define AMDGPU_MAX_MES_INST_PIPES \
+ (AMDGPU_MAX_MES_PIPES * AMDGPU_MAX_GC_INSTANCES)
+
+#define MES_PIPE_INST(xcc_id, pipe_id) \
+ ((xcc_id) * AMDGPU_MAX_MES_PIPES + (pipe_id))
+
struct amdgpu_mes {
struct amdgpu_device *adev;
uint64_t default_process_quantum;
uint64_t default_gang_quantum;
- struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
- spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_ring ring[AMDGPU_MAX_MES_INST_PIPES];
+ spinlock_t ring_lock[AMDGPU_MAX_MES_INST_PIPES];
const struct firmware *fw[AMDGPU_MAX_MES_PIPES];
/* mes ucode */
- struct amdgpu_bo *ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
- uint64_t ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
- uint32_t *ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_bo *ucode_fw_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t ucode_fw_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ uint32_t *ucode_fw_ptr[AMDGPU_MAX_MES_INST_PIPES];
uint64_t uc_start_addr[AMDGPU_MAX_MES_PIPES];
/* mes ucode data */
- struct amdgpu_bo *data_fw_obj[AMDGPU_MAX_MES_PIPES];
- uint64_t data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
- uint32_t *data_fw_ptr[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_bo *data_fw_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t data_fw_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ uint32_t *data_fw_ptr[AMDGPU_MAX_MES_INST_PIPES];
uint64_t data_start_addr[AMDGPU_MAX_MES_PIPES];
/* eop gpu obj */
- struct amdgpu_bo *eop_gpu_obj[AMDGPU_MAX_MES_PIPES];
- uint64_t eop_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_bo *eop_gpu_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t eop_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
- void *mqd_backup[AMDGPU_MAX_MES_PIPES];
- struct amdgpu_irq_src irq[AMDGPU_MAX_MES_PIPES];
+ void *mqd_backup[AMDGPU_MAX_MES_INST_PIPES];
+ struct amdgpu_irq_src irq[AMDGPU_MAX_MES_INST_PIPES];
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
- uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
- uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
- uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
- uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
- uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
- uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
+
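+ /* per (XCC, pipe) scheduler context and query-status fence buffers */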
+ uint32_t sch_ctx_offs[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_INST_PIPES];
+ uint32_t query_status_fence_offs[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_INST_PIPES];
uint32_t saved_flags;
/* initialize kiq pipe */
- int (*kiq_hw_init)(struct amdgpu_device *adev);
- int (*kiq_hw_fini)(struct amdgpu_device *adev);
+ int (*kiq_hw_init)(struct amdgpu_device *adev,
+ uint32_t xcc_id);
+ int (*kiq_hw_fini)(struct amdgpu_device *adev,
+ uint32_t xcc_id);
/* MES doorbells */
uint32_t db_start_dw_offset;
int hung_queue_db_array_size;
int hung_queue_hqd_info_offset;
- struct amdgpu_bo *hung_queue_db_array_gpu_obj;
- uint64_t hung_queue_db_array_gpu_addr;
- void *hung_queue_db_array_cpu_addr;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t hung_queue_db_array_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ void *hung_queue_db_array_cpu_addr[AMDGPU_MAX_MES_INST_PIPES];
};
struct amdgpu_mes_gang {
};
struct mes_add_queue_input {
+ uint32_t xcc_id;
uint32_t process_id;
uint64_t page_table_base_addr;
uint64_t process_va_start;
};
struct mes_remove_queue_input {
+ uint32_t xcc_id;
uint32_t doorbell_offset;
uint64_t gang_context_addr;
bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
+ uint32_t xcc_id;
uint32_t queue_type;
uint32_t doorbell_offset;
uint32_t pipe_id;
};
struct mes_unmap_legacy_queue_input {
+ uint32_t xcc_id;
enum amdgpu_unmap_queues_action action;
uint32_t queue_type;
uint32_t doorbell_offset;
};
struct mes_suspend_gang_input {
+ uint32_t xcc_id;
bool suspend_all_gangs;
uint64_t gang_context_addr;
uint64_t suspend_fence_addr;
};
struct mes_resume_gang_input {
+ uint32_t xcc_id;
bool resume_all_gangs;
uint64_t gang_context_addr;
};
struct mes_reset_queue_input {
+ uint32_t xcc_id;
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
};
struct mes_misc_op_input {
- enum mes_misc_opcode op;
+ uint32_t xcc_id;
+ enum mes_misc_opcode op;
union {
struct {
struct mes_inv_tlbs_pasid_input *input);
};
-#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
-#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
+#define amdgpu_mes_kiq_hw_init(adev, xcc_id) \
+ (adev)->mes.kiq_hw_init((adev), (xcc_id))
+#define amdgpu_mes_kiq_hw_fini(adev, xcc_id) \
+ (adev)->mes.kiq_hw_fini((adev), (xcc_id))
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
+ struct amdgpu_ring *ring, uint32_t xcc_id);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
- u64 gpu_addr, u64 seq);
+ u64 gpu_addr, u64 seq, uint32_t xcc_id);
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
unsigned int vmid,
- bool use_mmio);
+ bool use_mmio,
+ uint32_t xcc_id);
int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
int queue_type,
bool detect_only,
unsigned int *hung_db_num,
- u32 *hung_db_array);
+ u32 *hung_db_array,
+ uint32_t xcc_id);
-uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
+uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t xcc_id);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
- uint32_t reg, uint32_t val);
+ uint32_t reg, uint32_t val, uint32_t xcc_id);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask);
+ uint32_t ref, uint32_t mask, uint32_t xcc_id);
int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
const uint32_t *tcp_watch_cntl,
uint32_t flags,
- bool trap_en);
+ bool trap_en,
+ uint32_t xcc_id);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
- uint64_t process_context_addr);
+ uint64_t process_context_addr, uint32_t xcc_id);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
- amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
+ amdgpu_mes_unmap_legacy_queue(adev, ring, action,
+ gpu_addr, seq, 0);
return;
}
}
if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
- r = amdgpu_mes_kiq_hw_init(adev);
+ r = amdgpu_mes_kiq_hw_init(adev, 0);
else
r = gfx_v11_0_kiq_resume(adev);
if (r)
if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
- amdgpu_mes_kiq_hw_fini(adev);
+ amdgpu_mes_kiq_hw_fini(adev, 0);
}
if (amdgpu_sriov_vf(adev))
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false, 0);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
r = gfx_v11_0_reset_compute_pipe(ring);
dev_err(adev->dev, "fail to init kcq\n");
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kcq\n");
return r;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
- amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
+ amdgpu_mes_unmap_legacy_queue(adev, ring, action,
+ gpu_addr, seq, 0);
return;
}
}
if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
- r = amdgpu_mes_kiq_hw_init(adev);
+ r = amdgpu_mes_kiq_hw_init(adev, 0);
else
r = gfx_v12_0_kiq_resume(adev);
if (r)
if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
- amdgpu_mes_kiq_hw_fini(adev);
+ amdgpu_mes_kiq_hw_fini(adev, 0);
}
if (amdgpu_sriov_vf(adev)) {
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false, 0);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
r = gfx_v12_reset_gfx_pipe(ring);
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kgq\n");
return r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
if (r) {
dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
r = gfx_v12_0_reset_compute_pipe(ring);
dev_err(adev->dev, "failed to init kcq\n");
return r;
}
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
if (r) {
dev_err(adev->dev, "failed to remap kcq\n");
return r;
amdgpu_mes_lock(&adev->mes);
r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
- &hung_db_num, db_array);
+ &hung_db_num, db_array, 0);
amdgpu_mes_unlock(&adev->mes);
if (r) {
dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
-static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
-static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
+static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev, uint32_t xcc_id);
+static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev, uint32_t xcc_id);
#define MES_EOP_SIZE 2048
#define GFX_MES_DRAM_SIZE 0x80000
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
mes_reset_queue_pkt.doorbell_offset_addr =
- mes->hung_queue_db_array_gpu_addr;
+ mes->hung_queue_db_array_gpu_addr[0];
if (input->detect_only)
mes_reset_queue_pkt.hang_detect_only = 1;
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
-static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
+static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev, uint32_t xcc_id)
{
int r = 0;
struct amdgpu_ip_block *ip_block;
return r;
}
-static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
+static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev, uint32_t xcc_id)
{
if (adev->mes.ring[0].sched.ready) {
mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
-static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
-static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
+static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev, uint32_t xcc_id);
+static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev, uint32_t xcc_id);
#define MES_EOP_SIZE 2048
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
mes_reset_queue_pkt.doorbell_offset_addr =
- mes->hung_queue_db_array_gpu_addr;
+ mes->hung_queue_db_array_gpu_addr[0];
if (input->detect_only)
mes_reset_queue_pkt.hang_detect_only = 1;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
if (adev->enable_uni_mes)
- r = amdgpu_mes_map_legacy_queue(adev, ring);
+ r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
else
r = mes_v12_0_kiq_enable_queue(adev);
if (r)
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
-static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
+static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev, uint32_t xcc_id)
{
int r = 0;
struct amdgpu_ip_block *ip_block;
return r;
}
-static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
+static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev, uint32_t xcc_id)
{
if (adev->mes.ring[0].sched.ready) {
if (adev->enable_uni_mes)
amdgpu_mes_unmap_legacy_queue(adev,
&adev->mes.ring[AMDGPU_MES_SCHED_PIPE],
- RESET_QUEUES, 0, 0);
+ RESET_QUEUES, 0, 0, 0);
else
mes_v12_0_kiq_dequeue_sched(adev);
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true, 0);
if (r)
return r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true, 0);
if (r)
return r;
}
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
- pdd->watch_points, flags, sq_trap_en);
+ pdd->watch_points, flags, sq_trap_en, 0);
}
#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
- pdd->proc_ctx_gpu_addr);
+ pdd->proc_ctx_gpu_addr, 0);
up_read(&dev->adev->reset_domain->sem);
}
pdd->already_dequeued = true;