amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
- if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
+ if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset) {
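+ /* per-queue reset is supported for both compute and gfx queues */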
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
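+
+/*
+ * Like gfx_v9_0_ring_emit_wreg(), but on gfx rings the WRITE_DATA packet
+ * is executed by the ME (ENGINE_SEL 0) rather than the PFP, so the write
+ * is ordered with work the ME has already processed.
+ */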
+static void gfx_v9_0_ring_emit_wreg_me(struct amdgpu_ring *ring,
+ uint32_t reg,
+ uint32_t val)
+{
+ uint32_t cmd = 0;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = (1 << 16); /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, cmd);
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
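+/*
+ * Reset a hung kernel gfx queue (KGQ): have the KIQ write CP_VMID_RESET
+ * to reset every queue belonging to the offending VMID, then emit a
+ * fence/wait sequence on the ring itself to clear the reset state and
+ * verify the pipeline can make forward progress again.
+ */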
+static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
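+ /* stop scheduler submission and back up the ring's unprocessed commands */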
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* write CP_VMID_RESET via the KIQ to request the reset - 5 */
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v9_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
+ return r;
+
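+ /*
+ * Recovery sequence on the ring itself: a trailing fence plus an
+ * ME-side wait, the CP_VMID_RESET clear executed by the ME, an
+ * ENABLE_LEGACY_PIPELINE event, then a second fence with a PFP-side
+ * wait so both front-end engines are known to be past the reset.
+ */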
+ if (amdgpu_ring_alloc(ring, 8 + 7 + 5 + 2 + 8 + 7))
+ return -ENOMEM;
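+ /* the "- N" comments below are dword counts matching the allocation above */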
+ /* emit the fence to finish the reset - 8 */
+ ring->trail_seq++;
+ gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+ ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
+ /* wait for the fence on the ME - 7 */
+ gfx_v9_0_wait_reg_mem(ring, 0, 1, 0,
+ lower_32_bits(ring->trail_fence_gpu_addr),
+ upper_32_bits(ring->trail_fence_gpu_addr),
+ ring->trail_seq, 0xffffffff, 4);
+ /* clear mmCP_VMID_RESET via the ME - 5 */
+ gfx_v9_0_ring_emit_wreg_me(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
+ /* event write ENABLE_LEGACY_PIPELINE - 2 */
+ gfx_v9_0_ring_emit_event_write(ring, ENABLE_LEGACY_PIPELINE, 0);
+ /* emit a second trailing fence - 8 */
+ ring->trail_seq++;
+ gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+ ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
+ /* wait for the fence on the PFP - 7 */
+ gfx_v9_0_wait_reg_mem(ring, 1, 1, 0,
+ lower_32_bits(ring->trail_fence_gpu_addr),
+ upper_32_bits(ring->trail_fence_gpu_addr),
+ ring->trail_seq, 0xffffffff, 4);
+ amdgpu_ring_commit(ring);
+ /* wait for the commands to complete */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
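+ /* signal the guilty fence, re-emit the backed-up commands and restart the scheduler */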
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *timedout_fence)
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
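+ /* per-queue reset replaces the old soft recovery path */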
+ .reset = gfx_v9_0_reset_kgq,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
.reset = gfx_v9_0_reset_kcq,