if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
-
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
- /* TODO investigate why this and the hdp flush above is needed,
+ /* TODO investigate why the TLB flush is needed,
* are we missing a flush somewhere else? */
adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
}
}
-static void gfx_v12_1_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
- switch (ring->me) {
- case 1:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
- break;
- case 2:
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
- break;
- default:
- return;
- }
- reg_mem_engine = 0;
- } else {
- ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
- reg_mem_engine = 1; /* pfp */
- }
-
- gfx_v12_1_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio.funcs->get_hdp_flush_req_offset(adev),
- adev->nbio.funcs->get_hdp_flush_done_offset(adev),
- ref_and_mask, ref_and_mask, 0x20);
-}
-
static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
.get_wptr = gfx_v12_1_ring_get_wptr_compute,
.set_wptr = gfx_v12_1_ring_set_wptr_compute,
.emit_frame_size =
- 7 + /* gfx_v12_1_ring_emit_hdp_flush */
- 5 + /* hdp invalidate */
7 + /* gfx_v12_1_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
.emit_fence = gfx_v12_1_ring_emit_fence,
.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
- .emit_hdp_flush = gfx_v12_1_ring_emit_hdp_flush,
.test_ring = gfx_v12_1_ring_test_ring,
.test_ib = gfx_v12_1_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.get_wptr = gfx_v12_1_ring_get_wptr_compute,
.set_wptr = gfx_v12_1_ring_set_wptr_compute,
.emit_frame_size =
- 7 + /* gfx_v12_1_ring_emit_hdp_flush */
- 5 + /*hdp invalidate */
7 + /* gfx_v12_1_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
-#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15_common.h"
}
-/**
- * sdma_v7_1_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
- *
- * @ring: amdgpu ring pointer
- *
- * Emit an hdp flush packet on the requested DMA ring.
- */
-static void sdma_v7_1_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
-
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
- << (ring->me % adev->sdma.num_inst_per_xcc);
-
- amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
-}
-
/**
* sdma_v7_1_ring_emit_fence - emit a fence on the DMA ring
*
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, 0);
.set_wptr = sdma_v7_1_ring_set_wptr,
.emit_frame_size =
5 + /* sdma_v7_1_ring_init_cond_exec */
- 6 + /* sdma_v7_1_ring_emit_hdp_flush */
6 + /* sdma_v7_1_ring_emit_pipeline_sync */
/* sdma_v7_1_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
.emit_fence = sdma_v7_1_ring_emit_fence,
.emit_pipeline_sync = sdma_v7_1_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v7_1_ring_emit_vm_flush,
- .emit_hdp_flush = sdma_v7_1_ring_emit_hdp_flush,
.test_ring = sdma_v7_1_ring_test_ring,
.test_ib = sdma_v7_1_ring_test_ib,
.insert_nop = sdma_v7_1_ring_insert_nop,