void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t occupied, chunk1, chunk2;
-	uint32_t *dst;

	occupied = ring->wptr & ring->buf_mask;
-	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;

	if (chunk1)
-		memset32(dst, ring->funcs->nop, chunk1);
+		memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);

-	if (chunk2) {
-		dst = (void *)ring->ring;
-		memset32(dst, ring->funcs->nop, chunk2);
-	}
+	if (chunk2)
+		memset32(ring->ring, ring->funcs->nop, chunk2);

	ring->wptr += count;
	ring->wptr &= ring->ptr_mask;
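The hunk above only drops the intermediate dst pointer; the wrap-around logic is unchanged: the NOP fill is split into one chunk up to the end of the power-of-two ring buffer and, if needed, a second chunk that restarts at index 0. The following standalone userspace sketch (not part of the patch; buffer size, NOP value, and the fill32() stand-in for the kernel's memset32() are made up for illustration) shows that split. Note memset32() counts 32-bit words, not bytes.

/* Illustration only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE_DW 16u                 /* must be a power of two */
#define BUF_MASK     (RING_SIZE_DW - 1u)
#define NOP_VALUE    0xdeadbeefu         /* stand-in for ring->funcs->nop */

static uint32_t ring[RING_SIZE_DW];
static uint64_t wptr;

/* userspace stand-in for the kernel's memset32(): count is in dwords */
static void fill32(uint32_t *dst, uint32_t val, uint32_t count)
{
	while (count--)
		*dst++ = val;
}

static void insert_nop(uint32_t count)
{
	uint32_t occupied = wptr & BUF_MASK;
	uint32_t chunk1 = RING_SIZE_DW - occupied;   /* dwords until the wrap point */
	uint32_t chunk2;

	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;

	if (chunk1)
		fill32(&ring[occupied], NOP_VALUE, chunk1);
	if (chunk2)                                  /* wrapped part starts at index 0 */
		fill32(ring, NOP_VALUE, chunk2);

	wptr += count;
}

int main(void)
{
	wptr = 13;             /* 3 dwords left before the wrap */
	insert_nop(6);         /* fills [13..15], then wraps to [0..2] */
	printf("ring[15]=%#x ring[2]=%#x ring[3]=%#x\n",
	       (unsigned)ring[15], (unsigned)ring[2], (unsigned)ring[3]);
	return 0;
}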
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo		*ring_obj;
-	volatile uint32_t		*ring;
+	uint32_t			*ring;
	unsigned			rptr_offs;
	u64				rptr_gpu_addr;
	volatile u32			*rptr_cpu_addr;
	u64				cond_exe_gpu_addr;
	volatile u32			*cond_exe_cpu_addr;
	unsigned int			set_q_mode_offs;
-	volatile u32			*set_q_mode_ptr;
+	u32				*set_q_mode_ptr;
	u64				set_q_mode_token;
	unsigned			vm_hub;
	unsigned			vm_inv_eng;
				       void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
-	void *dst;

	occupied = ring->wptr & ring->buf_mask;
-	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
-		memcpy(dst, src, chunk1);
+		memcpy(&ring->ring[occupied], src, chunk1);

	if (chunk2) {
		src += chunk1;
-		dst = (void *)ring->ring;
-		memcpy(dst, src, chunk2);
+		memcpy(ring->ring, src, chunk2);
	}

	ring->wptr += count_dw;
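Unlike the NOP path, this multi-dword write path converts chunk1 and chunk2 from dwords to bytes (<< 2) because memcpy() takes a byte count; src is then advanced by the byte-sized chunk1 before the wrapped copy. A minimal userspace sketch of that conversion and split follows; it is an illustration only, with hypothetical names (write_multiple, RING_SIZE_DW), not the kernel helper.

/* Illustration only -- not kernel code. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE_DW 8u                  /* must be a power of two */
#define BUF_MASK     (RING_SIZE_DW - 1u)

static uint32_t ring[RING_SIZE_DW];
static uint64_t wptr;

static void write_multiple(const uint32_t *src, unsigned count_dw)
{
	unsigned occupied = wptr & BUF_MASK;
	unsigned chunk1 = RING_SIZE_DW - occupied;   /* dwords until the wrap point */
	unsigned chunk2;

	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;                                /* dwords -> bytes for memcpy() */
	chunk2 <<= 2;

	if (chunk1)
		memcpy(&ring[occupied], src, chunk1);
	if (chunk2)                                  /* wrapped part starts at index 0 */
		memcpy(ring, (const uint8_t *)src + chunk1, chunk2);

	wptr += count_dw;
}

int main(void)
{
	const uint32_t pkt[4] = { 1, 2, 3, 4 };

	wptr = 6;                 /* 2 dwords left before the wrap */
	write_multiple(pkt, 4);   /* writes 1,2 at [6..7] and 3,4 at [0..1] */
	printf("%u %u %u %u\n", (unsigned)ring[6], (unsigned)ring[7],
	       (unsigned)ring[0], (unsigned)ring[1]);
	return 0;
}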