struct intel_dsb {
enum intel_dsb_id id;
- struct intel_dsb_buffer dsb_buf;
+ struct intel_dsb_buffer *dsb_buf;
struct intel_crtc *crtc;
/*
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(display->drm,
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
- intel_dsb_buffer_read(&dsb->dsb_buf, i),
- intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
- intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
- intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
+ intel_dsb_buffer_read(dsb->dsb_buf, i),
+ intel_dsb_buffer_read(dsb->dsb_buf, i + 1),
+ intel_dsb_buffer_read(dsb->dsb_buf, i + 2),
+ intel_dsb_buffer_read(dsb->dsb_buf, i + 3));
drm_dbg_kms(display->drm, "}\n");
}
unsigned int intel_dsb_head(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
+ return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
}
static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + intel_dsb_size(dsb);
+ return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf) + intel_dsb_size(dsb);
}
static void intel_dsb_ins_align(struct intel_dsb *dsb)
dsb->ins[0] = ldw;
dsb->ins[1] = udw;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
+ intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
+ intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
/* Update the count */
dsb->ins[0]++;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
+ intel_dsb_buffer_write(dsb->dsb_buf, dsb->ins_start_offset + 0,
dsb->ins[0]);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
+ intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, val);
/* If the number of data words is odd, the last dword should be 0. */
if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
+ intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
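For context, a quick worked example of the tail alignment above (illustrative numbers, assuming CACHELINE_BYTES is 64):
/*
 * Illustrative only: with dsb->free_pos = 10 dwords, tail = 40 bytes and
 * ALIGN(40, 64) = 64, so the 24 bytes after the last instruction are zeroed
 * and free_pos becomes 64 / 4 = 16, i.e. the next cacheline boundary.
 */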
* "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
*/
if (aligned_tail - tail <= 2 * 8)
- intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
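To illustrate the check above (my reading of the workaround; the numbers are hypothetical): each DSB instruction occupies one qword, so a GOSUB emitted at the current tail would land in QW slot 6 or 7 whenever at most two qwords remain before the next cacheline boundary:
/*
 * Illustrative only: tail = 120 bytes is QW slot 7 of its cacheline
 * (in-cacheline offset 56), aligned_tail = 128 and 128 - 120 = 8 <= 2 * 8,
 * so the remainder is zeroed and the GOSUB ends up at the start of the
 * next cacheline instead.
 */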
*/
intel_dsb_noop(dsb, 8);
- intel_dsb_buffer_flush_map(&dsb->dsb_buf);
+ intel_dsb_buffer_flush_map(dsb->dsb_buf);
}
void intel_dsb_finish(struct intel_dsb *dsb)
{
intel_dsb_align_tail(dsb);
- intel_dsb_buffer_flush_map(&dsb->dsb_buf);
+ intel_dsb_buffer_flush_map(dsb->dsb_buf);
}
static u32 dsb_error_int_status(struct intel_display *display)
!is_busy,
100, 1000, false);
if (ret) {
- u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
+ u32 offset = intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
unsigned int max_cmds)
{
struct intel_display *display = to_intel_display(state);
+ struct intel_dsb_buffer *dsb_buf;
struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- if (!intel_dsb_buffer_create(display->drm, &dsb->dsb_buf, size))
+ dsb_buf = intel_dsb_buffer_create(display->drm, size);
+ if (IS_ERR(dsb_buf))
goto out_put_rpm;
+ dsb->dsb_buf = dsb_buf;
+
intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- intel_dsb_buffer_cleanup(&dsb->dsb_buf);
+ intel_dsb_buffer_cleanup(dsb->dsb_buf);
kfree(dsb);
}
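Note for reviewers: intel_dsb_buffer_cleanup() now also frees the struct intel_dsb_buffer itself (the kfree(dsb_buf) added in both backends below), so this caller only drops its pointer and frees the dsb.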
memset(&dsb_buf->cmd_buf[idx], val, size);
}
-bool intel_dsb_buffer_create(struct drm_device *drm, struct intel_dsb_buffer *dsb_buf, size_t size)
+struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_dsb_buffer *dsb_buf;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *buf;
+ int ret;
+
+ dsb_buf = kzalloc(sizeof(*dsb_buf), GFP_KERNEL);
+ if (!dsb_buf)
+ return ERR_PTR(-ENOMEM);
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(obj))
- return false;
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err;
+ }
} else {
obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
- if (IS_ERR(obj))
- return false;
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto err;
+ }
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
i915_gem_object_put(obj);
- return false;
+ goto err;
}
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
- return false;
+ goto err;
}
dsb_buf->vma = vma;
dsb_buf->cmd_buf = buf;
dsb_buf->buf_size = size;
- return true;
+ return dsb_buf;
+
+err:
+ kfree(dsb_buf);
+
+ return ERR_PTR(ret);
}
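With the allocation moved inside intel_dsb_buffer_create(), callers follow the usual ERR_PTR contract. A minimal usage sketch (hypothetical caller, not part of the patch):

	struct intel_dsb_buffer *dsb_buf;

	dsb_buf = intel_dsb_buffer_create(display->drm, size);
	if (IS_ERR(dsb_buf))
		return PTR_ERR(dsb_buf);	/* or goto an unwind label, as in intel_dsb_prepare() */

	/* ... write and flush DSB instructions ... */

	intel_dsb_buffer_cleanup(dsb_buf);	/* also kfree()s the struct itself now */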
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb_buf);
}
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
}
-bool intel_dsb_buffer_create(struct drm_device *drm, struct intel_dsb_buffer *dsb_buf, size_t size)
+struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct xe_device *xe = to_xe_device(drm);
+ struct intel_dsb_buffer *dsb_buf;
struct xe_bo *obj;
struct i915_vma *vma;
+ int ret;
+
+ dsb_buf = kzalloc(sizeof(*dsb_buf), GFP_KERNEL);
+ if (!dsb_buf)
+ return ERR_PTR(-ENOMEM);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma)
- return false;
+ if (!vma) {
+ ret = -ENOMEM;
+ goto err_vma;
+ }
/* Set scanout flag for WC mapping */
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
if (IS_ERR(obj)) {
- kfree(vma);
- return false;
+ ret = PTR_ERR(obj);
+ goto err_pin_map;
}
vma->bo = obj;
dsb_buf->vma = vma;
dsb_buf->buf_size = size;
- return true;
+ return dsb_buf;
+
+err_pin_map:
+ kfree(vma);
+err_vma:
+ kfree(dsb_buf);
+
+ return ERR_PTR(ret);
}
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);
kfree(dsb_buf->vma);
+ kfree(dsb_buf);
}
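The xe backend now mirrors the i915 one: the unwind labels release exactly what was set up before the failing step (err_pin_map frees the vma wrapper and falls through to err_vma, which frees dsb_buf), and cleanup releases the BO mapping, the vma wrapper, and the intel_dsb_buffer itself.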
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)