if (q->xef)
xe_file_put(q->xef);
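+ /* Pairs with vmemdup_user() in exec_queue_set_hang_replay_state() */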
+ kvfree(q->replay_state);
kfree(q);
}
struct xe_lrc *lrc;
xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
- lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
- q->msix_vec, flags);
+ lrc = xe_lrc_create(q->hwe, q->vm, q->replay_state,
+ xe_lrc_ring_size(), q->msix_vec, flags);
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_lrc;
return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}
+static int exec_queue_set_hang_replay_state(struct xe_device *xe,
+ struct xe_exec_queue *q,
+ u64 value)
+{
+ size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
+ u64 __user *address = u64_to_user_ptr(value);
+ void *ptr;
+
+ ptr = vmemdup_user(address, size);
+ if (XE_IOCTL_DBG(xe, IS_ERR(ptr)))
+ return PTR_ERR(ptr);
+
+ /* Drop any state set by an earlier extension in the chain */
+ kvfree(q->replay_state);
+ q->replay_state = ptr;
+
+ return 0;
+}
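For context, userspace would chain this setter through the generic
set-property extension at exec queue creation time. A minimal sketch of the
caller side, assuming the replay blob was captured earlier at the size the
kernel computes for this engine class; exec_queue_create_with_replay is a
hypothetical helper, and only DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE is new
in this patch, the rest is existing xe_drm.h uAPI:

#include <stdint.h>
#include <sys/ioctl.h>
#include "xe_drm.h"

/* Create an exec queue preloaded with previously captured replay state.
 * 'replay' must be at least the replay size for this engine class; the
 * kernel copies it with vmemdup_user(), so the buffer may be freed once
 * the ioctl returns. */
static int exec_queue_create_with_replay(int fd, uint32_t vm_id,
                                         struct drm_xe_engine_class_instance *eci,
                                         void *replay, uint32_t *exec_queue_id)
{
        struct drm_xe_ext_set_property ext = {
                .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
                .property = DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE,
                .value = (uintptr_t)replay,
        };
        struct drm_xe_exec_queue_create create = {
                .extensions = (uintptr_t)&ext,
                .width = 1,
                .num_placements = 1,
                .vm_id = vm_id,
                .instances = (uintptr_t)eci,
        };
        int ret;

        ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
        if (ret)
                return ret;

        *exec_queue_id = create.exec_queue_id;
        return 0;
}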
+
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value);
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
+ [DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE] = exec_queue_set_hang_replay_state,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
- ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
return false;
}
-size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
+/**
+ * xe_gt_lrc_hang_replay_size() - Hang replay size
+ * @gt: The GT
+ * @class: Hardware engine class
+ *
+ * Determine the size of the GPU hang replay state for a GT and hardware
+ * engine class.
+ *
+ * Return: Size of the GPU hang replay state in bytes
+ */
+size_t xe_gt_lrc_hang_replay_size(struct xe_gt *gt, enum xe_engine_class class)
{
struct xe_device *xe = gt_to_xe(gt);
- size_t size;
-
- /* Per-process HW status page (PPHWSP) */
- size = LRC_PPHWSP_SIZE;
+ size_t size = 0;
/* Engine context image */
switch (class) {
size += 1 * SZ_4K;
}
+ return size;
+}
+
+size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
+{
+ size_t size = xe_gt_lrc_hang_replay_size(gt, class);
+
/* Add indirect ring state page */
if (xe_gt_has_indirect_ring_state(gt))
size += LRC_INDIRECT_RING_STATE_SIZE;
- return size;
+ return size + LRC_PPHWSP_SIZE;
}
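The refactor preserves the old total; a hedged sanity check illustrating the
resulting layout invariant (check_lrc_size_layout is a hypothetical helper,
not part of the patch):

/* The full LRC image is the PPHWSP, then the replay-visible engine
 * context image, then the indirect ring state page where supported. */
static void check_lrc_size_layout(struct xe_gt *gt, enum xe_engine_class class)
{
        size_t expected = LRC_PPHWSP_SIZE +
                          xe_gt_lrc_hang_replay_size(gt, class);

        if (xe_gt_has_indirect_ring_state(gt))
                expected += LRC_INDIRECT_RING_STATE_SIZE;

        WARN_ON(xe_gt_lrc_size(gt, class) != expected);
}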
/*
}
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size, u16 msix_vec,
- u32 init_flags)
+ struct xe_vm *vm, void *replay_state, u32 ring_size,
+ u16 msix_vec, u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
kref_init(&lrc->refcount);
lrc->gt = gt;
- lrc->replay_size = xe_gt_lrc_size(gt, hwe->class);
- if (xe_gt_has_indirect_ring_state(gt))
- lrc->replay_size -= LRC_INDIRECT_RING_STATE_SIZE;
+ lrc->replay_size = xe_gt_lrc_hang_replay_size(gt, hwe->class);
lrc->size = lrc_size;
lrc->flags = 0;
lrc->ring.size = ring_size;
* scratch.
*/
map = __xe_lrc_pphwsp_map(lrc);
- if (gt->default_lrc[hwe->class]) {
+ if (gt->default_lrc[hwe->class] || replay_state) {
 xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
- xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
- gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
- lrc_size - LRC_PPHWSP_SIZE);
+ if (gt->default_lrc[hwe->class])
+ xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
+ gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
+ lrc_size - LRC_PPHWSP_SIZE);
+ /* Replay state overrides the default engine context image */
+ if (replay_state)
+ xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
+ replay_state, lrc->replay_size);
} else {
void *init_data = empty_lrc_data(hwe);
* xe_lrc_create - Create a LRC
* @hwe: Hardware Engine
* @vm: The VM (address space)
+ * @replay_state: GPU hang replay state to copy into the context image, or NULL
* @ring_size: LRC ring size
* @msix_vec: MSI-X interrupt vector (for platforms that support it)
* @flags: LRC initialization flags
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec, u32 flags)
+ void *replay_state, u32 ring_size, u16 msix_vec, u32 flags)
{
struct xe_lrc *lrc;
int err;
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
+ err = xe_lrc_init(lrc, hwe, vm, replay_state, ring_size, msix_vec, flags);
if (err) {
kfree(lrc);
return ERR_PTR(err);
#define XE_LRC_CREATE_USER_CTX BIT(2)
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size, u16 msix_vec, u32 flags);
+ void *replay_state, u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
/**
return SZ_16K;
}
+size_t xe_gt_lrc_hang_replay_size(struct xe_gt *gt, enum xe_engine_class class);
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);