static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
- struct amdxdna_drm_query_hwctx *tmp __free(kfree) = NULL;
- struct amdxdna_drm_get_info *get_info_args = arg;
- struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
- if (get_info_args->buffer_size < sizeof(*tmp))
+ if (!array_args->num_element)
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
tmp->num_col = hwctx->num_col;
tmp->command_submissions = hwctx->priv->seq;
tmp->command_completions = hwctx->priv->completed;
-
- buf = u64_to_user_ptr(get_info_args->buffer);
-
- if (copy_to_user(buf, tmp, sizeof(*tmp)))
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
return -EFAULT;
- get_info_args->buffer += sizeof(*tmp);
- get_info_args->buffer_size -= sizeof(*tmp);
+ array_args->buffer += size;
+ array_args->num_element--;
return 0;
}
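The min() clamp in the callback is what keeps this array ABI extensible in both directions: the kernel never copies more than element_size bytes per entry, so userspace built against an older (smaller) or newer (larger) struct definition always receives a valid prefix. A minimal sketch of the effect, with made-up sizes:

/* Illustrative sizes only, not values from the patch: an older userspace
 * passes a 96-byte element_size while this kernel's entry is 160 bytes.
 */
static unsigned int bytes_copied_per_entry(void)
{
	unsigned int kernel_entry_size = 160;	/* sizeof(*tmp) in this kernel */
	unsigned int user_element_size = 96;	/* from an older uapi header   */

	/* Each copy_to_user() moves a 96-byte prefix of the kernel entry,
	 * and the user pointer advances by the same 96 bytes, so entries
	 * stay packed at the stride userspace expects.
	 */
	return kernel_entry_size < user_element_size ?
	       kernel_entry_size : user_element_size;	/* 96 */
}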
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
+ struct amdxdna_drm_get_array array_args;
struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_drm_get_info info_args;
struct amdxdna_client *tmp_client;
int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- info_args.buffer = args->buffer;
- info_args.buffer_size = args->buffer_size;
-
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
- ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb);
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
if (ret)
break;
}
- args->buffer_size = (u32)(info_args.buffer - args->buffer);
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
return ret;
}
return ret;
}
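Note that the legacy GET_INFO path above now funnels through the new callback simply by setting element_size to the old struct's size; the clamp then truncates each amdxdna_drm_hwctx_entry down to its leading fields, which this reuse implies mirror the legacy amdxdna_drm_query_hwctx layout. Hypothetical compile-time checks of that assumption (not part of the patch; they would need linux/build_bug.h and linux/stddef.h) might read:

static_assert(sizeof(struct amdxdna_drm_hwctx_entry) >=
	      sizeof(struct amdxdna_drm_query_hwctx));
static_assert(offsetof(struct amdxdna_drm_hwctx_entry, num_col) ==
	      offsetof(struct amdxdna_drm_query_hwctx, num_col));
static_assert(offsetof(struct amdxdna_drm_hwctx_entry, command_submissions) ==
	      offsetof(struct amdxdna_drm_query_hwctx, command_submissions));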
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return ret;
+}
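The byte-budget conversion above is worth a worked example: the caller's capacity is num_element * element_size bytes, and dividing that by the clamped per-entry size can legitimately yield more entries than the caller nominally asked for. A self-contained sketch with illustrative sizes:

/* Worked example; the sizes are made up, not taken from the patch. */
static unsigned int array_capacity(void)
{
	unsigned int user_element_size = 256;	/* from a newer uapi header      */
	unsigned int user_num_element = 4;	/* buffer holds 4 * 256 = 1024 B */
	unsigned int kernel_entry_size = 200;	/* sizeof(entry) in this kernel  */
	unsigned int element_size, budget;

	element_size = user_element_size < kernel_entry_size ?
		       user_element_size : kernel_entry_size;	/* 200  */
	budget = user_num_element * user_element_size;		/* 1024 */

	return budget / element_size;	/* 5: one more entry than requested */
}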
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Unsupported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
static int aie2_set_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_resume,
- .suspend = aie2_hw_suspend,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init           = aie2_init,
+ .fini           = aie2_fini,
+ .resume         = aie2_hw_resume,
+ .suspend        = aie2_hw_suspend,
+ .get_aie_info   = aie2_get_info,
+ .set_aie_state  = aie2_set_state,
+ .hwctx_init     = aie2_hwctx_init,
+ .hwctx_fini     = aie2_hwctx_fini,
+ .hwctx_config   = aie2_hwctx_config,
+ .cmd_submit     = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
+ .get_array      = aie2_get_array,
};
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 1
+
/*
* Bind the driver based on the (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. The devices with
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
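The guard(mutex)() above comes from linux/cleanup.h and drops the lock automatically when the function returns, so neither exit path needs an explicit unlock; it behaves like this explicit form:

	mutex_lock(&xdna->dev_lock);
	ret = xdna->dev_info->ops->get_array(client, args);
	mutex_unlock(&xdna->dev_lock);
	return ret;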
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
DRM_AMDXDNA_EXEC_CMD,
DRM_AMDXDNA_GET_INFO,
DRM_AMDXDNA_SET_STATE,
+ DRM_AMDXDNA_GET_ARRAY = 10,
};
/**
__u64 buffer; /* in/out */
};
+#define AMDXDNA_HWCTX_STATE_IDLE 0
+#define AMDXDNA_HWCTX_STATE_ACTIVE 1
+
+/**
+ * struct amdxdna_drm_hwctx_entry - The hardware context array entry
+ */
+struct amdxdna_drm_hwctx_entry {
+ /** @context_id: Context ID. */
+ __u32 context_id;
+ /** @start_col: Start AIE array column assigned to context. */
+ __u32 start_col;
+ /** @num_col: Number of AIE array columns assigned to context. */
+ __u32 num_col;
+ /** @hwctx_id: The real hardware context id. */
+ __u32 hwctx_id;
+ /** @pid: ID of process which created this context. */
+ __s64 pid;
+ /** @command_submissions: Number of commands submitted. */
+ __u64 command_submissions;
+ /** @command_completions: Number of commands completed. */
+ __u64 command_completions;
+ /** @migrations: Number of times the context has been migrated. */
+ __u64 migrations;
+ /** @preemptions: Number of times the context has been preempted. */
+ __u64 preemptions;
+ /** @errors: Number of errors encountered by the context. */
+ __u64 errors;
+ /** @priority: Context priority. */
+ __u64 priority;
+ /** @heap_usage: Usage of device heap buffer. */
+ __u64 heap_usage;
+ /** @suspensions: Number of times the context has been suspended. */
+ __u64 suspensions;
+ /**
+ * @state: Context state.
+ * %AMDXDNA_HWCTX_STATE_IDLE
+ * %AMDXDNA_HWCTX_STATE_ACTIVE
+ */
+ __u32 state;
+ /** @pasid: PASID bound to the context. */
+ __u32 pasid;
+ /** @gops: Giga operations per second. */
+ __u32 gops;
+ /** @fps: Frames per second. */
+ __u32 fps;
+ /** @dma_bandwidth: DMA bandwidth. */
+ __u32 dma_bandwidth;
+ /** @latency: Frame response latency. */
+ __u32 latency;
+ /** @frame_exec_time: Frame execution time. */
+ __u32 frame_exec_time;
+ /** @txn_op_idx: Index of last control code executed. */
+ __u32 txn_op_idx;
+ /** @ctx_pc: Program counter. */
+ __u32 ctx_pc;
+ /** @fatal_error_type: Fatal error type if context crashes. */
+ __u32 fatal_error_type;
+ /** @fatal_error_exception_type: Firmware exception type. */
+ __u32 fatal_error_exception_type;
+ /** @fatal_error_exception_pc: Firmware exception program counter. */
+ __u32 fatal_error_exception_pc;
+ /** @fatal_error_app_module: Exception module name. */
+ __u32 fatal_error_app_module;
+ /** @pad: Structure pad. */
+ __u32 pad;
+};
+
+#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
+
+/**
+ * struct amdxdna_drm_get_array - Get information array.
+ */
+struct amdxdna_drm_get_array {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_AMDXDNA_HW_CONTEXT_ALL:
+ * Returns all created hardware contexts.
+ */
+ __u32 param;
+ /**
+ * @element_size:
+ *
+ * Specifies maximum element size and returns the actual element size.
+ */
+ __u32 element_size;
+ /**
+ * @num_element:
+ *
+ * Specifies maximum number of elements and returns the actual number
+ * of elements.
+ */
+ __u32 num_element; /* in/out */
+ /** @pad: MBZ */
+ __u32 pad;
+ /**
+ * @buffer:
+ *
+ * Specifies the match conditions and returns the matched information
+ * array.
+ */
+ __u64 buffer;
+};
+
enum amdxdna_drm_set_param {
DRM_AMDXDNA_SET_POWER_MODE,
DRM_AMDXDNA_WRITE_AIE_MEM,
DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SET_STATE, \
struct amdxdna_drm_set_state)
+#define DRM_IOCTL_AMDXDNA_GET_ARRAY \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_ARRAY, \
+ struct amdxdna_drm_get_array)
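To illustrate the whole flow, here is a hedged userspace sketch of enumerating contexts through the new ioctl; the device path, the 16-entry buffer sizing, and the error handling are assumptions for the example, not part of this patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "amdxdna_accel.h"	/* this uapi header */

int main(void)
{
	struct amdxdna_drm_hwctx_entry entries[16] = {};
	struct amdxdna_drm_get_array args = {
		.param = DRM_AMDXDNA_HW_CONTEXT_ALL,
		.element_size = sizeof(entries[0]),
		.num_element = 16,	/* capacity in, actual count out */
		.buffer = (uintptr_t)entries,
	};
	int fd = open("/dev/accel/accel0", O_RDWR);	/* path is an assumption */

	if (fd < 0 || ioctl(fd, DRM_IOCTL_AMDXDNA_GET_ARRAY, &args) < 0)
		return 1;

	/* Entries are packed at the *returned* element_size stride, which
	 * may be smaller than this header's struct on an older kernel.
	 */
	for (uint32_t i = 0; i < args.num_element; i++) {
		struct amdxdna_drm_hwctx_entry *e =
			(void *)((char *)entries + (size_t)i * args.element_size);

		printf("hwctx %u: cols %u+%u, %llu submitted, %llu completed\n",
		       e->context_id, e->start_col, e->num_col,
		       (unsigned long long)e->command_submissions,
		       (unsigned long long)e->command_completions);
	}

	close(fd);
	return 0;
}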
+
#if defined(__cplusplus)
} /* extern c end */
#endif