pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
}
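+ /* The metadata ring is optional; when requested, its size must be a power of two */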
+ if ((args->metadata_ring_size != 0) && !is_power_of_2(args->metadata_ring_size)) {
+ pr_err("Metadata ring size must be a power of 2 or 0\n");
+ return -EINVAL;
+ }
+
if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
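+ /* Only AQL compute queues carry a metadata ring; other queue types do not use it */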
+ if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
+ q_properties->metadata_queue_size = args->metadata_ring_size;
+
q_properties->read_ptr = (void __user *)args->read_pointer_address;
q_properties->write_ptr = (void __user *)args->write_pointer_address;
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+ if (q->metadata_queue_size) {
+ /* On GC 12.1 a metadata packet is 64 DWs, 4 times the size of an AQL packet */
+ if (q->metadata_queue_size == q->queue_size * 4) {
+ /*
+ * The user application allocates the main queue ring and the metadata
+ * queue ring as a single allocation; the metadata queue ring starts
+ * immediately after the main queue ring.
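+ * Layout of that allocation:
+ *   [ main ring: queue_size ][ metadata ring: metadata_queue_size ]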
+ */
+ m->cp_hqd_kd_base =
+ lower_32_bits((q->queue_address + q->queue_size) >> 8);
+ m->cp_hqd_kd_base_hi =
+ upper_32_bits((q->queue_address + q->queue_size) >> 8);
+
+ m->cp_hqd_kd_cntl |= CP_HQD_KD_CNTL__KD_FETCHER_ENABLE_MASK;
+ /* KD_SIZE = 2 for metadata packet = 64 DWs */
+ m->cp_hqd_kd_cntl |= 2 << CP_HQD_KD_CNTL__KD_SIZE__SHIFT;
+ } else {
+ pr_warn("Invalid metadata ring size, metadata queue will be ignored\n");
+ }
+ }
+
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
enum kfd_queue_format format;
unsigned int queue_id;
uint64_t queue_address;
uint64_t queue_size;
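+ /* Size in bytes of the optional AQL metadata ring, 0 when not used */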
+ uint64_t metadata_queue_size;
uint32_t priority;
uint32_t queue_percent;
void __user *read_ptr;
properties->format == KFD_QUEUE_FORMAT_AQL &&
topo_dev->node_props.gfx_target_version >= 70000 &&
topo_dev->node_props.gfx_target_version < 90000)
- expected_queue_size = properties->queue_size / 2;
+ /* metadata_queue_size not supported on GFX7/GFX8 */
+ expected_queue_size =
+ properties->queue_size / 2;
else
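+ /* The metadata ring shares the main ring's allocation, so validate
+ * their combined size against the backing buffer
+ */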
- expected_queue_size = properties->queue_size;
+ expected_queue_size =
+ properties->queue_size + properties->metadata_queue_size;
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
* - 1.19 - Add a new ioctl to create secondary kfd processes
* - 1.20 - Trap handler support for expert scheduling mode available
* - 1.21 - Debugger support to subscribe to LDS out-of-address exceptions
+ * - 1.22 - Add metadata ring size to queue creation
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 21
+#define KFD_IOCTL_MINOR_VERSION 22
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
__u32 sdma_engine_id; /* to KFD */
- __u32 pad;
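+ /* AQL only: size in bytes of the metadata ring that follows the main
+ * ring in the same allocation; must be 0 or a power of two
+ */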
+ __u32 metadata_ring_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {