uint32_t is_aql_queue;
uint32_t queue_size;
uint32_t exclusively_scheduled;
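+ /* SH_MEM_CONFIG value for the queue's process, forwarded to MES */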
+ uint32_t sh_mem_config_data;
};
struct mes_remove_queue_input {
mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
mes_add_queue_pkt.gds_size = input->queue_size;
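+ /* forward the caller-supplied SH_MEM_CONFIG into the firmware packet */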
+ mes_add_queue_pkt.sh_mem_config_data = input->sh_mem_config_data;
+
return mes_v12_1_submit_pkt_and_poll_completion(mes,
xcc_id, AMDGPU_MES_SCHED_PIPE,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
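
The call above is cut off mid-argument-list by the hunk. In the existing
mes_v12_0 code the matching call ends with the offset of the packet's
api_status member, which the poll loop watches for completion. Assuming
mes_v12_1 keeps that convention, a completed call would read as follows;
the trailing offsetof() argument is an assumption carried over from the
mes_v12_0 pattern, not part of this hunk:

	/* sketch only: final argument assumed from the mes_v12_0 pattern */
	return mes_v12_1_submit_pkt_and_poll_completion(mes,
			xcc_id, AMDGPU_MES_SCHED_PIPE,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
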
queue_input.queue_type = (uint32_t)queue_type;
queue_input.exclusively_scheduled = q->properties.is_gws;
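+ /* per-process SH_MEM_CONFIG travels with the MES add-queue request */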
+ queue_input.sh_mem_config_data = qpd->sh_mem_config;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
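
For context, funcs->add_hw_queue() is called with the MES lock held. A
sketch of the surrounding pattern, assuming the usual KFD add-queue error
handling; the unlock and the pr_err are reconstructed from the upstream
pattern, not shown in this hunk:

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r)
		pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
		       q->properties.doorbell_off);
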
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)))
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) || \
+ (KFD_GC_VERSION(dev) == IP_VERSION(12, 1, 0)))
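
The version list matches the body of the upstream
KFD_SUPPORT_XNACK_PER_PROCESS(dev) macro; assuming that is the macro being
extended here, its consumer gates per-process XNACK per device roughly like
this sketch (loop context and the supported flag are assumptions taken from
the upstream caller's shape):

	/* sketch of the (assumed) consumer inside the per-device loop */
	if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
		continue;	/* this device handles per-process XNACK */
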
struct kfd_node;
* management and memory-manager-related preemptions or
* even deadlocks.
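+ * GC 12.1 is exempted below: this patch enables per-process XNACK
+ * there, so the blanket disable no longer applies.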
*/
- if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
+ if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1) &&
+ KFD_GC_VERSION(dev) < IP_VERSION(12, 1, 0))
return false;
if (dev->kfd->noretry)
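
The comment fragment above matches kfd_process_xnack_mode() in
kfd_process.c, so, assuming that is the function being patched, the
resulting per-device decision looks roughly like the sketch below. The
function signature and loop framing are taken from the upstream shape of
that function, not from this patch:

	/* sketch: per-device XNACK decision after both hunks are applied */
	bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
	{
		int i;

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_node *dev = p->pdds[i]->dev;

			/* parts with per-process XNACK skip the legacy rules */
			if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
				continue;

			/* blanket disable for GFXv10+, now minus GC 12.1 */
			if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1) &&
			    KFD_GC_VERSION(dev) < IP_VERSION(12, 1, 0))
				return false;

			/* otherwise the boot-time retry setting must match */
			if (dev->kfd->noretry)
				return false;
		}

		return true;
	}
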