uint64_t kfd_mqd_stride(struct mqd_manager *mm,
struct queue_properties *q)
{
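+ /*
+  * GC 11+ MQDs are allocated with padding (MES writes to areas beyond
+  * the MQD struct), so the stride must match the padded allocation size.
+  */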
+ if (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0))
+ return AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
+
return mm->mqd_size;
}
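
Note: AMDGPU_MQD_SIZE_ALIGN is not defined anywhere in this excerpt. A minimal sketch of what such a helper could look like, assuming it simply rounds the MQD size up to a page boundary (an assumption for illustration, not the actual amdgpu definition):

/* Hypothetical sketch only: round the MQD size up to a full page. */
#define AMDGPU_MQD_SIZE_ALIGN(size) ALIGN((size), PAGE_SIZE)
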
static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
struct queue_properties *q)
{
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
struct kfd_node *node = mm->dev;
struct kfd_mem_obj *mqd_mem_obj;
- int size;
-
- /*
- * MES write to areas beyond MQD size. So allocate
- * 1 PAGE_SIZE memory for MQD is MES is enabled.
- */
- if (node->kfd->shared_resources.enable_mes)
- size = PAGE_SIZE;
- else
- size = sizeof(struct v11_compute_mqd);
- if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
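+
+ /* MES writes to areas beyond the MQD struct, so allocate the padded size. */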
+ if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
{
uint64_t addr;
struct v11_compute_mqd *m;
- int size;
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
- if (mm->dev->kfd->shared_resources.enable_mes)
- size = PAGE_SIZE;
- else
- size = sizeof(struct v11_compute_mqd);
-
- memset(m, 0, size);
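+ /* Clear the entire padded allocation, not just sizeof(*m). */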
+ memset(m, 0, mqd_size);
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
struct queue_properties *q)
{
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
struct kfd_node *node = mm->dev;
struct kfd_mem_obj *mqd_mem_obj;
- /*
- * Allocate one PAGE_SIZE memory for MQD as MES writes to areas beyond
- * struct MQD size.
- */
- if (kfd_gtt_sa_allocate(node, PAGE_SIZE, &mqd_mem_obj))
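+ /* MES writes to areas beyond the MQD struct, so allocate the padded size. */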
+ if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
{
uint64_t addr;
struct v12_compute_mqd *m;
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
m = (struct v12_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
- memset(m, 0, PAGE_SIZE);
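+ /* Clear the entire padded allocation, not just sizeof(*m). */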
+ memset(m, 0, mqd_size);
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"
-#define MQD_SIZE (2 * PAGE_SIZE)
-
-static uint64_t mqd_stride_v12_1(struct mqd_manager *mm,
- struct queue_properties *q)
-{
- if (q->type == KFD_QUEUE_TYPE_COMPUTE)
- return MQD_SIZE;
- else
- return PAGE_SIZE;
-}
-
static inline struct v12_1_compute_mqd *get_mqd(void *mqd)
{
return (struct v12_1_compute_mqd *)mqd;
static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
struct queue_properties *q)
{
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
struct kfd_node *node = mm->dev;
struct kfd_mem_obj *mqd_mem_obj;
- unsigned int size;
- /*
- * Allocate two PAGE_SIZE memory for Compute MQD as MES writes to areas beyond
- * struct MQD size. Size of the Compute MQD is 1 PAGE_SIZE.
- * For SDMA MQD, we allocate 1 Page_size.
- */
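+ /*
+  * MES writes to areas beyond the MQD struct, so allocate the padded
+  * size; compute queues get one padded MQD per XCC.
+  */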
if (q->type == KFD_QUEUE_TYPE_COMPUTE)
- size = MQD_SIZE * NUM_XCC(node->xcc_mask);
- else
- size = PAGE_SIZE;
+ mqd_size *= NUM_XCC(node->xcc_mask);
- if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
+ if (kfd_gtt_sa_allocate(node, mqd_size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
{
uint64_t addr;
struct v12_1_compute_mqd *m;
+ u32 mqd_size = AMDGPU_MQD_SIZE_ALIGN(mm->mqd_size);
m = (struct v12_1_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
- memset(m, 0, MQD_SIZE);
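+ /* Clear the padded MQD, not just sizeof(*m). */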
+ memset(m, 0, mqd_size);
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v12_1_compute_mqd);
mqd->get_wave_state = get_wave_state_v12_1;
- mqd->mqd_stride = mqd_stride_v12_1;
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif