 	spin_lock_init(&adev->gmc.invalidate_lock);
-	r = amdgpu_atomfirmware_get_vram_info(adev,
-					      &vram_width, &vram_type, &vram_vendor);
-	adev->gmc.vram_width = vram_width;
-
-	adev->gmc.vram_type = vram_type;
-	adev->gmc.vram_vendor = vram_vendor;
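+	/* GC 12.1.0: use driver defaults for VRAM type/width instead of atomfirmware */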
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
+		gmc_v12_1_init_vram_info(adev);
+	} else {
+		r = amdgpu_atomfirmware_get_vram_info(adev,
+						      &vram_width, &vram_type, &vram_vendor);
+		adev->gmc.vram_width = vram_width;
+		adev->gmc.vram_type = vram_type;
+		adev->gmc.vram_vendor = vram_vendor;
+	}
 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 	case IP_VERSION(12, 0, 0):
 		adev->gmc.vm_fault.num_types = 1;
 		adev->gmc.vm_fault.funcs = &gmc_v12_1_irq_funcs;
 	}
+
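+/**
+ * gmc_v12_1_init_vram_info - set default VRAM type and width
+ * @adev: amdgpu device pointer
+ *
+ * Fill adev->gmc.vram_type and adev->gmc.vram_width with fixed defaults
+ * until the values can be queried from the IP discovery binary.
+ */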
+void gmc_v12_1_init_vram_info(struct amdgpu_device *adev)
+{
+	/* TODO: query vram_info from ip discovery binary */
+	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM4;
+	adev->gmc.vram_width = 384 * 64;
+}