From: Matt Roper
Date: Mon, 13 Oct 2025 20:09:48 +0000 (-0700)
Subject: drm/xe: Move 'vram_flags' flag back to platform descriptor
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=76b7aedd66040bc2d70f28e49cdf7533c971b2c0;p=thirdparty%2Fkernel%2Flinux.git

drm/xe: Move 'vram_flags' flag back to platform descriptor

Restrictions and requirements on VRAM alignment are something that
should be tracked at the platform level rather than the IP level.  Even
when mixing and matching various graphics, media, and display IP
blocks, the platform as a whole has to have consistent memory
allocation handling.  This is also a trait that should be tied to the
platform even if the graphics IP itself is not present (e.g., if we
disable the primary GT via configfs).

Reviewed-by: Lucas De Marchi
Link: https://lore.kernel.org/r/20251013200944.2499947-30-matthew.d.roper@intel.com
Signed-off-by: Matt Roper
---

diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index ffb7afcda8de4..01bb70285dacf 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -63,7 +63,6 @@ static const struct xe_graphics_desc graphics_xehpg = {
 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
 
 	XE_HP_FEATURES,
-	.vram_flags = XE_VRAM_FLAGS_NEED64K,
 
 	.has_flat_ccs = 1,
 };
@@ -79,7 +78,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
 		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
 
 	XE_HP_FEATURES,
-	.vram_flags = XE_VRAM_FLAGS_NEED64K,
 
 	.has_asid = 1,
 	.has_atomic_enable_pte_bit = 1,
@@ -270,7 +268,8 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
 		{ } \
 	}, \
 	.va_bits = 48, \
-	.vm_max_level = 3
+	.vm_max_level = 3, \
+	.vram_flags = XE_VRAM_FLAGS_NEED64K
 
 static const struct xe_device_desc ats_m_desc = {
 	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
@@ -310,6 +309,7 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
 	.require_force_probe = true,
 	.va_bits = 57,
 	.vm_max_level = 4,
+	.vram_flags = XE_VRAM_FLAGS_NEED64K,
 	.has_mbx_power_limits = false,
 };
@@ -600,6 +600,7 @@ static int xe_info_init_early(struct xe_device *xe,
 	xe->info.dma_mask_size = desc->dma_mask_size;
 	xe->info.va_bits = desc->va_bits;
 	xe->info.vm_max_level = desc->vm_max_level;
+	xe->info.vram_flags = desc->vram_flags;
 	xe->info.is_dgfx = desc->is_dgfx;
 	xe->info.has_fan_control = desc->has_fan_control;
@@ -730,7 +731,6 @@ static int xe_info_init(struct xe_device *xe,
 		media_desc = NULL;
 	}
 
-	xe->info.vram_flags = graphics_desc->vram_flags;
 	xe->info.has_asid = graphics_desc->has_asid;
 	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
 	if (xe->info.platform != XE_PVC)
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index e59b59ec636dd..3189bd95bb6e3 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -32,6 +32,7 @@ struct xe_device_desc {
 	u8 max_gt_per_tile:2;
 	u8 va_bits;
 	u8 vm_max_level;
+	u8 vram_flags;
 
 	u8 require_force_probe:1;
 	u8 is_dgfx:1;
@@ -54,8 +55,6 @@ struct xe_device_desc {
 };
 
 struct xe_graphics_desc {
-	u8 vram_flags;
-
 	u64 hw_engine_mask;	/* hardware engines provided by graphics IP */
 
 	u8 has_asid:1;
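
For context on why the placement matters: vram_flags ultimately tells the
VRAM allocation paths whether a platform needs a larger minimum allocation
granularity.  The sketch below is illustrative only and is not part of this
patch; the helper name xe_vram_min_page_size() is hypothetical, while
xe->info.vram_flags and XE_VRAM_FLAGS_NEED64K are the names touched by the
change above.

/*
 * Hypothetical consumer of the per-platform vram_flags, which after this
 * patch is filled in from struct xe_device_desc by xe_info_init_early()
 * rather than from the graphics IP descriptor.
 */
static u64 xe_vram_min_page_size(const struct xe_device *xe)
{
	/* Some platforms (e.g. DG2/ATS-M, PVC) need 64K VRAM granularity. */
	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		return SZ_64K;

	return PAGE_SIZE;
}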