git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: Add host driver reserved-region
author: Lijo Lazar <lijo.lazar@amd.com>
Wed, 25 Mar 2026 13:10:24 +0000 (18:40 +0530)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 3 Apr 2026 17:50:01 +0000 (13:50 -0400)
Use reserve region helpers for initializing/reserving host driver
reserved region in virtualization environment.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c

index 51e692712d3b3080e9e29e66cb2ade178444cc36..6860a3a4d466769448fa4875166a03c3a891ac49 100644 (file)
@@ -161,9 +161,9 @@ static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
            ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
                ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
                /* driver request VRAM reservation for SR-IOV */
-               adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
-                       (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
-               adev->mman.drv_vram_usage_size = drv_size << 10;
+               amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_DRV_VRAM_USAGE,
+                                 (drv_start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10,
+                                 drv_size << 10, true);
        }
 
        *usage_bytes = 0;
index b82ab4794a6b74ec453c71190406a02f2b9e4e4a..feea4bbe5c9507c92c2a114d30ed9e166034b73f 100644 (file)
@@ -1729,48 +1729,6 @@ void amdgpu_ttm_unmark_vram_reserved(struct amdgpu_device *adev,
        memset(resv, 0, sizeof(*resv));
 }
 
-/*
- * Driver Reservation functions
- */
-/**
- * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
- *
- * @adev: amdgpu_device pointer
- *
- * free drv reserved vram if it has been reserved.
- */
-static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
-                                                 NULL,
-                                                 &adev->mman.drv_vram_usage_va);
-}
-
-/**
- * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from drv.
- */
-static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
-{
-       u64 vram_size = adev->gmc.visible_vram_size;
-
-       adev->mman.drv_vram_usage_va = NULL;
-       adev->mman.drv_vram_usage_reserved_bo = NULL;
-
-       if (adev->mman.drv_vram_usage_size == 0 ||
-           adev->mman.drv_vram_usage_size > vram_size)
-               return 0;
-
-       return amdgpu_bo_create_kernel_at(adev,
-                                         adev->mman.drv_vram_usage_start_offset,
-                                         adev->mman.drv_vram_usage_size,
-                                         &adev->mman.drv_vram_usage_reserved_bo,
-                                         &adev->mman.drv_vram_usage_va);
-}
-
 /*
  * Memoy training reservation functions
  */
@@ -2148,9 +2106,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * The reserved VRAM for the driver must be pinned to a specific
         * location in VRAM, so reserve it early.
         */
-       r = amdgpu_ttm_drv_reserve_vram_init(adev);
-       if (r)
-               return r;
+       if (adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size >
+           adev->gmc.visible_vram_size) {
+               adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].size = 0;
+       } else {
+               r = amdgpu_ttm_mark_vram_reserved(adev, AMDGPU_RESV_DRV_VRAM_USAGE);
+               if (r)
+                       return r;
+       }
 
        /*
         * only NAVI10 and later ASICs support IP discovery.
@@ -2306,7 +2269,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 
        amdgpu_ttm_free_mmio_remap_bo(adev);
        amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_VRAM_USAGE);
-       amdgpu_ttm_drv_reserve_vram_fini(adev);
+       amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_DRV_VRAM_USAGE);
 
        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 
index 98387984e44882e28345dd05e2b76a5761f3f979..f2f23a42b3cc45c9ec0fd969d4c9f9dd48b24183 100644 (file)
@@ -105,12 +105,6 @@ struct amdgpu_mman {
 
        bool                    keep_stolen_vga_memory;
 
-       /* driver VRAM reservation */
-       u64             drv_vram_usage_start_offset;
-       u64             drv_vram_usage_size;
-       struct amdgpu_bo        *drv_vram_usage_reserved_bo;
-       void            *drv_vram_usage_va;
-
        struct amdgpu_vram_resv         resv_region[AMDGPU_RESV_MAX];
 
        /* PAGE_SIZE'd BO for process memory r/w over SDMA. */
index 882d8524f4dcc52cff9fb14055475cbd13ea8c0b..e8d180a412d166fd80d2a505e6e581603e01ab86 100644 (file)
@@ -438,7 +438,8 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
        uint64_t retired_page;
        uint32_t bp_idx, bp_cnt;
        void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
-       void *vram_usage_va = fw_va ? fw_va : adev->mman.drv_vram_usage_va;
+       void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
+       void *vram_usage_va = fw_va ? fw_va : drv_va;
 
        memset(&bp, 0, sizeof(bp));
 
@@ -707,15 +708,16 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 {
        uint32_t *pfvf_data = NULL;
        void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
+       void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
 
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
        adev->virt.vf2pf_update_interval_ms = 0;
        adev->virt.vf2pf_update_retry_cnt = 0;
 
-       if (fw_va && adev->mman.drv_vram_usage_va) {
+       if (fw_va && drv_va) {
                dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
-       } else if (fw_va || adev->mman.drv_vram_usage_va) {
+       } else if (fw_va || drv_va) {
                /* go through this logic in ip_init and reset to init workqueue*/
                amdgpu_virt_exchange_data(adev);
 
@@ -761,8 +763,9 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
        uint32_t bp_block_size = 0;
        struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
        void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
+       void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
 
-       if (fw_va || adev->mman.drv_vram_usage_va) {
+       if (fw_va || drv_va) {
                if (fw_va) {
                        if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
                                adev->virt.fw_reserve.p_pf2vf =
@@ -787,15 +790,15 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
                                adev->virt.fw_reserve.ras_telemetry =
                                        (fw_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
                        }
-               } else if (adev->mman.drv_vram_usage_va) {
+               } else if (drv_va) {
                        adev->virt.fw_reserve.p_pf2vf =
                                (struct amd_sriov_msg_pf2vf_info_header *)
-                               (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+                               (drv_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
                        adev->virt.fw_reserve.p_vf2pf =
                                (struct amd_sriov_msg_vf2pf_info_header *)
-                               (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+                               (drv_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
                        adev->virt.fw_reserve.ras_telemetry =
-                               (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+                               (drv_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
                }
 
                amdgpu_virt_read_pf2vf_data(adev);
index 5648e4c7afa4b23fbdd45e92b11a450675bbec9c..eb552d0cce4aeaaae7aac939b1af7cae2af546cc 100644 (file)
@@ -37,6 +37,7 @@ static int amdgpu_virt_ras_get_cmd_shared_mem(struct ras_core_context *ras_core,
        struct amdsriov_ras_telemetry *ras_telemetry_cpu;
        struct amdsriov_ras_telemetry *ras_telemetry_gpu;
        void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
+       void *drv_va = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].cpu_ptr;
        uint64_t fw_vram_usage_start_offset = 0;
        uint64_t ras_telemetry_offset = 0;
 
@@ -47,11 +48,10 @@ static int amdgpu_virt_ras_get_cmd_shared_mem(struct ras_core_context *ras_core,
                fw_vram_usage_start_offset = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].offset;
                ras_telemetry_offset = (uintptr_t)adev->virt.fw_reserve.ras_telemetry -
                                (uintptr_t)fw_va;
-       } else if (adev->mman.drv_vram_usage_va &&
-               adev->mman.drv_vram_usage_va <= adev->virt.fw_reserve.ras_telemetry) {
-               fw_vram_usage_start_offset = adev->mman.drv_vram_usage_start_offset;
+       } else if (drv_va && drv_va <= adev->virt.fw_reserve.ras_telemetry) {
+               fw_vram_usage_start_offset = adev->mman.resv_region[AMDGPU_RESV_DRV_VRAM_USAGE].offset;
                ras_telemetry_offset = (uintptr_t)adev->virt.fw_reserve.ras_telemetry -
-                               (uintptr_t)adev->mman.drv_vram_usage_va;
+                               (uintptr_t)drv_va;
        } else {
                return -EINVAL;
        }