(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware requests VRAM reservation for SR-IOV */
- adev->mman.fw_vram_usage_start_offset = (start_addr &
- (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->mman.fw_vram_usage_size = size << 10;
+ amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW_VRAM_USAGE,
+ (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10,
+ size << 10, true);
/* Use the default scratch size */
usage_bytes = 0;
} else {
(u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware requests VRAM reservation for SR-IOV */
- adev->mman.fw_vram_usage_start_offset = (start_addr &
- (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->mman.fw_vram_usage_size = fw_size << 10;
+ amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW_VRAM_USAGE,
+ (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10,
+ fw_size << 10, true);
/* Use the default scratch size */
*usage_bytes = 0;
} else {
((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
/* Firmware requests VRAM reservation for SR-IOV */
- adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
- (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->mman.fw_vram_usage_size = fw_size << 10;
+ amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW_VRAM_USAGE,
+ (fw_start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10,
+ fw_size << 10, true);
}
if (amdgpu_sriov_vf(adev) &&
memset(resv, 0, sizeof(*resv));
}
-/*
- * Firmware Reservation functions
- */
-/**
- * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
- *
- * @adev: amdgpu_device pointer
- *
- * free fw reserved vram if it has been reserved.
- */
-static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
- NULL, &adev->mman.fw_vram_usage_va);
-}
-
/*
* Driver Reservation functions
*/
&adev->mman.drv_vram_usage_va);
}
-/**
- * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from fw.
- */
-static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
-{
- uint64_t vram_size = adev->gmc.visible_vram_size;
-
- adev->mman.fw_vram_usage_va = NULL;
- adev->mman.fw_vram_usage_reserved_bo = NULL;
-
- if (adev->mman.fw_vram_usage_size == 0 ||
- adev->mman.fw_vram_usage_size > vram_size)
- return 0;
-
- return amdgpu_bo_create_kernel_at(adev,
- adev->mman.fw_vram_usage_start_offset,
- adev->mman.fw_vram_usage_size,
- &adev->mman.fw_vram_usage_reserved_bo,
- &adev->mman.fw_vram_usage_va);
-}
-
/**
* amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
*
* The reserved VRAM for firmware must be pinned to the specified
* place in VRAM, so reserve it early.
*/
- r = amdgpu_ttm_fw_reserve_vram_init(adev);
- if (r)
- return r;
+ if (adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size >
+ adev->gmc.visible_vram_size) {
+ adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size = 0;
+ } else {
+ r = amdgpu_ttm_mark_vram_reserved(adev, AMDGPU_RESV_FW_VRAM_USAGE);
+ if (r)
+ return r;
+ }
/*
* The reserved VRAM for the driver must be pinned to a specific
&adev->mman.sdma_access_ptr);
amdgpu_ttm_free_mmio_remap_bo(adev);
- amdgpu_ttm_fw_reserve_vram_fini(adev);
+ amdgpu_ttm_unmark_vram_reserved(adev, AMDGPU_RESV_FW_VRAM_USAGE);
amdgpu_ttm_drv_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
bool keep_stolen_vga_memory;
- /* firmware VRAM reservation */
- u64 fw_vram_usage_start_offset;
- u64 fw_vram_usage_size;
- struct amdgpu_bo *fw_vram_usage_reserved_bo;
- void *fw_vram_usage_va;
-
/* driver VRAM reservation */
u64 drv_vram_usage_start_offset;
u64 drv_vram_usage_size;
struct eeprom_table_record bp;
uint64_t retired_page;
uint32_t bp_idx, bp_cnt;
- void *vram_usage_va = NULL;
-
- if (adev->mman.fw_vram_usage_va)
- vram_usage_va = adev->mman.fw_vram_usage_va;
- else
- vram_usage_va = adev->mman.drv_vram_usage_va;
+ void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
+ void *vram_usage_va = fw_va ? fw_va : adev->mman.drv_vram_usage_va;
memset(&bp, 0, sizeof(bp));
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
uint32_t *pfvf_data = NULL;
+ void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
- if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
+ if (fw_va && adev->mman.drv_vram_usage_va) {
dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
- } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
+ } else if (fw_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+ void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
- if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
- if (adev->mman.fw_vram_usage_va) {
+ if (fw_va || adev->mman.drv_vram_usage_va) {
+ if (fw_va) {
if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va +
+ (fw_va +
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va +
+ (fw_va +
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
(AMD_SRIOV_MSG_SIZE_KB << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va +
+ (fw_va +
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
} else {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ (fw_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ (fw_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ (fw_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
}
/* reserved memory starts at the crit region base offset with a size of 5 MB */
- adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
- adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+ amdgpu_ttm_init_vram_resv(adev, AMDGPU_RESV_FW_VRAM_USAGE,
+ adev->virt.crit_regn.offset,
+ adev->virt.crit_regn.size_kb << 10, true);
dev_info(adev->dev,
"critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
init_data_hdr->version,
- adev->mman.fw_vram_usage_start_offset,
- adev->mman.fw_vram_usage_size >> 10);
+ adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].offset,
+ adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].size >> 10);
adev->virt.is_dynamic_crit_regn_enabled = true;
struct amdgpu_device *adev = ras_core->dev;
struct amdsriov_ras_telemetry *ras_telemetry_cpu;
struct amdsriov_ras_telemetry *ras_telemetry_gpu;
+ void *fw_va = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].cpu_ptr;
uint64_t fw_vram_usage_start_offset = 0;
uint64_t ras_telemetry_offset = 0;
if (!adev->virt.fw_reserve.ras_telemetry)
return -EINVAL;
- if (adev->mman.fw_vram_usage_va &&
- adev->mman.fw_vram_usage_va <= adev->virt.fw_reserve.ras_telemetry) {
- fw_vram_usage_start_offset = adev->mman.fw_vram_usage_start_offset;
+ if (fw_va && fw_va <= adev->virt.fw_reserve.ras_telemetry) {
+ fw_vram_usage_start_offset = adev->mman.resv_region[AMDGPU_RESV_FW_VRAM_USAGE].offset;
ras_telemetry_offset = (uintptr_t)adev->virt.fw_reserve.ras_telemetry -
- (uintptr_t)adev->mman.fw_vram_usage_va;
+ (uintptr_t)fw_va;
} else if (adev->mman.drv_vram_usage_va &&
adev->mman.drv_vram_usage_va <= adev->virt.fw_reserve.ras_telemetry) {
fw_vram_usage_start_offset = adev->mman.drv_vram_usage_start_offset;