}
/**
- * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton MMIO_REMAP BO
* @adev: amdgpu device
*
- * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * Allocates a global BO with backing AMDGPU_PL_MMIO_REMAP when the
* hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
* PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
* GEM object (amdgpu_bo_create).
*
- * The BO is created as a normal GEM object via amdgpu_bo_create(), then
- * reserved and pinned at the TTM level (ttm_bo_pin()) so it can never be
- * migrated or evicted. No CPU mapping is established here.
- *
* Return:
* * 0 on success or intentional skip (feature not present/unsupported)
* * negative errno on allocation failure
*/
-static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement;
+ struct ttm_buffer_object *tbo;
+ struct ttm_place placements;
struct amdgpu_bo_param bp;
+ struct ttm_resource *tmp;
int r;
/* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
return 0;
+ /*
+ * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
+ * The initial TTM resource assigned by amdgpu_bo_create() is
+ * replaced below with a fixed MMIO_REMAP placement.
+ */
memset(&bp, 0, sizeof(bp));
-
- /* Create exactly one GEM BO in the MMIO_REMAP domain. */
- bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
- bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.type = ttm_bo_type_device;
+ bp.size = AMDGPU_GPU_PAGE_SIZE;
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.domain = 0;
bp.flags = 0;
bp.resv = NULL;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
if (r)
return r;
if (r)
goto err_unref;
+ tbo = &adev->rmmio_remap.bo->tbo;
+
/*
* MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
- * Use TTM-level pin so the BO cannot be evicted/migrated,
- * independent of GEM domains. This
- * enforces the “fixed I/O window”
*/
- ttm_bo_pin(&adev->rmmio_remap.bo->tbo);
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = 0;
+ placements.mem_type = AMDGPU_PL_MMIO_REMAP;
+ placements.flags = 0;
+ /* Force the BO into the fixed MMIO_REMAP placement */
+ r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ goto err_unlock;
+
+ ttm_resource_free(tbo, &tbo->resource);
+ ttm_bo_assign_mem(tbo, tmp);
+ ttm_bo_pin(tbo);
amdgpu_bo_unreserve(adev->rmmio_remap.bo);
return 0;
+err_unlock:
+ amdgpu_bo_unreserve(adev->rmmio_remap.bo);
+
err_unref:
- if (adev->rmmio_remap.bo)
- amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
adev->rmmio_remap.bo = NULL;
return r;
}
/**
- * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
* @adev: amdgpu device
*
* Frees the kernel-owned MMIO_REMAP BO if it was allocated by
* amdgpu_ttm_mmio_remap_bo_init().
*/
-static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
{
- struct amdgpu_bo *bo = adev->rmmio_remap.bo;
-
- if (!bo)
- return; /* <-- safest early exit */
+ if (!adev->rmmio_remap.bo)
+ return;
if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
amdgpu_bo_unreserve(adev->rmmio_remap.bo);
}
+
+ /*
+ * At this point we rely on normal DRM teardown ordering:
+ * no new user ioctls can access the global MMIO_REMAP BO
+ * once TTM teardown begins.
+ */
amdgpu_bo_unref(&adev->rmmio_remap.bo);
adev->rmmio_remap.bo = NULL;
}
return r;
}
	/* Allocate the singleton MMIO_REMAP BO if supported */
	r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
if (r)
return r;
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
	amdgpu_ttm_free_mmio_remap_bo(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
 */
#define AMDGPU_GEM_DOMAIN_CPU		0x1
#define AMDGPU_GEM_DOMAIN_GTT		0x2
#define AMDGPU_GEM_DOMAIN_GWS		0x10
#define AMDGPU_GEM_DOMAIN_OA		0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL	0x40
#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
					 AMDGPU_GEM_DOMAIN_GTT | \
					 AMDGPU_GEM_DOMAIN_VRAM | \
					 AMDGPU_GEM_DOMAIN_GDS | \
					 AMDGPU_GEM_DOMAIN_GWS | \
					 AMDGPU_GEM_DOMAIN_OA | \
					 AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)