uint32_t *ptr, len;
int i, ret;
- a2xx_gpummu_params(gpu->vm->mmu, &pt_base, &tran_error);
+ a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error);
DBG("%s", gpu->name);
return state;
}
-static struct msm_gem_vm *
+static struct drm_gpuvm *
a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true);
return ERR_PTR(ret);
}
- msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a5xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
+ struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu;
+
msm_gem_kernel_put(gmu->hfi.obj, gmu->vm);
msm_gem_kernel_put(gmu->debug.obj, gmu->vm);
msm_gem_kernel_put(gmu->icache.obj, gmu->vm);
msm_gem_kernel_put(gmu->dummy.obj, gmu->vm);
msm_gem_kernel_put(gmu->log.obj, gmu->vm);
- gmu->vm->mmu->funcs->detach(gmu->vm->mmu);
- msm_gem_vm_put(gmu->vm);
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gmu->vm);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
/* For serializing communication with the GMU: */
struct mutex lock;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
void __iomem *mmio;
void __iomem *rscc;
if (ctx->seqno == ring->cur_ctx_seqno)
return;
- if (msm_iommu_pagetable_params(ctx->vm->mmu, &ttbr, &asid))
+ if (msm_iommu_pagetable_params(to_msm_vm(ctx->vm)->mmu, &ttbr, &asid))
return;
if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
mutex_unlock(&a6xx_gpu->gmu.lock);
}
-static struct msm_gem_vm *
+static struct drm_gpuvm *
a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
return adreno_iommu_create_vm(gpu, pdev, quirks);
}
-static struct msm_gem_vm *
+static struct drm_gpuvm *
a6xx_create_private_vm(struct msm_gpu *gpu)
{
struct msm_mmu *mmu;
- mmu = msm_iommu_pagetable_create(gpu->vm->mmu);
+ mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
- msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a6xx_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ a6xx_fault_handler);
a6xx_calc_ubwc_config(adreno_gpu);
/* Set up the preemption specific bits and pieces for each ringbuffer */
struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
- msm_iommu_pagetable_params(gpu->vm->mmu, &ttbr, &asid);
+ msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid);
smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
smmu_info_ptr->ttbr0 = ttbr;
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
-struct msm_gem_vm *
+struct drm_gpuvm *
adreno_create_vm(struct msm_gpu *gpu,
struct platform_device *pdev)
{
return adreno_iommu_create_vm(gpu, pdev, 0);
}
-struct msm_gem_vm *
+struct drm_gpuvm *
adreno_iommu_create_vm(struct msm_gpu *gpu,
struct platform_device *pdev,
unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
if (!priv->stall_enabled &&
ktime_after(ktime_get(), priv->stall_reenable_time) &&
!READ_ONCE(gpu->crashstate)) {
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
priv->stall_enabled = true;
- gpu->vm->mmu->funcs->set_stall(gpu->vm->mmu, true);
+ mmu->funcs->set_stall(mmu, true);
}
spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
u32 scratch[4])
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
!READ_ONCE(gpu->crashstate);
if (priv->stall_enabled) {
priv->stall_enabled = false;
- gpu->vm->mmu->funcs->set_stall(gpu->vm->mmu, false);
+ mmu->funcs->set_stall(mmu, false);
}
priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
return 0;
case MSM_PARAM_FAULTS:
if (ctx->vm)
- *value = gpu->global_faults + ctx->vm->faults;
+ *value = gpu->global_faults + to_msm_vm(ctx->vm)->faults;
else
*value = gpu->global_faults;
return 0;
case MSM_PARAM_VA_START:
if (ctx->vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->vm->base.mm_start;
+ *value = ctx->vm->mm_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->vm == gpu->vm)
return UERR(EINVAL, drm, "requires per-process pgtables");
- *value = ctx->vm->base.mm_range;
+ *value = ctx->vm->mm_range;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
*value = adreno_gpu->ubwc_config.highest_bank_bit;
* Common helper function to initialize the default address space for arm-smmu
* attached targets
*/
-struct msm_gem_vm *
+struct drm_gpuvm *
adreno_create_vm(struct msm_gpu *gpu,
struct platform_device *pdev);
-struct msm_gem_vm *
+struct drm_gpuvm *
adreno_iommu_create_vm(struct msm_gpu *gpu,
struct platform_device *pdev,
unsigned long quirks);
if (!dpu_kms->base.vm)
return;
- mmu = dpu_kms->base.vm->mmu;
+ mmu = to_msm_vm(dpu_kms->base.vm)->mmu;
mmu->funcs->detach(mmu);
- msm_gem_vm_put(dpu_kms->base.vm);
+ drm_gpuvm_put(dpu_kms->base.vm);
dpu_kms->base.vm = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
vm = msm_kms_init_vm(dpu_kms->dev);
if (IS_ERR(vm))
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_gem_vm *vm = kms->vm;
if (mdp4_kms->blank_cursor_iova)
msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->vm);
drm_gem_object_put(mdp4_kms->blank_cursor_bo);
- if (vm) {
- vm->mmu->funcs->detach(vm->mmu);
- msm_gem_vm_put(vm);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
if (mdp4_kms->rpm_enabled)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
int ret;
u32 major, minor;
unsigned long max_clk;
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_gem_vm *vm = kms->vm;
- if (vm) {
- vm->mmu->funcs->detach(vm->mmu);
- msm_gem_vm_put(vm);
+ if (kms->vm) {
+ struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(kms->vm);
}
mdp_kms_destroy(&mdp5_kms->base);
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms = priv->kms;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
/* DSI 6G TX buffer*/
struct drm_gem_object *tx_gem_obj;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
/* DSI v2 TX buffer */
void *tx_buf;
uint64_t iova;
u8 *data;
- msm_host->vm = msm_gem_vm_get(priv->kms->vm);
+ msm_host->vm = drm_gpuvm_get(priv->kms->vm);
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
msm_host->vm,
if (msm_host->tx_gem_obj) {
msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm);
- msm_gem_vm_put(msm_host->vm);
+ drm_gpuvm_put(msm_host->vm);
msm_host->tx_gem_obj = NULL;
msm_host->vm = NULL;
}
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_gem_vm;
-struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb)
{
struct msm_drm_private *priv = fb->dev->dev_private;
- struct msm_gem_vm *vm = priv->kms->vm;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb)
{
struct msm_drm_private *priv = fb->dev->dev_private;
- struct msm_gem_vm *vm = priv->kms->vm;
+ struct drm_gpuvm *vm = priv->kms->vm;
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close);
-static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm)
+static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
msm_gem_assert_locked(obj);
- drm_gpuvm_resv_assert_held(&vm->base);
+ drm_gpuvm_resv_assert_held(vm);
- struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj);
+ struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(vm, obj);
if (vm_bo) {
struct drm_gpuva *vma;
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
- if (vma->vm != &vm->base)
+ if (vma->vm != vm)
continue;
- msm_gem_vma_purge(to_msm_vma(vma));
- msm_gem_vma_close(to_msm_vma(vma));
+ msm_gem_vma_purge(vma);
+ msm_gem_vma_close(vma);
break;
}
msecs_to_jiffies(1000));
msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
- put_iova_spaces(obj, &ctx->vm->base, true);
+ put_iova_spaces(obj, ctx->vm, true);
detach_vm(obj, ctx->vm);
drm_exec_fini(&exec); /* drop locks */
}
return offset;
}
-static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *vma;
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
- if (vma->vm == &vm->base) {
+ if (vma->vm == vm) {
/* lookup_vma() should only be used in paths
* with at most one vma per vm
*/
GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
- return to_msm_vma(vma);
+ return vma;
}
}
}
drm_gpuvm_bo_get(vm_bo);
drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
- struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-
- msm_gem_vma_purge(msm_vma);
+ msm_gem_vma_purge(vma);
if (close)
- msm_gem_vma_close(msm_vma);
+ msm_gem_vma_close(vma);
}
drm_gpuvm_bo_put(vm_bo);
}
}
-static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_vm *vm,
- u64 range_start, u64 range_end)
+static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm, u64 range_start,
+ u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
msm_gem_assert_locked(obj);
if (!vma) {
vma = msm_gem_vma_new(vm, obj, range_start, range_end);
} else {
- GEM_WARN_ON(vma->base.va.addr < range_start);
- GEM_WARN_ON((vma->base.va.addr + obj->size) > range_end);
+ GEM_WARN_ON(vma->va.addr < range_start);
+ GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
}
return vma;
}
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
update_lru_active(obj);
}
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm)
{
return get_vma_locked(obj, vm, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
int ret;
msm_gem_assert_locked(obj);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
- *iova = vma->base.va.addr;
+ *iova = vma->va.addr;
pin_obj_locked(obj);
}
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova,
- u64 range_start, u64 range_end)
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct drm_exec exec;
int ret;
}
/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova)
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
}
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
struct drm_exec exec;
int ret = 0;
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->base.va.addr;
+ *iova = vma->va.addr;
}
drm_exec_fini(&exec); /* drop locks */
}
static int clear_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+ struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma = lookup_vma(obj, vm);
+ struct drm_gpuva *vma = lookup_vma(obj, vm);
if (!vma)
return 0;
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t iova)
+ struct drm_gpuvm *vm, uint64_t iova)
{
struct drm_exec exec;
int ret = 0;
if (!iova) {
ret = clear_iova(obj, vm);
} else {
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->base.va.addr != iova)) {
+ } else if (GEM_WARN_ON(vma->va.addr != iova)) {
clear_iova(obj, vm);
ret = -EBUSY;
}
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
struct drm_exec exec;
msm_gem_lock_vm_and_obj(&exec, obj, vm);
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_vm *vm,
- struct drm_gem_object **bo, uint64_t *iova)
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
}
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_vm *vm)
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
{
if (IS_ERR_OR_NULL(bo))
return;
};
#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base)
-struct msm_gem_vm *
-msm_gem_vm_get(struct msm_gem_vm *vm);
-
-void msm_gem_vm_put(struct msm_gem_vm *vm);
-
-struct msm_gem_vm *
+struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
u64 va_start, u64 va_size, bool managed);
};
#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base)
-struct msm_gem_vma *
-msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj,
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj,
u64 range_start, u64 range_end);
-void msm_gem_vma_purge(struct msm_gem_vma *vma);
-int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
-void msm_gem_vma_close(struct msm_gem_vma *vma);
+void msm_gem_vma_purge(struct drm_gpuva *vma);
+int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size);
+void msm_gem_vma_close(struct drm_gpuva *vma);
struct msm_gem_object {
struct drm_gem_object base;
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
void msm_gem_unpin_active(struct drm_gem_object *obj);
-struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
- struct msm_gem_vm *vm);
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova);
-int msm_gem_set_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t iova);
+struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct drm_gpuvm *vm);
+int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova,
- u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm, uint64_t *iova);
-void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_vm *vm);
+ struct drm_gpuvm *vm, uint64_t *iova,
+ u64 range_start, u64 range_end);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
+ uint64_t *iova);
+void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm);
void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_vm *vm,
- struct drm_gem_object **bo, uint64_t *iova);
-void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_vm *vm);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ struct drm_gpuvm *vm, struct drm_gem_object **bo,
+ uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
static inline int
msm_gem_lock_vm_and_obj(struct drm_exec *exec,
struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+ struct drm_gpuvm *vm)
{
int ret = 0;
drm_exec_init(exec, 0, 2);
drm_exec_until_all_locked (exec) {
- ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(&vm->base));
- if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base)))
+ ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm));
+ if (!ret && (obj->resv != drm_gpuvm_resv(vm)))
ret = drm_exec_lock_obj(exec, obj);
drm_exec_retry_on_contention(exec);
if (GEM_WARN_ON(ret))
struct kref ref;
struct drm_device *dev;
struct msm_gpu *gpu;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
struct list_head node; /* node in ring submit list */
struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
drm_exec_until_all_locked (&submit->exec) {
ret = drm_exec_lock_obj(&submit->exec,
- drm_gpuvm_resv_obj(&submit->vm->base));
+ drm_gpuvm_resv_obj(submit->vm));
drm_exec_retry_on_contention(&submit->exec);
if (ret)
goto error;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
- struct msm_gem_vma *vma;
+ struct drm_gpuva *vma;
/* if locking succeeded, pin bo: */
vma = msm_gem_get_vma_locked(obj, submit->vm);
if (ret)
break;
- submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
- submit->bos[i].iova = vma->base.va.addr;
+ submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
+ submit->bos[i].iova = vma->va.addr;
}
/*
kfree(vm);
}
-
-void msm_gem_vm_put(struct msm_gem_vm *vm)
-{
- if (vm)
- drm_gpuvm_put(&vm->base);
-}
-
-struct msm_gem_vm *
-msm_gem_vm_get(struct msm_gem_vm *vm)
-{
- if (!IS_ERR_OR_NULL(vm))
- drm_gpuvm_get(&vm->base);
-
- return vm;
-}
-
/* Actually unmap memory for the vma */
-void msm_gem_vma_purge(struct msm_gem_vma *vma)
+void msm_gem_vma_purge(struct drm_gpuva *vma)
{
- struct msm_gem_vm *vm = to_msm_vm(vma->base.vm);
- unsigned size = vma->base.va.range;
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ unsigned size = vma->va.range;
/* Don't do anything if the memory isn't mapped */
- if (!vma->mapped)
+ if (!msm_vma->mapped)
return;
- vm->mmu->funcs->unmap(vm->mmu, vma->base.va.addr, size);
+ vm->mmu->funcs->unmap(vm->mmu, vma->va.addr, size);
- vma->mapped = false;
+ msm_vma->mapped = false;
}
/* Map and pin vma: */
int
-msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
+msm_gem_vma_map(struct drm_gpuva *vma, int prot,
struct sg_table *sgt, int size)
{
- struct msm_gem_vm *vm = to_msm_vm(vma->base.vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
int ret;
- if (GEM_WARN_ON(!vma->base.va.addr))
+ if (GEM_WARN_ON(!vma->va.addr))
return -EINVAL;
- if (vma->mapped)
+ if (msm_vma->mapped)
return 0;
- vma->mapped = true;
+ msm_vma->mapped = true;
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
- ret = vm->mmu->funcs->map(vm->mmu, vma->base.va.addr, sgt, size, prot);
+ ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot);
if (ret) {
- vma->mapped = false;
+ msm_vma->mapped = false;
}
return ret;
}
/* Close an iova. Warn if it is still in use */
-void msm_gem_vma_close(struct msm_gem_vma *vma)
+void msm_gem_vma_close(struct drm_gpuva *vma)
{
- struct msm_gem_vm *vm = to_msm_vm(vma->base.vm);
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
- GEM_WARN_ON(vma->mapped);
+ GEM_WARN_ON(msm_vma->mapped);
drm_gpuvm_resv_assert_held(&vm->base);
- if (vma->base.va.addr)
- drm_mm_remove_node(&vma->node);
+ if (vma->va.addr && vm->managed)
+ drm_mm_remove_node(&msm_vma->node);
- drm_gpuva_remove(&vma->base);
- drm_gpuva_unlink(&vma->base);
+ drm_gpuva_remove(vma);
+ drm_gpuva_unlink(vma);
kfree(vma);
}
/* Create a new vma and allocate an iova for it */
-struct msm_gem_vma *
-msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj,
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
u64 range_start, u64 range_end)
{
+ struct msm_gem_vm *vm = to_msm_vm(gpuvm);
struct drm_gpuvm_bo *vm_bo;
struct msm_gem_vma *vma;
int ret;
drm_gpuva_link(&vma->base, vm_bo);
GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
- return vma;
+ return &vma->base;
err_va_remove:
drm_gpuva_remove(&vma->base);
* handles virtual address allocation, and both async and sync operations
* are supported.
*/
-struct msm_gem_vm *
+struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
u64 va_start, u64 va_size, bool managed)
{
drm_mm_init(&vm->mm, va_start, va_size);
- return vm;
+ return &vm->base;
err_free_vm:
kfree(vm);
if (state->fault_info.ttbr0) {
struct msm_gpu_fault_info *info = &state->fault_info;
- struct msm_mmu *mmu = submit->vm->mmu;
+ struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
&info->asid);
/* Increment the fault counts */
submit->queue->faults++;
if (submit->vm)
- submit->vm->faults++;
+ to_msm_vm(submit->vm)->faults++;
get_comm_cmdline(submit, &comm, &cmd);
}
/* Return a new address space for a msm_drm_private instance */
-struct msm_gem_vm *
+struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
{
- struct msm_gem_vm *vm = NULL;
+ struct drm_gpuvm *vm = NULL;
+
if (!gpu)
return NULL;
if (gpu->funcs->create_private_vm) {
vm = gpu->funcs->create_private_vm(gpu);
if (!IS_ERR(vm))
- vm->pid = get_pid(task_pid(task));
+ to_msm_vm(vm)->pid = get_pid(task_pid(task));
}
if (IS_ERR_OR_NULL(vm))
- vm = msm_gem_vm_get(gpu->vm);
+ vm = drm_gpuvm_get(gpu->vm);
return vm;
}
msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);
if (!IS_ERR_OR_NULL(gpu->vm)) {
- gpu->vm->mmu->funcs->detach(gpu->vm->mmu);
- msm_gem_vm_put(gpu->vm);
+ struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
+ mmu->funcs->detach(mmu);
+ drm_gpuvm_put(gpu->vm);
}
if (gpu->worker) {
/* note: gpu_set_freq() can assume that we have been pm_resumed */
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
- struct msm_gem_vm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
- struct msm_gem_vm *(*create_private_vm)(struct msm_gpu *gpu);
+ struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/**
void __iomem *mmio;
int irq;
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
int queueid;
/** @vm: the per-process GPU address-space */
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
/** @kref: the reference count */
struct kref ref;
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
-struct msm_gem_vm *
+struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task);
void msm_gpu_cleanup(struct msm_gpu *gpu);
return -ENOSYS;
}
-struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
{
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
return vm;
}
- msm_mmu_set_fault_handler(vm->mmu, kms, msm_kms_fault_handler);
+ msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler);
return vm;
}
atomic_t fault_snapshot_capture;
/* mapper-id used to request GEM buffer mapped for scanout: */
- struct msm_gem_vm *vm;
+ struct drm_gpuvm *vm;
/* disp snapshot support */
struct kthread_worker *dump_worker;
kfree(ctx->entities[i]);
}
- msm_gem_vm_put(ctx->vm);
+ drm_gpuvm_put(ctx->vm);
kfree(ctx->comm);
kfree(ctx->cmdline);
kfree(ctx);