From: Sunil Khatri
Date: Thu, 25 Sep 2025 09:01:54 +0000 (+0530)
Subject: drm/amdgpu/userqueue: validate userptrs for userqueues
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=42f148788469792df207751e2339ef2bb8a1e33e;p=thirdparty%2Flinux.git

drm/amdgpu/userqueue: validate userptrs for userqueues

Userptr mappings can be changed by the user at any time. Hence, while
locking all the BOs before the GPU starts processing, validate all the
userptr BOs as well.

Signed-off-by: Sunil Khatri
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 33c54ecd46d53..9d4751a39c20d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
 #include "amdgpu_userq_fence.h"
 
 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
@@ -856,12 +857,21 @@ static int
 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
 {
 	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+	bool invalidated = false, new_addition = false;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_device *adev = uq_mgr->adev;
+	struct amdgpu_hmm_range *range;
 	struct amdgpu_vm *vm = &fpriv->vm;
+	unsigned long key, tmp_key;
 	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_bo *bo;
 	struct drm_exec exec;
+	struct xarray xa;
 	int ret;
 
+	xa_init(&xa);
+
+retry_lock:
 	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
@@ -888,10 +898,72 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
 		goto unlock_all;
 	}
 
+	if (invalidated) {
+		xa_for_each(&xa, tmp_key, range) {
+			bo = range->bo;
+			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			if (ret)
+				goto unlock_all;
+
+			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			if (ret)
+				goto unlock_all;
+		}
+		invalidated = false;
+	}
+
 	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
 	if (ret)
 		goto unlock_all;
 
+	key = 0;
+	/* Validate User Ptr BOs */
+	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+		bo = bo_va->base.bo;
+
+		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+			continue;
+
+		range = xa_load(&xa, key);
+		if (range && range->bo != bo) {
+			xa_erase(&xa, key);
+			amdgpu_hmm_range_free(range);
+			range = NULL;
+		}
+
+		if (!range) {
+			range = amdgpu_hmm_range_alloc(bo);
+			if (!range) {
+				ret = -ENOMEM;
+				goto unlock_all;
+			}
+
+			xa_store(&xa, key, range, GFP_KERNEL);
+			new_addition = true;
+		}
+		key++;
+	}
+
+	if (new_addition) {
+		drm_exec_fini(&exec);
+		xa_for_each(&xa, tmp_key, range) {
+			if (!range)
+				continue;
+			bo = range->bo;
+			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+			if (ret)
+				goto unlock_all;
+		}
+
+		invalidated = true;
+		new_addition = false;
+		goto retry_lock;
+	}
+
 	ret = amdgpu_vm_update_pdes(adev, vm, false);
 	if (ret)
 		goto unlock_all;
@@ -911,6 +983,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
 
 unlock_all:
 	drm_exec_fini(&exec);
+	xa_for_each(&xa, tmp_key, range) {
+		if (!range)
+			continue;
+		bo = range->bo;
+		amdgpu_hmm_range_free(range);
+	}
+	xa_destroy(&xa);
 	return ret;
 }
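
Why the retry loop: pinning user pages may fault and sleep, which is
presumably why the patch calls drm_exec_fini() to drop every BO lock
before amdgpu_ttm_tt_get_user_pages(), then jumps back to retry_lock
with "invalidated" set so the freshly pinned BOs are revalidated once
the locks are held again. The following is a minimal, compilable
userspace sketch of just that control flow; struct bo,
pin_user_pages(), and the pinned/validated flags are illustrative
stand-ins, not the amdgpu API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_BOS 4

struct bo {
	bool is_userptr;
	bool pages_pinned;	/* stand-in for a pinned hmm range */
	bool validated;
};

/* Stand-in for page pinning: may fault and sleep, so it must run
 * with every BO lock dropped. */
static int pin_user_pages(struct bo *bo)
{
	bo->pages_pinned = true;
	return 0;
}

int main(void)
{
	struct bo bos[NUM_BOS] = {
		{ .is_userptr = false },
		{ .is_userptr = true },
		{ .is_userptr = false },
		{ .is_userptr = true },
	};
	bool invalidated = false, new_addition = false;
	int i;

retry_lock:
	/* "Take" all BO locks (drm_exec_until_all_locked in the patch). */

	if (invalidated) {
		/* Locks are held again: revalidate the BOs whose pages
		 * were pinned while the locks were dropped. */
		for (i = 0; i < NUM_BOS; i++)
			if (bos[i].pages_pinned)
				bos[i].validated = true;
		invalidated = false;
	}

	/* Walk the BO list and spot userptr BOs not yet pinned. */
	for (i = 0; i < NUM_BOS; i++)
		if (bos[i].is_userptr && !bos[i].pages_pinned)
			new_addition = true;

	if (new_addition) {
		/* Drop every lock (drm_exec_fini in the patch), pin, and
		 * retry from the top so the pages are validated under lock. */
		for (i = 0; i < NUM_BOS; i++)
			if (bos[i].is_userptr && !bos[i].pages_pinned)
				if (pin_user_pages(&bos[i]))
					return EXIT_FAILURE;
		invalidated = true;
		new_addition = false;
		goto retry_lock;
	}

	for (i = 0; i < NUM_BOS; i++)
		printf("bo %d: userptr=%d pinned=%d validated=%d\n",
		       i, bos[i].is_userptr, bos[i].pages_pinned,
		       bos[i].validated);
	return EXIT_SUCCESS;
}

In the patch itself the xarray plays the role of the pages_pinned flag
above: it carries each pinned amdgpu_hmm_range across the unlock/relock
cycle, keyed by the BO's position in the vm->done list, and a list that
changed in between is detected via the range->bo != bo check so the
stale range is freed and the BO is pinned again on the next pass.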