git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/amdgpu: use user provided hmm_range buffer in amdgpu_ttm_tt_get_user_pages
authorSunil Khatri <sunil.khatri@amd.com>
Wed, 24 Sep 2025 06:53:26 +0000 (12:23 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 13 Oct 2025 18:14:28 +0000 (14:14 -0400)
Update amdgpu_ttm_tt_get_user_pages and all dependent functions, along
with their callers, to use a user-allocated hmm_range buffer instead of
having the hmm layer allocate the buffer.

This is needed to make the hmm_range pointers easily accessible
without going through the bo, which is a requirement for the
userqueue to lock the userptrs effectively.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index a2ca9acf8c4ea8eefd50d91ec88e046bc5cbdf29..70c83e10b9c4f3013e154b4e338cee3f86e01c0b 100644 (file)
@@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
                return 0;
        }
 
-       ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       if (unlikely(!range)) {
+               ret = -ENOMEM;
+               goto unregister_out;
+       }
+
+       ret = amdgpu_ttm_tt_get_user_pages(bo, range);
        if (ret) {
+               kfree(range);
                if (ret == -EAGAIN)
                        pr_debug("Failed to get user pages, try again\n");
                else
@@ -2566,9 +2573,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                        }
                }
 
+               mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL);
+               if (unlikely(!mem->range))
+                       return -ENOMEM;
                /* Get updated user pages */
-               ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+               ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
                if (ret) {
+                       kfree(mem->range);
+                       mem->range = NULL;
                        pr_debug("Failed %d to get user pages\n", ret);
 
                        /* Return -EFAULT bad address error as success. It will
index 2f6a96af7fb12b2efb3113feedf82f4afebcc1e0..59951d07570307e4f88176fec3126a836eb18025 100644 (file)
@@ -891,7 +891,11 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                bool userpage_invalidated = false;
                struct amdgpu_bo *bo = e->bo;
 
-               r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+               e->range = kzalloc(sizeof(*e->range), GFP_KERNEL);
+               if (unlikely(!e->range))
+                       return -ENOMEM;
+
+               r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
                if (r)
                        goto out_free_user_pages;
 
index b7ebae289beabb4988d3f844139ce99513e17ed4..b0c2a1434f032e4ebb8ca121839fd316f007efbc 100644 (file)
@@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                goto release_object;
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-               r = amdgpu_ttm_tt_get_user_pages(bo, &range);
-               if (r)
+               range = kzalloc(sizeof(*range), GFP_KERNEL);
+               if (unlikely(!range))
+                       return -ENOMEM;
+               r = amdgpu_ttm_tt_get_user_pages(bo, range);
+               if (r) {
+                       kfree(range);
                        goto release_object;
-
+               }
                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        goto user_pages_done;
index 2c6a6b858112f3167f947a53bd49c58f54b93d3d..53d405a92a14ba73139729413014ce859bac1baa 100644 (file)
@@ -168,18 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner,
-                              struct hmm_range **phmm_range)
+                              struct hmm_range *hmm_range)
 {
-       struct hmm_range *hmm_range;
        unsigned long end;
        unsigned long timeout;
        unsigned long *pfns;
        int r = 0;
 
-       hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
-       if (unlikely(!hmm_range))
-               return -ENOMEM;
-
        pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
        if (unlikely(!pfns)) {
                r = -ENOMEM;
@@ -221,15 +216,11 @@ retry:
        hmm_range->start = start;
        hmm_range->hmm_pfns = pfns;
 
-       *phmm_range = hmm_range;
-
        return 0;
 
 out_free_pfns:
        kvfree(pfns);
 out_free_range:
-       kfree(hmm_range);
-
        if (r == -EBUSY)
                r = -EAGAIN;
        return r;
index 953e1d06de20efc17bf5e7e830ebdfe7c832e1d6..c54e3c64251a442fa03310c135826018da82bda1 100644 (file)
@@ -34,7 +34,7 @@
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner,
-                              struct hmm_range **phmm_range);
+                              struct hmm_range *hmm_range);
 bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
index aa9ee5dffa45514b0133e25a521b9a8b66e5ec3d..890123bf8ee872958b5650f1d5416b9cc1585956 100644 (file)
@@ -706,10 +706,11 @@ struct amdgpu_ttm_tt {
  * memory and start HMM tracking CPU page table update
  *
  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that range points to valid memory and that it is freed.
  */
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                struct hmm_range **range)
+                                struct hmm_range *range)
 {
        struct ttm_tt *ttm = bo->tbo.ttm;
        struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -719,9 +720,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
        bool readonly;
        int r = 0;
 
-       /* Make sure get_user_pages_done() can cleanup gracefully */
-       *range = NULL;
-
        mm = bo->notifier.mm;
        if (unlikely(!mm)) {
                DRM_DEBUG_DRIVER("BO is not registered?\n");
index 0be2728aa872947ebecf9807fe6f482a2514a501..64109912ae9e9d5e75ab363ae42813168c1771bd 100644 (file)
@@ -192,14 +192,14 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                struct hmm_range **range);
+                                struct hmm_range *range);
 void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
                                      struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
                                       struct hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                              struct hmm_range **range)
+                                              struct hmm_range *range)
 {
        return -EPERM;
 }
index 9d72411c33791f95c9a1e22242da2dc86307a3f4..8c3787b00f364791e41715410b7dd20a61f9844a 100644 (file)
@@ -1737,12 +1737,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                        }
 
                        WRITE_ONCE(p->svms.faulting_task, current);
+                       hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
                        r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
                                                       readonly, owner,
-                                                      &hmm_range);
+                                                      hmm_range);
                        WRITE_ONCE(p->svms.faulting_task, NULL);
-                       if (r)
+                       if (r) {
+                               kfree(hmm_range);
                                pr_debug("failed %d to get svm range pages\n", r);
+                       }
                } else {
                        r = -EFAULT;
                }