drm/amdgpu: update the functions to use amdgpu version of hmm
author Sunil Khatri <sunil.khatri@amd.com>
Fri, 10 Oct 2025 12:39:57 +0000 (18:09 +0530)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 13 Oct 2025 18:14:36 +0000 (14:14 -0400)
At times we need a bo reference for hmm, so add a new struct
amdgpu_hmm_range which holds an hmm_range and an optional bo
member.

Use amdgpu_hmm_range instead of hmm_range and make the bo an
optional argument, so callers can choose whether the bo reference
is taken by the wrapper or handled explicitly by themselves.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
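
For orientation, a minimal caller-side sketch of the reworked API, assuming the signatures introduced in the diff below; example_get_user_pages() and its parameters are hypothetical and only illustrate the alloc/get-pages/validate/free pattern the converted call sites follow.

/*
 * Illustrative sketch only -- not part of this patch. The wrapper
 * functions and the embedded hmm_range field come from the diff below;
 * the function itself is hypothetical.
 */
#include "amdgpu_hmm.h"

static int example_get_user_pages(struct mmu_interval_notifier *notifier,
				  uint64_t start, uint64_t npages,
				  void *owner)
{
	struct amdgpu_hmm_range *range;
	int r;

	/* Passing NULL means the wrapper takes no extra bo reference. */
	range = amdgpu_hmm_range_alloc(NULL);
	if (unlikely(!range))
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(notifier, start, npages,
				       false /* readonly */, owner, range);
	if (r)
		goto out_free;

	/* Resulting pfns live in the embedded range->hmm_range.hmm_pfns[]. */
	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;

out_free:
	/* Drops the optional bo reference (if any) and the pfn array. */
	amdgpu_hmm_range_free(range);
	return r;
}

A caller that wants the range to keep its buffer alive would instead pass the bo, e.g. amdgpu_hmm_range_alloc(bo); amdgpu_hmm_range_free() then drops that reference via amdgpu_bo_unref().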
13 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.h

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 9e120c934cc17dd05b4dc9ed47d5297bb6faf901..8bdfcde2029b5348c66f617cd1e9493ccc7d2555 100644 (file)
@@ -71,7 +71,7 @@ struct kgd_mem {
        struct mutex lock;
        struct amdgpu_bo *bo;
        struct dma_buf *dmabuf;
-       struct hmm_range *range;
+       struct amdgpu_hmm_range *range;
        struct list_head attachments;
        /* protected by amdkfd_process_info.lock */
        struct list_head validate_list;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d0b1468a2ae180c8c960b47b643a555ed34060c4..dbdf33bc03b8e6bc54b5daaa815ce97c10b6a33f 100644 (file)
@@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
        struct ttm_operation_ctx ctx = { true, false };
-       struct hmm_range *range;
+       struct amdgpu_hmm_range *range;
        int ret = 0;
 
        mutex_lock(&process_info->lock);
@@ -1089,7 +1089,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
                return 0;
        }
 
-       range = amdgpu_hmm_range_alloc();
+       range = amdgpu_hmm_range_alloc(NULL);
        if (unlikely(!range)) {
                ret = -ENOMEM;
                goto unregister_out;
@@ -2573,7 +2573,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                        }
                }
 
-               mem->range = amdgpu_hmm_range_alloc();
+               mem->range = amdgpu_hmm_range_alloc(NULL);
                if (unlikely(!mem->range))
                        return -ENOMEM;
                /* Get updated user pages */
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index a716c9886c7407848b40a79ee202e1f85659460d..2b5e7c46a39d41143d6ec99162c2ab33ccfac004 100644 (file)
@@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry {
        struct amdgpu_bo                *bo;
        struct amdgpu_bo_va             *bo_va;
        uint32_t                        priority;
-       struct hmm_range                *range;
+       struct amdgpu_hmm_range         *range;
        bool                            user_invalidated;
 };
 
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index c319d216421e82ac4564c85d5330b52a79eefb5c..ecdfe6cb36ccdc157439a77a29746a75dbda5eae 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/pagemap.h>
 #include <linux/sync_file.h>
 #include <linux/dma-buf.h>
-#include <linux/hmm.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -892,7 +891,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                bool userpage_invalidated = false;
                struct amdgpu_bo *bo = e->bo;
 
-               e->range = amdgpu_hmm_range_alloc();
+               e->range = amdgpu_hmm_range_alloc(NULL);
                if (unlikely(!e->range))
                        return -ENOMEM;
 
@@ -901,7 +900,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        goto out_free_user_pages;
 
                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
-                       if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
+                       if (bo->tbo.ttm->pages[i] !=
+                               hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
                                userpage_invalidated = true;
                                break;
                        }
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4bc506f4924ab421253c927a1bfa42fe980ad40f..f2505ae5fd652ec248ef7127d1a34a9d5a5a752d 100644 (file)
@@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        struct drm_amdgpu_gem_userptr *args = data;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct drm_gem_object *gobj;
-       struct hmm_range *range;
+       struct amdgpu_hmm_range *range;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;
@@ -572,7 +572,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                goto release_object;
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-               range = amdgpu_hmm_range_alloc();
+               range = amdgpu_hmm_range_alloc(NULL);
                if (unlikely(!range))
                        return -ENOMEM;
                r = amdgpu_ttm_tt_get_user_pages(bo, range);
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index b582fd217bd0c9c4d4030c8fe7b20a3ec6245689..04f02e0c8bb31dc77320c0ac73c9b3ef8581ada2 100644 (file)
@@ -168,12 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner,
-                              struct hmm_range *hmm_range)
+                              struct amdgpu_hmm_range *range)
 {
        unsigned long end;
        unsigned long timeout;
        unsigned long *pfns;
        int r = 0;
+       struct hmm_range *hmm_range = &range->hmm_range;
 
        pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
        if (unlikely(!pfns)) {
@@ -226,25 +227,33 @@ out_free_range:
        return r;
 }
 
-bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
 {
-       if (!hmm_range)
+       if (!range)
                return false;
 
-       return !mmu_interval_read_retry(hmm_range->notifier,
-                                       hmm_range->notifier_seq);
+       return !mmu_interval_read_retry(range->hmm_range.notifier,
+                                       range->hmm_range.notifier_seq);
 }
 
-struct hmm_range *amdgpu_hmm_range_alloc(void)
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
 {
-       return kzalloc(sizeof(struct hmm_range), GFP_KERNEL);
+       struct amdgpu_hmm_range *range;
+
+       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       if (!range)
+               return NULL;
+
+       range->bo = amdgpu_bo_ref(bo);
+       return range;
 }
 
-void amdgpu_hmm_range_free(struct hmm_range *hmm_range)
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
 {
-       if (!hmm_range)
+       if (!range)
                return;
 
-       kvfree(hmm_range->hmm_pfns);
-       kfree(hmm_range);
+       kvfree(range->hmm_range.hmm_pfns);
+       amdgpu_bo_unref(&range->bo);
+       kfree(range);
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e85f912b89382d4f534833afd338c61eb3d52d64..140bc9cd57b422a1918e66d5e8c495be9a2df01d 100644 (file)
 #include <linux/interval_tree.h>
 #include <linux/mmu_notifier.h>
 
+struct amdgpu_hmm_range {
+       struct hmm_range hmm_range;
+       struct amdgpu_bo *bo;
+};
+
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               uint64_t start, uint64_t npages, bool readonly,
                               void *owner,
-                              struct hmm_range *hmm_range);
+                              struct amdgpu_hmm_range *range);
 
 #if defined(CONFIG_HMM_MIRROR)
-bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range);
-struct hmm_range *amdgpu_hmm_range_alloc(void);
-void amdgpu_hmm_range_free(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
 #else
@@ -52,17 +57,17 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
 
 static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
 
-static inline bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
 {
        return false;
 }
 
-static inline struct hmm_range *amdgpu_hmm_range_alloc(void)
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
 {
        return NULL;
 }
 
-static inline void amdgpu_hmm_range_free(struct hmm_range *hmm_range) {}
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
 #endif
 
 #endif
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0bd02234eca0037f138b0890bedc210f73437ba7..2f2a88979644b22480accaafb44508847473db83 100644 (file)
@@ -709,7 +709,7 @@ struct amdgpu_ttm_tt {
  * that range is a valid memory and it is freed too.
  */
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                struct hmm_range *range)
+                                struct amdgpu_hmm_range *range)
 {
        struct ttm_tt *ttm = bo->tbo.ttm;
        struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -762,12 +762,12 @@ out_unlock:
  * that backs user memory and will ultimately be mapped into the device
  * address space.
  */
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
 {
        unsigned long i;
 
        for (i = 0; i < ttm->num_pages; ++i)
-               ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
+               ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
 }
 
 /*
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d75866a389f1ead94e38e0a93c744ca24c4705c5..68e225efec3a81b6b83eb6846c24c30e095176ca 100644 (file)
@@ -28,6 +28,7 @@
 #include <drm/gpu_scheduler.h>
 #include <drm/ttm/ttm_placement.h>
 #include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
 
 #define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
 #define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
@@ -186,16 +187,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                struct hmm_range *range);
+                                struct amdgpu_hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                              struct hmm_range *range)
+                                              struct amdgpu_hmm_range *range)
 {
        return -EPERM;
 }
 #endif
 
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
                              uint64_t *user_addr);
 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 59a5a3fea65df8ea2cefdcda5c21e440e7336a09..46c84fc60af1ddbc13ee58a374514ffcbeefc822 100644 (file)
@@ -21,7 +21,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include <linux/types.h>
-#include <linux/hmm.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 #include <linux/migrate.h>
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2ce28bb6afd5fd6cdd0ed106991183..2b7fd442d29c61e32ab4833304d851375e9703c5 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/sched/mm.h>
-#include <linux/hmm.h>
 #include "kfd_priv.h"
 #include "kfd_svm.h"
 
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 91609dd5730f9a972a3d61be41d48f506ded0c06..f041643308ca49a7690d92ec5c48aff6cf9ccf2b 100644 (file)
@@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
        start = map_start << PAGE_SHIFT;
        end = (map_last + 1) << PAGE_SHIFT;
        for (addr = start; !r && addr < end; ) {
-               struct hmm_range *hmm_range = NULL;
+               struct amdgpu_hmm_range *range = NULL;
                unsigned long map_start_vma;
                unsigned long map_last_vma;
                struct vm_area_struct *vma;
@@ -1737,13 +1737,13 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                        }
 
                        WRITE_ONCE(p->svms.faulting_task, current);
-                       hmm_range = amdgpu_hmm_range_alloc();
+                       range = amdgpu_hmm_range_alloc(NULL);
                        r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
                                                       readonly, owner,
-                                                      hmm_range);
+                                                      range);
                        WRITE_ONCE(p->svms.faulting_task, NULL);
                        if (r) {
-                               amdgpu_hmm_range_free(hmm_range);
+                               amdgpu_hmm_range_free(range);
                                pr_debug("failed %d to get svm range pages\n", r);
                        }
                } else {
@@ -1753,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                if (!r) {
                        offset = (addr >> PAGE_SHIFT) - prange->start;
                        r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-                                             hmm_range->hmm_pfns);
+                                             range->hmm_range.hmm_pfns);
                        if (r)
                                pr_debug("failed %d to dma map range\n", r);
                }
@@ -1764,12 +1764,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                 * Overrride return value to TRY AGAIN only if prior returns
                 * were successful
                 */
-               if (hmm_range && !amdgpu_hmm_range_valid(hmm_range) && !r) {
+               if (range && !amdgpu_hmm_range_valid(range) && !r) {
                        pr_debug("hmm update the range, need validate again\n");
                        r = -EAGAIN;
                }
                /* Free the hmm range */
-               amdgpu_hmm_range_free(hmm_range);
+               amdgpu_hmm_range_free(range);
 
 
                if (!r && !list_empty(&prange->child_list)) {
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 01c7a487790444ef6451c255f43935b6a2a61915..a63dfc95b602b1662874df728e312bc7bc7e798e 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/sched/mm.h>
-#include <linux/hmm.h>
 #include "amdgpu.h"
 #include "kfd_priv.h"