return 0;
}
- range = kzalloc(sizeof(*range), GFP_KERNEL);
+ range = amdgpu_hmm_range_alloc();
if (unlikely(!range)) {
ret = -ENOMEM;
goto unregister_out;
ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
- kfree(range);
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
}
}
- mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL);
+ mem->range = amdgpu_hmm_range_alloc();
if (unlikely(!mem->range))
return -ENOMEM;
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
- kfree(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- e->range = kzalloc(sizeof(*e->range), GFP_KERNEL);
+ e->range = amdgpu_hmm_range_alloc();
if (unlikely(!e->range))
return -ENOMEM;
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- range = kzalloc(sizeof(*range), GFP_KERNEL);
+ range = amdgpu_hmm_range_alloc();
if (unlikely(!range))
return -ENOMEM;
r = amdgpu_ttm_tt_get_user_pages(bo, range);
if (r) {
- kfree(range);
+ amdgpu_hmm_range_free(range);
goto release_object;
}
r = amdgpu_bo_reserve(bo, true);
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
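+/*
+ * amdgpu_hmm_range_valid - check that the pages backing an hmm_range are
+ * still valid, i.e. the mmu notifier has not invalidated them since
+ * amdgpu_hmm_range_get_pages() faulted them in.
+ *
+ * Returns true if no retry is needed; false for a NULL range or when the
+ * notifier sequence has changed.
+ */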
+bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
{
- bool r;
+ if (!hmm_range)
+ return false;
+
+ return !mmu_interval_read_retry(hmm_range->notifier,
+ hmm_range->notifier_seq);
+}
+
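+/* Allocate a zero-initialized hmm_range; release it with amdgpu_hmm_range_free() */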
+struct hmm_range *amdgpu_hmm_range_alloc(void)
+{
+ return kzalloc(sizeof(struct hmm_range), GFP_KERNEL);
+}
+
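+/* Free an hmm_range and its pfn array; safe to call with a NULL pointer */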
+void amdgpu_hmm_range_free(struct hmm_range *hmm_range)
+{
+ if (!hmm_range)
+ return;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
kvfree(hmm_range->hmm_pfns);
kfree(hmm_range);
-
- return r;
}
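For illustration only (not part of the patch): a minimal sketch of the intended lifecycle of the new helpers, modelled on the call sites changed above. The function name example_userptr_get_pages() and its single bo argument are hypothetical.

/* Hypothetical caller, for illustration only. */
static int example_userptr_get_pages(struct amdgpu_bo *bo)
{
	struct hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc();
	if (unlikely(!range))
		return -ENOMEM;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (r) {
		amdgpu_hmm_range_free(range);
		return r;
	}

	/* ... use the faulted-in pages ... */

	/* If the notifier invalidated the range meanwhile, ask the caller to retry. */
	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;

	amdgpu_hmm_range_free(range);
	return r;
}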
uint64_t start, uint64_t npages, bool readonly,
void *owner,
struct hmm_range *hmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range);
+struct hmm_range *amdgpu_hmm_range_alloc(void);
+void amdgpu_hmm_range_free(struct hmm_range *hmm_range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
+{
+ return false;
+}
+
+static inline struct hmm_range *amdgpu_hmm_range_alloc(void)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct hmm_range *hmm_range) {}
#endif
#endif
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range *range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
}
WRITE_ONCE(p->svms.faulting_task, current);
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+ hmm_range = amdgpu_hmm_range_alloc();
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner,
hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
- kfree(hmm_range);
+ amdgpu_hmm_range_free(hmm_range);
pr_debug("failed %d to get svm range pages\n", r);
}
} else {
* Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (hmm_range && !amdgpu_hmm_range_valid(hmm_range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ amdgpu_hmm_range_free(hmm_range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");