return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ kfree(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
}
}
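
For reference, the ownership rule these hunks establish: the caller allocates the struct hmm_range, passes it in, and frees it on every error path. A minimal sketch of the pattern, assuming a bo that is already registered with the MMU notifier (the variable names and the bare returns are illustrative, not from any one call site):

	struct hmm_range *range;
	int ret;

	/* The caller now owns the range object... */
	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	ret = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (ret) {
		/* ...so the caller, not the callee, frees it on failure. */
		kfree(range);
		return ret;
	}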
+ mem->range = kzalloc(sizeof(*mem->range), GFP_KERNEL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ kfree(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
 * fail later with a VM fault if the GPU tries to access
 * it. Better than hanging indefinitely with stalled
 * user mode queues.
 */
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ e->range = kzalloc(sizeof(*e->range), GFP_KERNEL);
+ if (unlikely(!e->range)) {
+ r = -ENOMEM;
+ goto out_free_user_pages;
+ }
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
if (r)
goto out_free_user_pages;
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, &range);
- if (r)
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (unlikely(!range)) {
+ r = -ENOMEM;
+ goto release_object;
+ }
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ kfree(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range)
+ struct hmm_range *hmm_range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
unsigned long *pfns;
int r = 0;
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
-
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
r = -ENOMEM;
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range **phmm_range);
+ struct hmm_range *hmm_range);
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
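
With the signature change above, a caller of amdgpu_hmm_range_get_pages() supplies the storage itself and pairs the call with amdgpu_hmm_range_get_pages_done(). A hedged sketch of the full lifecycle (notifier, start, npages, readonly and owner stand for whatever the call site already has; the -EAGAIN mapping is an assumption):

	struct hmm_range *hmm_range;
	int r;

	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
	if (unlikely(!hmm_range))
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(notifier, start, npages,
				       readonly, owner, hmm_range);
	if (r) {
		kfree(hmm_range);
		return r;
	}

	/* ... consume hmm_range->hmm_pfns ... */

	/* Stop HMM tracking; false means the range was invalidated. */
	if (!amdgpu_hmm_range_get_pages_done(hmm_range))
		r = -EAGAIN;

	kfree(hmm_range);
	return r;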
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that @range points to valid memory and to free it afterwards.
*/
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range);
+ struct hmm_range *range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
struct hmm_range *range);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range **range)
+ struct hmm_range *range)
{
return -EPERM;
}
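
Tying the header changes back to the kerneldoc above: after a successful amdgpu_ttm_tt_get_user_pages(), the caller must invoke amdgpu_ttm_tt_get_user_pages_done() once and only once before freeing the range. A short sketch of that tail end (the -EAGAIN retry policy is an assumption; real call sites differ):

	/* ... after a successful amdgpu_ttm_tt_get_user_pages(bo, range) ... */

	/* Once and only once, per the comment above. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN; /* CPU page tables changed under us */

	kfree(range); /* caller owns the allocation */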
}
WRITE_ONCE(p->svms.faulting_task, current);
+ hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+ if (unlikely(!hmm_range))
+ r = -ENOMEM;
+ else
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner,
- &hmm_range);
+ hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
- if (r)
+ if (r) {
+ kfree(hmm_range);
+ hmm_range = NULL;
pr_debug("failed %d to get svm range pages\n", r);
+ }
} else {
r = -EFAULT;
}