struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
return 0;
}
- range = amdgpu_hmm_range_alloc();
+ range = amdgpu_hmm_range_alloc(NULL);
if (unlikely(!range)) {
ret = -ENOMEM;
goto unregister_out;
}
}
- mem->range = amdgpu_hmm_range_alloc();
+ mem->range = amdgpu_hmm_range_alloc(NULL);
if (unlikely(!mem->range))
return -ENOMEM;
/* Get updated user pages */
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
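/*
 * An assumption for clarity, not spelled out in the patch: in this BO-list
 * entry, `range` is expected to hold the amdgpu_hmm_range used to fault in
 * the entry's userptr pages, while `user_invalidated` records whether those
 * pages changed underneath the submission, as the page-comparison loop
 * further below checks.
 */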
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
-#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- e->range = amdgpu_hmm_range_alloc();
+ e->range = amdgpu_hmm_range_alloc(NULL);
if (unlikely(!e->range))
return -ENOMEM;
goto out_free_user_pages;
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- range = amdgpu_hmm_range_alloc();
+ range = amdgpu_hmm_range_alloc(NULL);
if (unlikely(!range))
return -ENOMEM;
r = amdgpu_ttm_tt_get_user_pages(bo, range);
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range *hmm_range)
+ struct amdgpu_hmm_range *range)
{
unsigned long end;
unsigned long timeout;
unsigned long *pfns;
int r = 0;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
return r;
}
-bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- if (!hmm_range)
+ if (!range)
return false;
- return !mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
}
-struct hmm_range *amdgpu_hmm_range_alloc(void)
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
{
- return kzalloc(sizeof(struct hmm_range), GFP_KERNEL);
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
}
-void amdgpu_hmm_range_free(struct hmm_range *hmm_range)
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
{
- if (!hmm_range)
+ if (!range)
return;
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ kvfree(range->hmm_range.hmm_pfns);
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
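/*
 * Illustrative sketch only, not part of the patch: the intended calling
 * pattern for the helpers above, assuming "amdgpu_hmm.h" is included and the
 * caller owns a registered mmu_interval_notifier. The range is allocated,
 * faulted in, consumed, and then re-checked with amdgpu_hmm_range_valid(); if
 * the CPU page tables changed in the meantime the caller retries.
 */
static int example_fault_user_pages(struct mmu_interval_notifier *notifier,
				    uint64_t start, uint64_t npages,
				    void *owner)
{
	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(NULL);
	if (unlikely(!range))
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(notifier, start, npages,
				       false /* readonly */, owner, range);
	if (r)
		goto out_free;

	/* ... consume range->hmm_range.hmm_pfns[0..npages-1] here ... */

	/* The snapshot is only usable if no invalidation raced with us. */
	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;

out_free:
	amdgpu_hmm_range_free(range);
	return r;
}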
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
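/*
 * Not defined by the patch, shown only for illustration: the core HMM code
 * continues to operate on the embedded struct hmm_range (&range->hmm_range),
 * and a hypothetical helper like this one could recover the wrapper, e.g. to
 * reach the referenced BO, using the usual container_of() idiom.
 */
static inline struct amdgpu_hmm_range *
to_amdgpu_hmm_range(struct hmm_range *hmm_range)
{
	return container_of(hmm_range, struct amdgpu_hmm_range, hmm_range);
}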
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
void *owner,
- struct hmm_range *hmm_range);
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
-bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range);
-struct hmm_range *amdgpu_hmm_range_alloc(void);
-void amdgpu_hmm_range_free(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
-static inline bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range)
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
return false;
}
-static inline struct hmm_range *amdgpu_hmm_range_alloc(void)
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
{
return NULL;
}
-static inline void amdgpu_hmm_range_free(struct hmm_range *hmm_range) {}
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
* that the range is valid memory and that it is freed too.
*/
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range *range)
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
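/*
 * Illustrative only, not from the patch: how the two TTM helpers above are
 * expected to pair up for a userptr BO. amdgpu_ttm_tt_get_user_pages() fills
 * the range's pfn array, amdgpu_ttm_tt_set_user_pages() publishes the
 * corresponding struct pages into ttm->pages, and passing a NULL range clears
 * the page array again (for example after an invalidation).
 */
static int example_refresh_userptr_pages(struct amdgpu_bo *bo,
					 struct amdgpu_hmm_range *range)
{
	int r;

	r = amdgpu_ttm_tt_get_user_pages(bo, range);
	if (r) {
		/* Drop any stale pages so nothing maps freed user memory. */
		amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, NULL);
		return r;
	}

	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
	return 0;
}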
/*
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range *range);
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct hmm_range *range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
}
WRITE_ONCE(p->svms.faulting_task, current);
- hmm_range = amdgpu_hmm_range_alloc();
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner,
- hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range))
+ r = -ENOMEM;
+ else
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
- amdgpu_hmm_range_free(hmm_range);
+ amdgpu_hmm_range_free(range);
pr_debug("failed %d to get svm range pages\n", r);
}
} else {
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
* Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && !amdgpu_hmm_range_valid(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
/* Free the hmm range */
- amdgpu_hmm_range_free(hmm_range);
+ amdgpu_hmm_range_free(range);
if (!r && !list_empty(&prange->child_list)) {
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"