*
* This function initializes the GPU SVM.
*
+ * Note: If only using the simple drm_gpusvm_pages API (get/unmap/free),
+ * then only @gpusvm, @name, and @drm are expected. However, the same base
+ * @gpusvm can also be used with both modes together, in which case the full
+ * setup is needed; the core drm_gpusvm_pages API simply never uses the other
+ * fields. See the usage sketch below.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm, void *device_private_page_owner,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks)
{
- if (!ops->invalidate || !num_chunks)
- return -EINVAL;
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size ||
+ device_private_page_owner)
+ return -EINVAL;
+ }
gpusvm->name = name;
gpusvm->drm = drm;
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
- mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
drm_gpusvm_range_remove(gpusvm, range);
}
- mmdrop(gpusvm->mm);
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
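
A minimal usage sketch of the two init modes described in the note above. This is
illustrative only and not part of the patch: struct foo_device, foo_gpusvm_ops and
foo_chunk_sizes are hypothetical driver-side names, and the full drm_gpusvm_init()
parameter list is assumed as shown in the hunk above.

struct foo_device {
	struct drm_device drm;
	struct drm_gpusvm gpusvm;
	void *pagemap_owner;
};

/* Descending powers of two, ending in SZ_4K, per the chunk_sizes contract. */
static const unsigned long foo_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };

static int foo_svm_init(struct foo_device *foo, struct mm_struct *mm)
{
	if (!mm)
		/* Pages-only mode: only @gpusvm, @name and @drm are used. */
		return drm_gpusvm_init(&foo->gpusvm, "foo-pages", &foo->drm,
				       NULL, NULL, 0, 0, 0, NULL, NULL, 0);

	/*
	 * Full SVM mode: ops->invalidate and at least one chunk size are
	 * mandatory. foo_gpusvm_ops is a driver ops table with .invalidate
	 * set (not shown).
	 */
	return drm_gpusvm_init(&foo->gpusvm, "foo-svm", &foo->drm, mm,
			       foo->pagemap_owner, 0, TASK_SIZE, SZ_2M,
			       &foo_gpusvm_ops, foo_chunk_sizes,
			       ARRAY_SIZE(foo_chunk_sizes));
}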
}
}
+/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
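A sketch of a driver teardown path for the pages-only mode, using the new export.
Not part of the patch: struct foo_binding, its fields, and struct foo_device from
the earlier sketch are hypothetical placeholders.

struct foo_binding {
	struct foo_device *foo;
	struct mmu_interval_notifier notifier;
	struct drm_gpusvm_pages svm_pages;
	unsigned long npages;
};

static void foo_binding_destroy(struct foo_binding *bind)
{
	/* Stop invalidations first, then drop the DMA mappings and array. */
	mmu_interval_notifier_remove(&bind->notifier);
	drm_gpusvm_free_pages(&bind->foo->gpusvm, &bind->svm_pages,
			      bind->npages);
	kfree(bind);
}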
/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
*
* Return: 0 on success, negative error code on failure.
*/
-static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_pages *svm_pages,
- struct mm_struct *mm,
- struct mmu_interval_notifier *notifier,
- unsigned long pages_start,
- unsigned long pages_end,
- const struct drm_gpusvm_ctx *ctx)
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
{
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
goto retry;
return err;
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
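
A sketch of a fault/bind path using the now-exported drm_gpusvm_get_pages().
Hypothetical and not part of the patch: foo_program_pagetables() and the
foo_binding struct from the earlier sketch are placeholders; the notifier is
assumed to already be registered against [start, end).

static int foo_bind_range(struct foo_binding *bind, struct mm_struct *mm,
			  unsigned long start, unsigned long end,
			  bool read_only)
{
	struct drm_gpusvm_ctx ctx = { .read_only = read_only };
	int err;

	/* Faults in the CPU range and DMA-maps it into bind->svm_pages. */
	err = drm_gpusvm_get_pages(&bind->foo->gpusvm, &bind->svm_pages, mm,
				   &bind->notifier, start, end, &ctx);
	if (err)
		return err;

	bind->npages = (end - start) >> PAGE_SHIFT;

	/* Program GPU page tables from bind->svm_pages (driver specific). */
	return foo_program_pagetables(bind->foo, bind);
}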
/**
* drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
* Must be called in the invalidate() callback of the corresponding notifier for
* IOMMU security model.
*/
-static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_pages *svm_pages,
- unsigned long npages,
- const struct drm_gpusvm_ctx *ctx)
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
{
if (ctx->in_notifier)
lockdep_assert_held_write(&gpusvm->notifier_lock);
if (!ctx->in_notifier)
drm_gpusvm_notifier_unlock(gpusvm);
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
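
A sketch of the matching mmu_interval_notifier invalidate() callback, showing the
@in_notifier contract: the caller is expected to already hold gpusvm->notifier_lock
in write mode, matching the lockdep assert above. Hypothetical and not part of the
patch: foo_zap_pagetables() and the foo_binding layout are assumptions, and the
exact locking scheme is driver specific.

static bool foo_notifier_invalidate(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq)
{
	struct foo_binding *bind = container_of(mni, struct foo_binding,
						notifier);
	struct foo_device *foo = bind->foo;
	struct drm_gpusvm_ctx ctx = { .in_notifier = true };

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&foo->gpusvm.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	/* Zap the GPU mappings first, then drop the CPU page references. */
	foo_zap_pagetables(foo, bind);
	drm_gpusvm_unmap_pages(&foo->gpusvm, &bind->svm_pages, bind->npages,
			       &ctx);
	up_write(&foo->gpusvm.notifier_lock);

	return true;
}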
/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
const struct mmu_notifier_range *mmu_range);
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages);
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM