*/
#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_pagemap.h>
+#include <drm/drm_pagemap_util.h>
#include "xe_bo.h"
#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"
#include "xe_vram_types.h"
+static int xe_svm_get_pagemaps(struct xe_vm *vm);
+
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
/*
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *page_to_vr(struct page *page)
+static struct xe_vram_region *xe_pagemap_to_vr(struct xe_pagemap *xpagemap)
{
- return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
+ return xpagemap->vr;
}
-static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
- struct page *page)
+static struct xe_pagemap *xe_page_to_pagemap(struct page *page)
{
- u64 dpa;
+ return container_of(page_pgmap(page), struct xe_pagemap, pagemap);
+}
+
+static struct xe_vram_region *xe_page_to_vr(struct page *page)
+{
+ return xe_pagemap_to_vr(xe_page_to_pagemap(page));
+}
+
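+/*
+ * Translate a device-private page to its device physical address (dpa):
+ * take the page's host physical address, subtract the hpa_base of the
+ * remapped region and relocate the offset onto the VRAM region's dpa_base.
+ */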
+static u64 xe_page_to_dpa(struct page *page)
+{
+ struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
+ struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
+ u64 hpa_base = xpagemap->hpa_base;
u64 pfn = page_to_pfn(page);
u64 offset;
+ u64 dpa;
xe_assert(vr->xe, is_device_private_page(page));
- xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+ xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= hpa_base);
- offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
+ offset = (pfn << PAGE_SHIFT) - hpa_base;
dpa = vr->dpa_base + offset;
return dpa;
continue;
if (!vr && spage) {
- vr = page_to_vr(spage);
+ vr = xe_page_to_vr(spage);
gt = xe_migrate_exec_queue(vr->migrate)->gt;
xe = vr->xe;
}
- XE_WARN_ON(spage && page_to_vr(spage) != vr);
+ XE_WARN_ON(spage && xe_page_to_vr(spage) != vr);
/*
* CPU page and device page valid, capture physical address on
* device pages.
*/
if (pagemap_addr[i].addr && spage) {
- __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
+ __vram_addr = xe_page_to_dpa(spage);
if (vram_addr == XE_VRAM_ADDR_INVALID) {
vram_addr = __vram_addr;
pos = i;
xe_pm_runtime_put(xe);
}
-static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
+static u64 block_offset_to_pfn(struct drm_pagemap *dpagemap, u64 offset)
{
- return PHYS_PFN(offset + vr->hpa_base);
+ struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
+
+ return PHYS_PFN(offset + xpagemap->hpa_base);
}
static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
struct drm_buddy *buddy = vram_to_buddy(vr);
- u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
+ u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap,
+ drm_buddy_block_offset(block));
int i;
for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
.copy_to_ram = xe_svm_copy_to_ram,
};
+#else
+static int xe_svm_get_pagemaps(struct xe_vm *vm)
+{
+ return 0;
+}
#endif
static const struct drm_gpusvm_ops gpusvm_ops = {
SZ_4K,
};
+static void xe_pagemap_put(struct xe_pagemap *xpagemap)
+{
+ drm_pagemap_put(&xpagemap->dpagemap);
+}
+
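+/* Drop the per-tile pagemap references obtained by xe_svm_get_pagemaps(). */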
+static void xe_svm_put_pagemaps(struct xe_vm *vm)
+{
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ struct xe_pagemap *xpagemap = vm->svm.pagemaps[id];
+
+ if (xpagemap)
+ xe_pagemap_put(xpagemap);
+ vm->svm.pagemaps[id] = NULL;
+ }
+}
+
/**
* xe_svm_init() - SVM initialize
* @vm: The VM.
INIT_WORK(&vm->svm.garbage_collector.work,
xe_svm_garbage_collector_work_func);
+ err = xe_svm_get_pagemaps(vm);
+ if (err)
+ return err;
+
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
current->mm, 0, vm->size,
xe_modparam.svm_notifier_size * SZ_1M,
&gpusvm_ops, fault_chunk_sizes,
ARRAY_SIZE(fault_chunk_sizes));
drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+
+ if (err) {
+ xe_svm_put_pagemaps(vm);
+ return err;
+ }
} else {
err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
&vm->xe->drm, NULL, 0, 0, 0, NULL,
{
xe_assert(vm->xe, xe_vm_is_closed(vm));
flush_work(&vm->svm.garbage_collector.work);
+ xe_svm_put_pagemaps(vm);
}
/**
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_vram_region *vr = container_of(dpagemap->pagemap, typeof(*vr), pagemap);
+ struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
+ struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
-{
- return tile->mem.vram->dpagemap;
-}
-
/**
* xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
* @vma: Pointer to the xe_vma structure containing memory attributes
return NULL;
if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
- return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
+ return IS_DGFX(tile_to_xe(tile)) ? xe_tile_local_pagemap(tile) : NULL;
/* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
return NULL;
xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = tile_local_pagemap(tile);
+ dpagemap = xe_tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
dma_addr_t addr;
if (pgmap_dev == dev) {
- addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
+ addr = xe_page_to_dpa(page);
prot = XE_INTERCONNECT_VRAM;
} else {
addr = DMA_MAPPING_ERROR;
return drm_pagemap_addr_encode(addr, prot, order, dir);
}
-static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
- .device_map = xe_drm_pagemap_device_map,
- .populate_mm = xe_drm_pagemap_populate_mm,
-};
+static void xe_pagemap_destroy_work(struct work_struct *work)
+{
+ struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
+ struct dev_pagemap *pagemap = &xpagemap->pagemap;
+ struct drm_device *drm = xpagemap->dpagemap.drm;
+ int idx;
-static void xe_devm_release(void *data)
+ /*
+ * Only unmap / release if the devm_ release actions haven't run yet.
+ * Otherwise the devm_ callbacks have already released the resources,
+ * or will do so shortly.
+ */
+ if (drm_dev_enter(drm, &idx)) {
+ devm_memunmap_pages(drm->dev, pagemap);
+ devm_release_mem_region(drm->dev, pagemap->range.start,
+ pagemap->range.end - pagemap->range.start + 1);
+ drm_dev_exit(idx);
+ }
+ kfree(xpagemap);
+}
+
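+/*
+ * drm_pagemap .destroy() callback. Destruction may be requested from
+ * atomic or reclaim context, in which case the teardown is deferred to
+ * the device's destroy workqueue.
+ */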
+static void xe_pagemap_destroy(struct drm_pagemap *dpagemap, bool from_atomic_or_reclaim)
{
- struct xe_vram_region *vr = data;
+ struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
+ struct xe_device *xe = to_xe_device(dpagemap->drm);
- drm_pagemap_put(vr->dpagemap);
- vr->dpagemap = NULL;
+ if (from_atomic_or_reclaim)
+ queue_work(xe->destroy_wq, &xpagemap->destroy_work);
+ else
+ xe_pagemap_destroy_work(&xpagemap->destroy_work);
}
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+ .device_map = xe_drm_pagemap_device_map,
+ .populate_mm = xe_drm_pagemap_populate_mm,
+ .destroy = xe_pagemap_destroy,
+};
+
/**
- * xe_devm_add: Remap and provide memmap backing for device memory
- * @tile: tile that the memory region belongs to
- * @vr: vram memory region to remap
+ * xe_pagemap_create() - Create a struct xe_pagemap object
+ * @xe: The xe device.
+ * @vr: Back-pointer to the struct xe_vram_region.
*
- * This remap device memory to host physical address space and create
- * struct page to back device memory
+ * Allocate and initialize a struct xe_pagemap. On successful return,
+ * call drm_pagemap_put() on the embedded struct drm_pagemap to drop
+ * the reference when it is no longer needed.
*
- * Return: 0 on success standard error code otherwise
+ * Return: Pointer to a struct xe_pagemap if successful. Error pointer
+ * on failure.
*/
-int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+static struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_vram_region *vr)
{
- struct xe_device *xe = tile_to_xe(tile);
- struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct device *dev = xe->drm.dev;
+ struct xe_pagemap *xpagemap;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap *dpagemap;
struct resource *res;
void *addr;
- int ret;
+ int err;
+
+ xpagemap = kzalloc(sizeof(*xpagemap), GFP_KERNEL);
+ if (!xpagemap)
+ return ERR_PTR(-ENOMEM);
+
+ pagemap = &xpagemap->pagemap;
+ dpagemap = &xpagemap->dpagemap;
+ INIT_WORK(&xpagemap->destroy_work, xe_pagemap_destroy_work);
+ xpagemap->vr = vr;
+
+ err = drm_pagemap_init(dpagemap, pagemap, &xe->drm, &xe_drm_pagemap_ops);
+ if (err)
+ goto out_no_dpagemap;
res = devm_request_free_mem_region(dev, &iomem_resource,
vr->usable_size);
if (IS_ERR(res)) {
- ret = PTR_ERR(res);
- return ret;
+ err = PTR_ERR(res);
+ goto out_err;
}
- vr->dpagemap = drm_pagemap_create(&xe->drm, &vr->pagemap,
- &xe_drm_pagemap_ops);
- if (IS_ERR(vr->dpagemap)) {
- drm_err(&xe->drm, "Failed to create drm_pagemap tile %d memory: %pe\n",
- tile->id, vr->dpagemap);
- ret = PTR_ERR(vr->dpagemap);
- goto out_no_dpagemap;
+ pagemap->type = MEMORY_DEVICE_PRIVATE;
+ pagemap->range.start = res->start;
+ pagemap->range.end = res->end;
+ pagemap->nr_range = 1;
+ pagemap->owner = xe_svm_devm_owner(xe);
+ pagemap->ops = drm_pagemap_pagemap_ops_get();
+ addr = devm_memremap_pages(dev, pagemap);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ devm_release_mem_region(dev, res->start, res->end - res->start + 1);
+ goto out_err;
}
- ret = devm_add_action_or_reset(dev, xe_devm_release, vr);
- if (ret)
- goto out_no_dpagemap;
+ xpagemap->hpa_base = res->start;
+ return xpagemap;
- vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
- vr->pagemap.range.start = res->start;
- vr->pagemap.range.end = res->end;
- vr->pagemap.nr_range = 1;
- vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
- vr->pagemap.owner = xe_svm_devm_owner(xe);
- addr = devm_memremap_pages(dev, &vr->pagemap);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
- tile->id, ERR_PTR(ret));
- goto out_failed_memremap;
+out_err:
+ drm_pagemap_put(dpagemap);
+ return ERR_PTR(err);
+
+out_no_dpagemap:
+ kfree(xpagemap);
+ return ERR_PTR(err);
+}
+
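+/*
+ * Illustrative lifetime sketch (not driver code; cache registration
+ * elided): the caller owns the reference returned by xe_pagemap_create()
+ * and releases it through the embedded drm_pagemap:
+ *
+ *	struct xe_pagemap *xpagemap = xe_pagemap_create(xe, vr);
+ *
+ *	if (IS_ERR(xpagemap))
+ *		return PTR_ERR(xpagemap);
+ *	...
+ *	drm_pagemap_put(&xpagemap->dpagemap);
+ */
+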
+/**
+ * xe_pagemap_find_or_create() - Find or create a struct xe_pagemap
+ * @xe: The xe device.
+ * @cache: The struct xe_pagemap_cache.
+ * @vr: The VRAM region.
+ *
+ * Check if there is an xe_pagemap already in use for this tile, and if so,
+ * return it.
+ * If not, check if there is a cached xe_pagemap for this tile, and if so,
+ * cancel its destruction, re-initialize it and return it.
+ * Finally, if there is neither a cached nor an already used pagemap, create
+ * one and register it in the tile's pagemap cache.
+ *
+ * Note that this function is typically called from within an IOCTL, and any
+ * waits are therefore carried out interruptibly if possible.
+ *
+ * Return: A pointer to a struct xe_pagemap if successful, or an error
+ * pointer on failure.
+ */
+static struct xe_pagemap *
+xe_pagemap_find_or_create(struct xe_device *xe, struct drm_pagemap_cache *cache,
+ struct xe_vram_region *vr)
+{
+ struct drm_pagemap *dpagemap;
+ struct xe_pagemap *xpagemap;
+ int err;
+
+ err = drm_pagemap_cache_lock_lookup(cache);
+ if (err)
+ return ERR_PTR(err);
+
+ dpagemap = drm_pagemap_get_from_cache(cache);
+ if (IS_ERR(dpagemap)) {
+ xpagemap = ERR_CAST(dpagemap);
+ } else if (!dpagemap) {
+ xpagemap = xe_pagemap_create(xe, vr);
+ if (IS_ERR(xpagemap))
+ goto out_unlock;
+ drm_pagemap_cache_set_pagemap(cache, &xpagemap->dpagemap);
+ } else {
+ xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
+ }
+
+out_unlock:
+ drm_pagemap_cache_unlock_lookup(cache);
+ return xpagemap;
+}
+
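+/*
+ * Obtain a pagemap reference for each tile that has a VRAM region.
+ * Called from xe_svm_init() and balanced by xe_svm_put_pagemaps().
+ */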
+static int xe_svm_get_pagemaps(struct xe_vm *vm)
+{
+ struct xe_device *xe = vm->xe;
+ struct xe_pagemap *xpagemap = NULL;
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ struct xe_vram_region *vr;
+
+ if (!((BIT(id) << 1) & xe->info.mem_region_mask))
+ continue;
+
+ vr = xe_tile_to_vr(tile);
+ xpagemap = xe_pagemap_find_or_create(xe, vr->dpagemap_cache, vr);
+ if (IS_ERR(xpagemap))
+ break;
+ vm->svm.pagemaps[id] = xpagemap;
+ }
+
+ if (IS_ERR(xpagemap)) {
+ xe_svm_put_pagemaps(vm);
+ return PTR_ERR(xpagemap);
}
- vr->hpa_base = res->start;
- drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
- tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
return 0;
+}
-out_failed_memremap:
- drm_pagemap_put(vr->dpagemap);
-out_no_dpagemap:
- devm_release_mem_region(dev, res->start, resource_size(res));
- return ret;
+/**
+ * xe_pagemap_shrinker_create() - Create a drm_pagemap shrinker
+ * @xe: The xe device
+ *
+ * Create a drm_pagemap shrinker and register it with the xe device.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_pagemap_shrinker_create(struct xe_device *xe)
+{
+ xe->usm.dpagemap_shrinker = drm_pagemap_shrinker_create_devm(&xe->drm);
+ return PTR_ERR_OR_ZERO(xe->usm.dpagemap_shrinker);
}
+
+/**
+ * xe_pagemap_cache_create() - Create a drm_pagemap cache
+ * @tile: The tile to register the cache with
+ *
+ * Create a drm_pagemap cache and register it with the tile.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_pagemap_cache_create(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+
+ if (IS_DGFX(xe)) {
+ struct drm_pagemap_cache *cache =
+ drm_pagemap_cache_create_devm(xe->usm.dpagemap_shrinker);
+
+ if (IS_ERR(cache))
+ return PTR_ERR(cache);
+
+ tile->mem.vram->dpagemap_cache = cache;
+ }
+
+ return 0;
+}
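+
+/*
+ * Illustrative init-ordering sketch (the actual call sites are outside
+ * this patch): the device-wide shrinker must be created before the
+ * per-tile caches that are registered against it:
+ *
+ *	err = xe_pagemap_shrinker_create(xe);
+ *	if (err)
+ *		return err;
+ *
+ *	for_each_tile(tile, xe, id) {
+ *		err = xe_pagemap_cache_create(tile);
+ *		if (err)
+ *			return err;
+ *	}
+ */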
+
#else
-int xe_svm_alloc_vram(struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+
+int xe_pagemap_shrinker_create(struct xe_device *xe)
{
- return -EOPNOTSUPP;
+ return 0;
}
-int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+int xe_pagemap_cache_create(struct xe_tile *tile)
{
return 0;
}
+int xe_svm_alloc_vram(struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
return NULL;