* Copyright © 2024 Intel Corporation
*/
+#include <linux/pci-p2pdma.h>
+
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap.h>
return dpa;
}
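+/*
+ * xe_page_to_pcie() - look up the PCIe bus address backing a device page.
+ * Rebases the page's device physical address from the VRAM region's DPA
+ * base onto the region's I/O (BAR) start, yielding the address a peer
+ * device must use to reach the page over PCIe.
+ */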
+static u64 xe_page_to_pcie(struct page *page)
+{
+ struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
+ struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
+
+ return xe_page_to_dpa(page) - vr->dpa_base + vr->io_start;
+}
+
enum xe_svm_copy_dir {
XE_SVM_COPY_TO_VRAM,
XE_SVM_COPY_TO_SRAM,
struct device *dev1 = xe_peer_to_dev(peer1);
struct device *dev2 = xe_peer_to_dev(peer2);
- return dev1 == dev2;
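+ /*
+ * Pages on the same device trivially share a fast interconnect.
+ * Otherwise ask the PCI P2PDMA core whether a usable peer-to-peer
+ * path exists between the two devices; a negative distance means
+ * no P2P DMA path for this pair.
+ */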
+ if (dev1 == dev2)
+ return true;
+
+ return pci_p2pdma_distance(to_pci_dev(dev1), dev2, true) >= 0;
}
static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);
addr = xe_page_to_dpa(page);
prot = XE_INTERCONNECT_VRAM;
} else {
- addr = DMA_MAPPING_ERROR;
- prot = 0;
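+ /*
+ * The page lives in another device's VRAM: map its PCIe BAR
+ * address as a DMA resource so the importing device can reach
+ * it via peer-to-peer DMA.
+ */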
+ addr = dma_map_resource(dev,
+ xe_page_to_pcie(page),
+ PAGE_SIZE << order, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ prot = XE_INTERCONNECT_P2P;
}
return drm_pagemap_addr_encode(addr, prot, order, dir);
}
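+/*
+ * Unmap an address returned by xe_drm_pagemap_device_map(). Local VRAM
+ * addresses are not DMA-mapped and need no teardown; only peer-to-peer
+ * resource mappings are unmapped here.
+ */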
+static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct drm_pagemap_addr addr)
+{
+ if (addr.proto != XE_INTERCONNECT_P2P)
+ return;
+
+ dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order,
+ addr.dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
static void xe_pagemap_destroy_work(struct work_struct *work)
{
struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
.device_map = xe_drm_pagemap_device_map,
+ .device_unmap = xe_drm_pagemap_device_unmap,
.populate_mm = xe_drm_pagemap_populate_mm,
.destroy = xe_pagemap_destroy,
};