drm/xe: Basic SVM BO eviction
Author:     Matthew Brost <matthew.brost@intel.com>
AuthorDate: Thu, 6 Mar 2025 01:26:53 +0000 (17:26 -0800)
Commit:     Matthew Brost <matthew.brost@intel.com>
CommitDate: Thu, 6 Mar 2025 19:36:57 +0000 (11:36 -0800)
Wire xe_bo_move to GPU SVM migration via new helper xe_svm_bo_evict.

v2:
 - Use xe_svm_bo_evict
 - Drop bo->range
v3:
 - Kernel doc (Thomas)
v4:
 - Add missing xe_bo.c code
v5:
 - Add XE_BO_FLAG_CPU_ADDR_MIRROR flag in this patch (Thomas)
 - Add message on eviction failure
v6:
 - Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306012657.3505757-29-matthew.brost@intel.com
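
At a glance, the path this patch wires up looks as follows (an illustrative C
sketch condensed from the hunks below; the control flow is simplified and the
other BO cases are elided, so this is not a compilable excerpt of the driver):

    /* 1. TTM asks the driver where to evict a BO: CPU-address-mirror (SVM)
     *    BOs now always target system memory. */
    static void xe_evict_flags(struct ttm_buffer_object *tbo,
                               struct ttm_placement *placement)
    {
            struct xe_bo *bo = ttm_to_xe_bo(tbo);

            if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
                    *placement = sys_placement;
                    return;
            }
            /* ... existing handling for all other BOs ... */
    }

    /* 2. On the actual move to XE_PL_SYSTEM, hand the BO to the GPU SVM
     *    layer instead of letting TTM copy it; once all device pages are
     *    back in RAM there is nothing left for TTM to move. */
    ret = xe_svm_bo_evict(bo);              /* -> drm_gpusvm_evict_to_ram() */
    if (!ret)
            ttm_bo_move_null(ttm_bo, new_mem);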
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_svm.c
drivers/gpu/drm/xe/xe_svm.h

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 51cd226955924ca0c3777ba32110a330d5442181..0c7a7f5e55968e9fefbd61660389140c59daafb0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -279,6 +279,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 static void xe_evict_flags(struct ttm_buffer_object *tbo,
                           struct ttm_placement *placement)
 {
+       struct xe_bo *bo;
+
        if (!xe_bo_is_xe_bo(tbo)) {
                /* Don't handle scatter gather BOs */
                if (tbo->type == ttm_bo_type_sg) {
@@ -290,6 +292,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
                return;
        }
 
+       bo = ttm_to_xe_bo(tbo);
+       if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
+               *placement = sys_placement;
+               return;
+       }
+
        /*
         * For xe, sg bos that are evicted to system just trigger a
         * rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -734,6 +742,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                goto out;
        }
 
+       if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+           new_mem->mem_type == XE_PL_SYSTEM) {
+               ret = xe_svm_bo_evict(bo);
+               if (!ret) {
+                       drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+                       ttm_bo_move_null(ttm_bo, new_mem);
+               } else {
+                       drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
+                               ERR_PTR(ret));
+               }
+
+               goto out;
+       }
+
        if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
                ttm_bo_move_null(ttm_bo, new_mem);
                goto out;
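
(One idiom worth noting in the failure branch above: printing the error via
"%pe" with ERR_PTR(ret) logs the symbolic errno name, for example "-ENOMEM",
when CONFIG_SYMBOLIC_ERRNAME is enabled, rather than a bare number. The error
is also propagated out of xe_bo_move, so TTM sees the eviction as failed; only
the success branch completes the move with ttm_bo_move_null().)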
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9cab686dc8722caad51a8e7384d1ec8c01947511..2b9c0b0778a2c02f330fa9b5f9221e91cf3750c6 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -47,6 +47,7 @@
                                         XE_BO_FLAG_GGTT1 | \
                                         XE_BO_FLAG_GGTT2 | \
                                         XE_BO_FLAG_GGTT3)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR     BIT(22)
 
 /* this one is trigger internally only */
 #define XE_BO_FLAG_INTERNAL_TEST       BIT(30)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 766a6ab6f3685ac075e656d0fdab8b359dbc9b3b..0d6426622bf8494406c3c017607edcbe56f5bf9a 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -617,7 +617,8 @@ retry:
        bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
                                 xe_svm_range_size(range),
                                 ttm_bo_type_device,
-                                XE_BO_FLAG_VRAM_IF_DGFX(tile));
+                                XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+                                XE_BO_FLAG_CPU_ADDR_MIRROR);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                if (xe_vm_validate_should_retry(NULL, err, &end))
@@ -772,6 +773,20 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
        return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
 }
 
+/**
+ * xe_svm_bo_evict() - SVM evict BO to system memory
+ * @bo: BO to evict
+ *
+ * Evict a BO to system memory. The GPU SVM layer ensures all device pages
+ * are evicted before returning.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+       return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
 static struct drm_pagemap_device_addr
 xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
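
The helper is deliberately thin: drm_gpusvm_evict_to_ram() does the actual
work, operating on the GPU SVM device-memory allocation embedded in the BO.
As rough orientation (an assumed layout sketch; the real definitions live in
xe_bo_types.h and drm_gpusvm.h, which this page does not show):

    struct xe_bo {
            struct ttm_buffer_object ttm;   /* base TTM object */
            u32 flags;                      /* XE_BO_FLAG_* bits */
            /* ... */
            struct drm_gpusvm_devmem devmem_allocation; /* embedded, hence
                                                           &bo->devmem_allocation */
    };

    /* Provided by the GPU SVM layer; migrates every device page of the
     * allocation back to system RAM before returning. */
    int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);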
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 5d4eeb2d34ce925a679bd7da6720ab0437f383ca..855aa8e1dd383127af2e3b33325404630f56463e 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
 
 #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
 
+struct xe_bo;
 struct xe_vram_region;
 struct xe_tile;
 struct xe_vm;
@@ -67,6 +68,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            bool atomic);
 
 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
+
+int xe_svm_bo_evict(struct xe_bo *bo);
 #else
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
@@ -108,6 +111,12 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
 {
        return false;
 }
+
+static inline
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+       return 0;
+}
 #endif
 
 /**