git.ipfire.org Git - thirdparty/linux.git/commitdiff
drm/xe/migrate: ignore CCS for kernel objects
author: Matthew Auld <matthew.auld@intel.com>
Thu, 3 Apr 2025 10:24:44 +0000 (11:24 +0100)
committer: Matthew Auld <matthew.auld@intel.com>
Fri, 4 Apr 2025 10:41:04 +0000 (11:41 +0100)
For kernel BOs we don't clear the CCS state on creation, therefore we
should be careful to ignore it when copying pages. In a future patch we
opt for using the copy path here for kernel BOs, so this now needs to be
considered.

v2:
 - Drop bogus asserts (CI)

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
Link: https://lore.kernel.org/r/20250403102440.266113-12-matthew.auld@intel.com
drivers/gpu/drm/xe/xe_migrate.c

index c1277d599a11ded3a7aa1810dd0e1aec756f2e49..96b244aaf2d6eaccc6128c00386fb82b1647492c 100644 (file)
@@ -779,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
        bool dst_is_pltt = dst->mem_type == XE_PL_TT;
        bool src_is_vram = mem_type_is_vram(src->mem_type);
        bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+       bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+       bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
        bool copy_ccs = xe_device_has_flat_ccs(xe) &&
                xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
        bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
-       bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+       bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
                GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
 
        /* Copying CCS between two different BOs is not supported yet. */
@@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                              avail_pts, avail_pts);
 
                if (copy_system_ccs) {
+                       xe_assert(xe, type_device);
                        ccs_size = xe_device_ccs_bytes(xe, src_L0);
                        batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
                                                      &ccs_ofs, &ccs_pt, 0,
@@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 
                /* Add copy commands size here */
                batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
-                       ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+                       ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
 
                bb = xe_bb_new(gt, batch_size, usm);
                if (IS_ERR(bb)) {
@@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                if (!copy_only_ccs)
                        emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
 
-               if (xe_migrate_needs_ccs_emit(xe))
+               if (needs_ccs_emit)
                        flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
                                                          IS_DGFX(xe) ? src_is_vram : src_is_pltt,
                                                          dst_L0_ofs,