drm/xe/migrate: fix offset and len check
author		Matthew Auld <matthew.auld@intel.com>
		Wed, 22 Oct 2025 16:38:30 +0000 (17:38 +0100)
committer	Matthew Auld <matthew.auld@intel.com>
		Thu, 23 Oct 2025 09:48:33 +0000 (10:48 +0100)
The restriction here is a pitch of 4 bytes to match the pixel width (32b),
plus the hw restriction that src and dst must be aligned to 64 bytes. If any
of that can't be satisfied then we need a bounce buffer.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251022163836.191405-2-matthew.auld@intel.com
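
For illustration, a minimal standalone sketch of the constraint the commit
enforces (XE_CACHELINE_BYTES taken to be 64 and IS_ALIGNED reimplemented
locally; all names here are stand-ins, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the kernel definitions (assumptions). */
#define XE_CACHELINE_BYTES 64UL
#define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

/* Pixel width is 32 bits, so the copy pitch is 4 bytes. */
#define PITCH 4UL

/*
 * True when the hardware can copy directly: len must be a whole
 * number of 4-byte pixels, and both the source offset and the
 * destination address must sit on a 64-byte cacheline boundary.
 * Otherwise a bounce buffer is needed.
 */
static bool can_copy_directly(unsigned long len,
			      unsigned long sram_offset,
			      unsigned long vram_addr)
{
	return IS_ALIGNED(len, PITCH) &&
	       IS_ALIGNED(sram_offset, XE_CACHELINE_BYTES) &&
	       IS_ALIGNED(vram_addr, XE_CACHELINE_BYTES);
}

int main(void)
{
	printf("%d\n", can_copy_directly(4096, 0, 0x10000)); /* 1: direct */
	printf("%d\n", can_copy_directly(4095, 0, 0x10000)); /* 0: bad len */
	printf("%d\n", can_copy_directly(4096, 0, 0x10020)); /* 0: bad dst */
	return 0;
}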
drivers/gpu/drm/xe/xe_migrate.c

index 3112c966c67d7d1189bf4ba0e1e827e35aec4ca8..ce2ad876586cbef6ef8e53eef29bf683fc16fd50 100644 (file)
@@ -1883,7 +1883,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
        unsigned long i, j;
        bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
 
-       if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+       if (drm_WARN_ON(&xe->drm, (!IS_ALIGNED(len, pitch)) ||
                        (sram_offset | vram_addr) & XE_CACHELINE_MASK))
                return ERR_PTR(-EOPNOTSUPP);
 
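To see why the old check was too strict, consider a length that is a whole
number of pixels but not a whole number of cachelines. A quick sketch
(assuming XE_CACHELINE_MASK is XE_CACHELINE_BYTES - 1, i.e. 63):

#include <stdio.h>

#define XE_CACHELINE_BYTES 64UL
#define XE_CACHELINE_MASK  (XE_CACHELINE_BYTES - 1) /* assumed: 63 */
#define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long len = 68; /* 17 pixels of 4 bytes each */

	/* Old check: rejected any len not a multiple of 64. */
	printf("old rejects: %d\n", (len & XE_CACHELINE_MASK) != 0); /* 1 */
	/* New check: accepts any whole number of pixels. */
	printf("new accepts: %d\n", IS_ALIGNED(len, 4));             /* 1 */
	return 0;
}
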
@@ -2103,8 +2103,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
        xe_bo_assert_held(bo);
 
        /* Use bounce buffer for small access and unaligned access */
-       if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
-           !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
+       if (!IS_ALIGNED(len, 4) ||
+           !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
+           !IS_ALIGNED(offset, XE_CACHELINE_BYTES)) {
                int buf_offset = 0;
                void *bounce;
                int err;
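
When the new condition fires, the code falls back to the usual bounce-buffer
shape: widen the transfer to an aligned window, let the hardware do the
aligned copy, then trim to the requested bytes. A minimal userspace sketch of
that pattern (C11 aligned_alloc; hw_copy_aligned is a hypothetical stand-in
for the real device copy, not the driver's code):

#include <stdlib.h>
#include <string.h>

#define XE_CACHELINE_BYTES 64UL

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical stand-in for the aligned device copy. */
static void hw_copy_aligned(void *dst, unsigned long src, size_t len)
{
	(void)src;
	memset(dst, 0xab, len); /* pretend the device filled the window */
}

/*
 * Read len bytes from device offset `offset` into buf via a bounce
 * buffer, so the hardware only ever sees a transfer whose offset and
 * length satisfy the alignment rules above.
 */
static int read_via_bounce(void *buf, unsigned long offset, size_t len)
{
	unsigned long start = offset & ~(XE_CACHELINE_BYTES - 1);
	size_t span = ALIGN_UP(offset + len, XE_CACHELINE_BYTES) - start;
	void *bounce = aligned_alloc(XE_CACHELINE_BYTES, span);

	if (!bounce)
		return -1;

	hw_copy_aligned(bounce, start, span);                /* aligned copy */
	memcpy(buf, (char *)bounce + (offset - start), len); /* trim */
	free(bounce);
	return 0;
}

int main(void)
{
	char buf[10];

	/* offset 100 and len 10 are both unaligned: bounce path. */
	return read_via_bounce(buf, 100, sizeof(buf));
}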