drm/xe: add interface to request physical alignment for buffer objects
author     Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
           Wed, 9 Oct 2024 15:19:46 +0000 (18:19 +0300)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 10 Jul 2025 14:05:03 +0000 (16:05 +0200)
[ Upstream commit 3ad86ae1da97d0091f673f08846848714f6dd745 ]

Add xe_bo_create_pin_map_at_aligned(), which augments
xe_bo_create_pin_map_at() with an alignment parameter, allowing callers
to pass the required alignment when it differs from the default.

Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241009151947.2240099-2-juhapekka.heikkila@gmail.com
Stable-dep-of: f16873f42a06 ("drm/xe: move DPT l2 flush to a more sensible place")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_bo.h
drivers/gpu/drm/xe/xe_bo_types.h
drivers/gpu/drm/xe/xe_ggtt.c
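For illustration, a minimal sketch of how a caller might use the new helper to request a 64K-aligned GGTT node. Only the xe_bo_create_pin_map_at_aligned() signature and the ttm_bo_type_kernel, XE_BO_FLAG_GGTT and SZ_64K identifiers come from the diff below; the buffer size, the XE_BO_FLAG_VRAM_IF_DGFX() flag and the ~0ull "no fixed offset" convention are assumptions about a typical caller and are not part of this patch.

	/*
	 * Illustrative only: pin and CPU-map a kernel BO in the GGTT and ask
	 * for a 64K-aligned GGTT node instead of the default XE_PAGE_SIZE.
	 * Size, flags and the ~0ull "place anywhere" offset are assumptions.
	 */
	struct xe_bo *bo;

	bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, SZ_2M, ~0ull,
					     ttm_bo_type_kernel,
					     XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					     XE_BO_FLAG_GGTT,
					     SZ_64K);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

The requested value is stored in bo->min_align and picked up by __xe_ggtt_insert_bo_at() in place of XE_PAGE_SIZE, as the xe_ggtt.c hunk at the end of this diff shows.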

diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index cb6c7598824be38a8d5f17c1677a2f95a88db43c..9c4cf050059ac2cc97cb49ad4df2b88562f011b7 100644
@@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
 
        bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
                                       NULL, size, start, end,
-                                      ttm_bo_type_kernel, flags);
+                                      ttm_bo_type_kernel, flags, 0);
        if (IS_ERR(bo)) {
                err = PTR_ERR(bo);
                bo = NULL;
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 8acc4640f0a2857f52572f609da0c7c007d26817..c92953c08d6824985c08f42eaedf49c3ed4d0dd7 100644
@@ -1454,7 +1454,8 @@ static struct xe_bo *
 __xe_bo_create_locked(struct xe_device *xe,
                      struct xe_tile *tile, struct xe_vm *vm,
                      size_t size, u64 start, u64 end,
-                     u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+                     u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+                     u64 alignment)
 {
        struct xe_bo *bo = NULL;
        int err;
@@ -1483,6 +1484,8 @@ __xe_bo_create_locked(struct xe_device *xe,
        if (IS_ERR(bo))
                return bo;
 
+       bo->min_align = alignment;
+
        /*
         * Note that instead of taking a reference no the drm_gpuvm_resv_bo(),
         * to ensure the shared resv doesn't disappear under the bo, the bo
@@ -1523,16 +1526,18 @@ struct xe_bo *
 xe_bo_create_locked_range(struct xe_device *xe,
                          struct xe_tile *tile, struct xe_vm *vm,
                          size_t size, u64 start, u64 end,
-                         enum ttm_bo_type type, u32 flags)
+                         enum ttm_bo_type type, u32 flags, u64 alignment)
 {
-       return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+       return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+                                    flags, alignment);
 }
 
 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
                                  struct xe_vm *vm, size_t size,
                                  enum ttm_bo_type type, u32 flags)
 {
-       return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+       return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
+                                    flags, 0);
 }
 
 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
@@ -1542,7 +1547,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
 {
        struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
                                                 cpu_caching, ttm_bo_type_device,
-                                                flags | XE_BO_FLAG_USER);
+                                                flags | XE_BO_FLAG_USER, 0);
        if (!IS_ERR(bo))
                xe_bo_unlock_vm_held(bo);
 
@@ -1565,6 +1570,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
                                      struct xe_vm *vm,
                                      size_t size, u64 offset,
                                      enum ttm_bo_type type, u32 flags)
+{
+       return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
+                                              type, flags, 0);
+}
+
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+                                             struct xe_tile *tile,
+                                             struct xe_vm *vm,
+                                             size_t size, u64 offset,
+                                             enum ttm_bo_type type, u32 flags,
+                                             u64 alignment)
 {
        struct xe_bo *bo;
        int err;
@@ -1576,7 +1592,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
                flags |= XE_BO_FLAG_GGTT;
 
        bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+                                      flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+                                      alignment);
        if (IS_ERR(bo))
                return bo;
 
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index d22269a230aa19f1ade7f47b68dacd4affd5122c..704f5068d0d0a57d16dd3a0eab9c3f2c499b302e 100644
@@ -77,7 +77,7 @@ struct xe_bo *
 xe_bo_create_locked_range(struct xe_device *xe,
                          struct xe_tile *tile, struct xe_vm *vm,
                          size_t size, u64 start, u64 end,
-                         enum ttm_bo_type type, u32 flags);
+                         enum ttm_bo_type type, u32 flags, u64 alignment);
 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
                                  struct xe_vm *vm, size_t size,
                                  enum ttm_bo_type type, u32 flags);
@@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
                                      struct xe_vm *vm, size_t size, u64 offset,
                                      enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+                                             struct xe_tile *tile,
+                                             struct xe_vm *vm,
+                                             size_t size, u64 offset,
+                                             enum ttm_bo_type type, u32 flags,
+                                             u64 alignment);
 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
                                     const void *data, size_t size,
                                     enum ttm_bo_type type, u32 flags);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 2ed558ac2264a69cc093496d2bd66f812b62bb91..35372c46edfa5be1324d332b0cb8b64238191cdf 100644
@@ -76,6 +76,11 @@ struct xe_bo {
 
        /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
                struct list_head vram_userfault_link;
+
+       /** @min_align: minimum alignment needed for this BO if different
+        * from default
+        */
+       u64 min_align;
 };
 
 #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index e9820126feb96947a5b81deaec13461c333e0c79..9cb5760006a1ca530e77c5b423b0fd7b1c845ce4 100644
@@ -620,7 +620,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
                                  u64 start, u64 end)
 {
        int err;
-       u64 alignment = XE_PAGE_SIZE;
+       u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
 
        if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
                alignment = SZ_64K;