]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
drm/panthor: Label all kernel BO's
authorAdrián Larumbe <adrian.larumbe@collabora.com>
Wed, 23 Apr 2025 02:12:33 +0000 (03:12 +0100)
committerBoris Brezillon <boris.brezillon@collabora.com>
Wed, 23 Apr 2025 08:35:35 +0000 (10:35 +0200)
Kernel BO's aren't exposed to UM, so labelling them is the responsibility
of the driver itself. This kind of tagging will prove useful in further
commits when we want to expose these objects through DebugFS.

Expand panthor_kernel_bo_create() interface to take a NUL-terminated
string. No bounds checking is done because all label strings are given
as statically-allocated literals, but if a more complex kernel BO naming
scheme with explicit memory allocation and formatting was desired in the
future, this would have to change.

Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20250423021238.1639175-4-adrian.larumbe@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
drivers/gpu/drm/panthor/panthor_fw.c
drivers/gpu/drm/panthor/panthor_gem.c
drivers/gpu/drm/panthor/panthor_gem.h
drivers/gpu/drm/panthor/panthor_heap.c
drivers/gpu/drm/panthor/panthor_sched.c

index 446bb377b953dbd2b23c170a3a59189b3ad3deb3..7bc38e6353295033e3218ba352504674644d97b9 100644 (file)
@@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
                                       DRM_PANTHOR_BO_NO_MMAP,
                                       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
                                       DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
-                                      PANTHOR_VM_KERNEL_AUTO_VA);
+                                      PANTHOR_VM_KERNEL_AUTO_VA,
+                                      "Queue FW interface");
        if (IS_ERR(mem))
                return mem;
 
@@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
        return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
                                        DRM_PANTHOR_BO_NO_MMAP,
                                        DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
-                                       PANTHOR_VM_KERNEL_AUTO_VA);
+                                       PANTHOR_VM_KERNEL_AUTO_VA,
+                                       "FW suspend buffer");
 }
 
 static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
@@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
                section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
                                                        section_size,
                                                        DRM_PANTHOR_BO_NO_MMAP,
-                                                       vm_map_flags, va);
+                                                       vm_map_flags, va, "FW section");
                if (IS_ERR(section->mem))
                        return PTR_ERR(section->mem);
 
index 4c9d52378d1cf1caee298b8d0784468d0652c1dd..a61a50d8d5f5dac18ec54567aa08646017b4d6b8 100644 (file)
@@ -76,13 +76,14 @@ out_free_bo:
  * @gpu_va: GPU address assigned when mapping to the VM.
  * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
  * automatically allocated.
+ * @name: Descriptive label of the BO's contents
  *
  * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
  */
 struct panthor_kernel_bo *
 panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
                         size_t size, u32 bo_flags, u32 vm_map_flags,
-                        u64 gpu_va)
+                        u64 gpu_va, const char *name)
 {
        struct drm_gem_shmem_object *obj;
        struct panthor_kernel_bo *kbo;
@@ -106,6 +107,8 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
        kbo->obj = &obj->base;
        bo->flags = bo_flags;
 
+       panthor_gem_kernel_bo_set_label(kbo, name);
+
        /* The system and GPU MMU page size might differ, which becomes a
         * problem for FW sections that need to be mapped at explicit address
         * since our PAGE_SIZE alignment might cover a VA range that's
index 983cc8ca264e1de2cca2e93c99f9d31f79aa8ced..3c09af568e479cc4756609ca083f1d0674a1323d 100644 (file)
@@ -153,7 +153,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
 struct panthor_kernel_bo *
 panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
                         size_t size, u32 bo_flags, u32 vm_map_flags,
-                        u64 gpu_va);
+                        u64 gpu_va, const char *name);
 
 void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
 
index 3bdf61c142644ad391bb838e74f8803e0f7a301c..d236e9ceade4b0a8e355ec34b1b4e9bf09bd411e 100644 (file)
@@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
        chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
                                             DRM_PANTHOR_BO_NO_MMAP,
                                             DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
-                                            PANTHOR_VM_KERNEL_AUTO_VA);
+                                            PANTHOR_VM_KERNEL_AUTO_VA,
+                                            "Tiler heap chunk");
        if (IS_ERR(chunk->bo)) {
                ret = PTR_ERR(chunk->bo);
                goto err_free_chunk;
@@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
        pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
                                                      DRM_PANTHOR_BO_NO_MMAP,
                                                      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
-                                                     PANTHOR_VM_KERNEL_AUTO_VA);
+                                                     PANTHOR_VM_KERNEL_AUTO_VA,
+                                                     "Heap pool");
        if (IS_ERR(pool->gpu_contexts)) {
                ret = PTR_ERR(pool->gpu_contexts);
                goto err_destroy_pool;
index 446ec780eb4a6d98698ab997d4c77933c0a64ab9..43ee57728de543926aec80d4f6c3417b21fb7435 100644 (file)
@@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group,
                                                  DRM_PANTHOR_BO_NO_MMAP,
                                                  DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
                                                  DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
-                                                 PANTHOR_VM_KERNEL_AUTO_VA);
+                                                 PANTHOR_VM_KERNEL_AUTO_VA,
+                                                 "CS ring buffer");
        if (IS_ERR(queue->ringbuf)) {
                ret = PTR_ERR(queue->ringbuf);
                goto err_free_queue;
@@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group,
                                         DRM_PANTHOR_BO_NO_MMAP,
                                         DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
                                         DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
-                                        PANTHOR_VM_KERNEL_AUTO_VA);
+                                        PANTHOR_VM_KERNEL_AUTO_VA,
+                                        "Group job stats");
 
        if (IS_ERR(queue->profiling.slots)) {
                ret = PTR_ERR(queue->profiling.slots);
@@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile,
                                                   DRM_PANTHOR_BO_NO_MMAP,
                                                   DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
                                                   DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
-                                                  PANTHOR_VM_KERNEL_AUTO_VA);
+                                                  PANTHOR_VM_KERNEL_AUTO_VA,
+                                                  "Group sync objects");
        if (IS_ERR(group->syncobjs)) {
                ret = PTR_ERR(group->syncobjs);
                goto err_put_group;