Kernel BOs aren't exposed to UM, so labelling them is the responsibility
of the driver itself. This kind of tagging will prove useful in further
commits when we want to expose these objects through DebugFS.

Expand the panthor_kernel_bo_create() interface to take a NUL-terminated
label string. No bounds checking is done because all label strings are
passed as statically-allocated literals; if a more complex kernel BO
naming scheme with explicit memory allocation and formatting were desired
in the future, this would have to change.
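
For illustration only, such a hypothetical formatted-label variant is
sketched below. panthor_kernel_bo_create_fmt() is not part of this patch;
it merely shows how explicit allocation would interact with the new
parameter, assuming the label-setting helper duplicates the string (e.g.
via kstrdup_const()) so the caller can free its temporary buffer:

/*
 * Hypothetical sketch, not part of this patch: build a label with
 * kvasprintf() instead of passing a static literal. Because the string
 * is heap-allocated, its lifetime must be managed explicitly, which is
 * the change alluded to above.
 */
static struct panthor_kernel_bo *
panthor_kernel_bo_create_fmt(struct panthor_device *ptdev, struct panthor_vm *vm,
			     size_t size, u32 bo_flags, u32 vm_map_flags,
			     u64 gpu_va, const char *fmt, ...)
{
	struct panthor_kernel_bo *kbo;
	va_list ap;
	char *name;

	va_start(ap, fmt);
	name = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!name)
		return ERR_PTR(-ENOMEM);

	kbo = panthor_kernel_bo_create(ptdev, vm, size, bo_flags, vm_map_flags,
				       gpu_va, name);

	/* Safe only if the callee copied the label into its own storage. */
	kfree(name);
	return kbo;
}
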
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20250423021238.1639175-4-adrian.larumbe@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Queue FW interface");
if (IS_ERR(mem))
return mem;
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "FW suspend buffer");
}
static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
DRM_PANTHOR_BO_NO_MMAP,
- vm_map_flags, va);
+ vm_map_flags, va, "FW section");
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
* @gpu_va: GPU address assigned when mapping to the VM.
* If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
* automatically allocated.
+ * @name: Descriptive label of the BO's contents
*
* Return: A valid pointer in case of success, an ERR_PTR() otherwise.
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va)
+ u64 gpu_va, const char *name)
{
struct drm_gem_shmem_object *obj;
struct panthor_kernel_bo *kbo;
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ panthor_gem_kernel_bo_set_label(kbo, name);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ u64 gpu_va, const char *name);
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Tiler heap chunk");
if (IS_ERR(chunk->bo)) {
ret = PTR_ERR(chunk->bo);
goto err_free_chunk;
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Heap pool");
if (IS_ERR(pool->gpu_contexts)) {
ret = PTR_ERR(pool->gpu_contexts);
goto err_destroy_pool;
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "CS ring buffer");
if (IS_ERR(queue->ringbuf)) {
ret = PTR_ERR(queue->ringbuf);
goto err_free_queue;
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group job stats");
if (IS_ERR(queue->profiling.slots)) {
ret = PTR_ERR(queue->profiling.slots);
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group sync objects");
if (IS_ERR(group->syncobjs)) {
ret = PTR_ERR(group->syncobjs);
goto err_put_group;