drm/msm: Add mmu prealloc tracepoint
author Rob Clark <robdclark@chromium.org>
Sun, 29 Jun 2025 20:13:21 +0000 (13:13 -0700)
committer Rob Clark <robin.clark@oss.qualcomm.com>
Sat, 5 Jul 2025 00:48:38 +0000 (17:48 -0700)
So we can monitor how many pages are getting preallocated vs how many
get used.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661521/

drivers/gpu/drm/msm/msm_gpu_trace.h
drivers/gpu/drm/msm/msm_iommu.c

drivers/gpu/drm/msm/msm_gpu_trace.h
index 7f863282db0d7812c8fd53b3f1fc0cd5635028ba..781bbe5540bde6d9cd6758050229fd0406fad232 100644 (file)
@@ -205,6 +205,20 @@ TRACE_EVENT(msm_gpu_preemption_irq,
                TP_printk("preempted to %u", __entry->ring_id)
 );
 
+TRACE_EVENT(msm_mmu_prealloc_cleanup,
+               TP_PROTO(u32 count, u32 remaining),
+               TP_ARGS(count, remaining),
+               TP_STRUCT__entry(
+                       __field(u32, count)
+                       __field(u32, remaining)
+                       ),
+               TP_fast_assign(
+                       __entry->count = count;
+                       __entry->remaining = remaining;
+                       ),
+               TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
+);
+
 #endif
 
 #undef TRACE_INCLUDE_PATH
drivers/gpu/drm/msm/msm_iommu.c
index 887c9023f8a2be61f6ac3bd6eeb9fd86d3d44f9f..55c29f49b7889824e789f449f0e5dbd36a4216b5 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/kmemleak.h>
 #include "msm_drv.h"
+#include "msm_gpu_trace.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
@@ -346,6 +347,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
        struct kmem_cache *pt_cache = get_pt_cache(mmu);
        uint32_t remaining_pt_count = p->count - p->ptr;
 
+       if (p->count > 0)
+               trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
+
        kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
        kvfree(p->pages);
 }