drm/msm: Add GPU memory traces
author    Rob Clark <robdclark@chromium.org>
          Fri, 1 Mar 2024 18:53:45 +0000 (10:53 -0800)
committer Rob Clark <robdclark@chromium.org>
          Fri, 21 Jun 2024 20:41:43 +0000 (13:41 -0700)
Perfetto can use these traces to track global and per-process GPU memory
usage.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/580854/
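
For reference, gpu_mem_total is the generic GPU-memory tracepoint from include/trace/events/gpu_mem.h; it is compiled in when a driver selects TRACE_GPU_MEM (a hidden bool under drivers/gpu/trace/), which the Kconfig hunk below does. Its shape is roughly the following (abridged sketch):

    TRACE_EVENT(gpu_mem_total,
            TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size),
            TP_ARGS(gpu_id, pid, size),
            /* ... field assignments elided ... */
            TP_printk("gpu_id=%u pid=%u size=%llu", ...)
    );

The convention, visible in the hunks below, is that pid 0 carries the device-wide total while a nonzero pid carries that process's total; Perfetto keys its global and per-process GPU memory counters off exactly this distinction.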

drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpu.h

diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 1931ecf73e32a3c5369b438b48acc7ada51a2056..26a4c71da63aae0ca311fef71cbe0a8930b2edb7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -33,6 +33,7 @@ config DRM_MSM
        select PM_OPP
        select NVMEM
        select PM_GENERIC_DOMAINS
+       select TRACE_GPU_MEM
        help
          DRM/KMS driver for MSM/snapdragon.
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 912ebaa5df84660e568e8363ab57d65c69d4d549..e256d72adddd553a410c4181859601c2ad0a7f0b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -127,6 +127,11 @@ struct msm_drm_private {
        struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
        struct msm_perf_state *perf;
 
+       /**
+        * total_mem: Total/global amount of memory backing GEM objects.
+        */
+       atomic64_t total_mem;
+
        /**
         * List of all GEM objects (mainly for debugfs, protected by obj_lock
         * (acquire before per GEM object lock)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a5c6498a43f066901270aa376ff3e4d4c0c35013..ddc6a131c041a06d2f77e4adbbc4114ad1a31205 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -12,6 +12,9 @@
 #include <linux/pfn_t.h>
 
 #include <drm/drm_prime.h>
+#include <drm/drm_file.h>
+
+#include <trace/events/gpu_mem.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
@@ -33,6 +36,34 @@ static bool use_pages(struct drm_gem_object *obj)
        return !msm_obj->vram_node;
 }
 
+static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
+{
+       uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
+       trace_gpu_mem_total(0, 0, total_mem);
+}
+
+static void update_ctx_mem(struct drm_file *file, ssize_t size)
+{
+       struct msm_file_private *ctx = file->driver_priv;
+       uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
+
+       rcu_read_lock(); /* Locks file->pid! */
+       trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
+       rcu_read_unlock();
+
+}
+
+static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
+{
+       update_ctx_mem(file, obj->size);
+       return 0;
+}
+
+static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+       update_ctx_mem(file, -obj->size);
+}
+
 /*
  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  * API.  Really GPU cache is out of scope here (handled on cmdstream)
@@ -156,6 +187,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
                        return p;
                }
 
+               update_device_mem(dev->dev_private, obj->size);
+
                msm_obj->pages = p;
 
                msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
@@ -209,6 +242,8 @@ static void put_pages(struct drm_gem_object *obj)
                        msm_obj->sgt = NULL;
                }
 
+               update_device_mem(obj->dev->dev_private, -obj->size);
+
                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else
@@ -1118,6 +1153,8 @@ static const struct vm_operations_struct vm_ops = {
 
 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
        .free = msm_gem_free_object,
+       .open = msm_gem_open,
+       .close = msm_gem_close,
        .pin = msm_gem_prime_pin,
        .unpin = msm_gem_prime_unpin,
        .get_sg_table = msm_gem_prime_get_sg_table,
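
The two counters thus move at different points in an object's lifetime: total_mem changes when backing pages are actually allocated or freed (get_pages()/put_pages() above), while ctx_mem changes when a handle is created or closed for a DRM file, since the GEM core invokes the new .open/.close hooks on handle creation and teardown. A purely illustrative, standalone userspace model of the accounting (the PID and buffer size are hypothetical):

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Models priv->total_mem and a single context's ctx_mem. */
    static atomic_uint_fast64_t total_mem;
    static atomic_uint_fast64_t ctx_mem;

    /* Stand-in for the kernel's trace_gpu_mem_total(gpu_id, pid, size). */
    static void trace_gpu_mem_total(uint32_t gpu_id, uint32_t pid, uint64_t size)
    {
            printf("gpu_mem_total: gpu_id=%" PRIu32 " pid=%" PRIu32 " size=%" PRIu64 "\n",
                   gpu_id, pid, size);
    }

    /* Mirrors update_device_mem(): pid 0 reports the global total. */
    static void update_device_mem(int64_t size)
    {
            uint64_t total = atomic_fetch_add(&total_mem, (uint64_t)size) + (uint64_t)size;
            trace_gpu_mem_total(0, 0, total);
    }

    /* Mirrors update_ctx_mem(): reports one process's total. */
    static void update_ctx_mem(uint32_t pid, int64_t size)
    {
            uint64_t mem = atomic_fetch_add(&ctx_mem, (uint64_t)size) + (uint64_t)size;
            trace_gpu_mem_total(0, pid, mem);
    }

    int main(void)
    {
            const int64_t sz = 4 << 20; /* one hypothetical 4 MiB buffer */

            update_ctx_mem(1234, sz);   /* handle created  -> msm_gem_open()  */
            update_device_mem(sz);      /* pages pinned    -> get_pages()     */
            update_ctx_mem(1234, -sz);  /* handle closed   -> msm_gem_close() */
            update_device_mem(-sz);     /* pages released  -> put_pages()     */
            return 0;
    }

Because atomic64_add_return() in the driver returns the post-add value, the model adds size onto atomic_fetch_add()'s pre-add return to match; either way, each trace event reports the counter's new value.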
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index a0c1bd6d1d5b44fc240ee2472ef9434b32e68222..0c2c574b76fd26751319b0c314afc5b3c51cc3f2 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -428,6 +428,14 @@ struct msm_file_private {
         * level.
         */
        struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+
+       /**
+        * ctx_mem:
+        *
+        * Total amount of memory of GEM buffers with handles attached for
+        * this context.
+        */
+       atomic64_t ctx_mem;
 };
 
 /**
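
Once the driver emits these events they behave like any other tracepoint, so besides Perfetto they can be observed straight from tracefs. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing and the process has permission to write the enable file:

    #include <stdio.h>

    int main(void)
    {
            /* Enable the gpu_mem:gpu_mem_total event. */
            FILE *f = fopen("/sys/kernel/tracing/events/gpu_mem/gpu_mem_total/enable", "w");
            if (!f)
                    return 1;
            fputs("1\n", f);
            fclose(f);

            /* Stream events to stdout as they arrive. */
            f = fopen("/sys/kernel/tracing/trace_pipe", "r");
            if (!f)
                    return 1;
            for (int c; (c = fgetc(f)) != EOF; )
                    putchar(c);
            fclose(f);
            return 0;
    }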