perf kwork: Make perf_kwork_add_work a callback
author     Ian Rogers <irogers@google.com>
           Tue, 19 Nov 2024 01:16:39 +0000 (17:16 -0800)
committer  Arnaldo Carvalho de Melo <acme@redhat.com>
           Wed, 18 Dec 2024 19:24:33 +0000 (16:24 -0300)
perf_kwork_add_work is defined in builtin-kwork.c, whereas much of the
kwork code lives in util. To avoid having to stub perf_kwork_add_work
in python.c, add an add_work callback to struct perf_kwork and
initialize it to perf_kwork_add_work in builtin-kwork.c, where the
only struct perf_kwork is instantiated. This removes the need for the
stub in python.c.
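
For reference, a minimal self-contained sketch of the callback pattern this
change applies (the types are simplified and the util_add_work helper is made
up for illustration; this is not the actual perf source):

#include <stdio.h>

struct kwork_work { const char *name; };
struct kwork_class { const char *name; };

struct perf_kwork {
	/* util code calls through this pointer, so it does not need a
	 * symbol that is only defined in a builtin-* object. */
	struct kwork_work *(*add_work)(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);
};

/* Builtin side: the implementation can now be static. */
static struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
					      struct kwork_class *class,
					      struct kwork_work *key)
{
	(void)kwork;
	printf("add work '%s' (class '%s')\n", key->name, class->name);
	return key;
}

/* Util side: depends only on struct perf_kwork, not on the builtin symbol. */
static struct kwork_work *util_add_work(struct perf_kwork *kwork,
					struct kwork_class *class,
					struct kwork_work *key)
{
	return kwork->add_work(kwork, class, key);
}

int main(void)
{
	struct perf_kwork kwork = { .add_work = perf_kwork_add_work };
	struct kwork_class irq_class = { .name = "irq" };
	struct kwork_work key = { .name = "eth0" };

	return util_add_work(&kwork, &irq_class, &key) == NULL ? 1 : 0;
}

Because util-side callers like util_add_work only dereference the struct
member, linking python.c against the util objects no longer pulls in (or
requires a stub for) the builtin-kwork symbol.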

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Veronika Molnarova <vmolnaro@redhat.com>
Cc: Weilin Wang <weilin.wang@intel.com>
Link: https://lore.kernel.org/r/20241119011644.971342-18-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-kwork.c
tools/perf/util/bpf_kwork.c
tools/perf/util/bpf_kwork_top.c
tools/perf/util/kwork.h
tools/perf/util/python.c

diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 878c93c026b1d79eba9963b4993296dfbff6fd30..c41a68d073debd09d6eeaf9728f1a094220300e1 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -1848,7 +1848,7 @@ static void process_skipped_events(struct perf_kwork *kwork,
        }
 }
 
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+static struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
                                       struct kwork_class *class,
                                       struct kwork_work *key)
 {
@@ -2346,6 +2346,7 @@ int cmd_kwork(int argc, const char **argv)
                .all_runtime         = 0,
                .all_count           = 0,
                .nr_skipped_events   = { 0 },
+               .add_work            = perf_kwork_add_work,
        };
        static const char default_report_sort_order[] = "runtime, max, count";
        static const char default_latency_sort_order[] = "avg, max, count";
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 6c7126b7670dd0c9a2b0a5b55d74cbbc5f4ea5cd..5cff755c71faed12aaa3c9029237bfdd2e3fc2d5 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -285,7 +285,7 @@ static int add_work(struct perf_kwork *kwork,
            (bpf_trace->get_work_name(key, &tmp.name)))
                return -1;
 
-       work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+       work = kwork->add_work(kwork, tmp.class, &tmp);
        if (work == NULL)
                return -1;
 
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index 7261cad43468d7a4cfcaf4df0e1849b6fb057e67..b6f187dd9136ddca3c6d41e3e19fff328cd9261f 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -255,7 +255,7 @@ static int add_work(struct perf_kwork *kwork, struct work_key *key,
        bpf_trace = kwork_class_bpf_supported_list[type];
        tmp.class = bpf_trace->class;
 
-       work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+       work = kwork->add_work(kwork, tmp.class, &tmp);
        if (!work)
                return -1;
 
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index 596595946a060c38e7f75f79529c84a3a0d00de4..db00269b73f24c6655951255e201a1f629ba6a71 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -252,12 +252,14 @@ struct perf_kwork {
         * perf kwork top data
         */
        struct kwork_top_stat top_stat;
-};
 
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+       /* Add work callback. */
+       struct kwork_work *(*add_work)(struct perf_kwork *kwork,
                                       struct kwork_class *class,
                                       struct kwork_work *key);
 
+};
+
 #ifdef HAVE_BPF_SKEL
 
 int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 5e6db4b143a11b292a04d6bb1a1ae68d2b69c62d..3e32a502a41c0d8cc348308b4395ba4781ce67a9 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -16,7 +16,6 @@
 #include "thread_map.h"
 #include "trace-event.h"
 #include "mmap.h"
-#include "util/kwork.h"
 #include "util/sample.h"
 #include <internal/lib.h>
 
@@ -1297,14 +1296,3 @@ error:
                PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
        return module;
 }
-
-
-/* The following are stubs to avoid dragging in builtin-* objects. */
-/* TODO: move the code out of the builtin-* file into util. */
-
-struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
-                                      struct kwork_class *class __maybe_unused,
-                                      struct kwork_work *key  __maybe_unused)
-{
-       return NULL;
-}