+++ /dev/null
-From 6091acf3330051b01657197038f25b295dad43da Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 7 Aug 2024 22:46:43 -0700
-Subject: perf callchain: Fix stitch LBR memory leaks
-
-From: Ian Rogers <irogers@google.com>
-
-[ Upstream commit 599c19397b17d197fc1184bbc950f163a292efc9 ]
-
-The 'struct callchain_cursor_node' has a 'struct map_symbol' whose maps
-and map members are reference counted. Ensure these values use a _get
-routine to increment the reference counts and use map_symbol__exit() to
-release the reference counts.
-
-Do the same for 'struct thread's prev_lbr_cursor, but save the size of
-the prev_lbr_cursor array so that it may be iterated.
-
-Ensure that when stitch_nodes are placed on the free list the
-map_symbols are exited.
-
-Fix resolve_lbr_callchain_sample() by replacing list_replace_init() with
-list_splice_init(), so the whole list is moved and nodes aren't leaked.
-
-The memory leaks can be reproduced with a leak sanitizer build using
-the following perf record and report commands:
-
- ```
- $ perf record -e cycles --call-graph lbr perf test -w thloop
- $ perf report --stitch-lbr
- ```
-
-Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
-Fixes: ff165628d72644e3 ("perf callchain: Stitch LBR call stack")
-Signed-off-by: Ian Rogers <irogers@google.com>
-[ Basic tests after applying the patch, repeating the example above ]
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Cc: Adrian Hunter <adrian.hunter@intel.com>
-Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Anne Macedo <retpolanne@posteo.net>
-Cc: Changbin Du <changbin.du@huawei.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Jiri Olsa <jolsa@kernel.org>
-Cc: Mark Rutland <mark.rutland@arm.com>
-Cc: Namhyung Kim <namhyung@kernel.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20240808054644.1286065-1-irogers@google.com
-Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/perf/util/machine.c | 17 +++++++++++++++--
- tools/perf/util/thread.c | 4 ++++
- tools/perf/util/thread.h | 1 +
- 3 files changed, 20 insertions(+), 2 deletions(-)
-
-diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
-index 8477edefc2997..706be5e4a0761 100644
---- a/tools/perf/util/machine.c
-+++ b/tools/perf/util/machine.c
-@@ -2270,8 +2270,12 @@ static void save_lbr_cursor_node(struct thread *thread,
- cursor->curr = cursor->first;
- else
- cursor->curr = cursor->curr->next;
-+
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
- memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
- sizeof(struct callchain_cursor_node));
-+ lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
-+ lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
-
- lbr_stitch->prev_lbr_cursor[idx].valid = true;
- cursor->pos++;
-@@ -2482,6 +2486,9 @@ static bool has_stitched_lbr(struct thread *thread,
- memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
- sizeof(struct callchain_cursor_node));
-
-+ stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
-+ stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
-+
- if (callee)
- list_add(&stitch_node->node, &lbr_stitch->lists);
- else
-@@ -2505,6 +2512,8 @@ static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
- if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
- goto free_lbr_stitch;
-
-+ thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
-+
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
-
-@@ -2560,8 +2569,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
- max_lbr, callee);
-
- if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
-- list_replace_init(&lbr_stitch->lists,
-- &lbr_stitch->free_lists);
-+ struct stitch_list *stitch_node;
-+
-+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
-+ map_symbol__exit(&stitch_node->cursor.ms);
-+
-+ list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
- }
- memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
- }
-diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
-index 87c59aa9fe38b..0ffdd52d86d70 100644
---- a/tools/perf/util/thread.c
-+++ b/tools/perf/util/thread.c
-@@ -476,6 +476,7 @@ void thread__free_stitch_list(struct thread *thread)
- return;
-
- list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
-+ map_symbol__exit(&pos->cursor.ms);
- list_del_init(&pos->node);
- free(pos);
- }
-@@ -485,6 +486,9 @@ void thread__free_stitch_list(struct thread *thread)
- free(pos);
- }
-
-+ for (unsigned int i = 0 ; i < lbr_stitch->prev_lbr_cursor_size; i++)
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
-+
- zfree(&lbr_stitch->prev_lbr_cursor);
- free(thread__lbr_stitch(thread));
- thread__set_lbr_stitch(thread, NULL);
-diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
-index 8b4a3c69bad19..6cbf6eb2812e0 100644
---- a/tools/perf/util/thread.h
-+++ b/tools/perf/util/thread.h
-@@ -26,6 +26,7 @@ struct lbr_stitch {
- struct list_head free_lists;
- struct perf_sample prev_sample;
- struct callchain_cursor_node *prev_lbr_cursor;
-+ unsigned int prev_lbr_cursor_size;
- };
-
- DECLARE_RC_STRUCT(thread) {
---
-2.43.0
-
bpf-zero-former-arg_ptr_to_-long-int-args-in-case-of.patch
perf-scripts-python-cs-etm-restore-first-sample-log-.patch
perf-mem-free-the-allocated-sort-string-fixing-a-lea.patch
-perf-callchain-fix-stitch-lbr-memory-leaks.patch
perf-lock-contention-change-stack_id-type-to-s32.patch
perf-inject-fix-leader-sampling-inserting-additional.patch
perf-report-fix-total-cycles-stdio-output-error.patch
+++ /dev/null
-From f3463e6dbd2ab2f265d877cfd3ff53c6ffa83fac Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 7 Aug 2024 22:46:43 -0700
-Subject: perf callchain: Fix stitch LBR memory leaks
-
-From: Ian Rogers <irogers@google.com>
-
-[ Upstream commit 599c19397b17d197fc1184bbc950f163a292efc9 ]
-
-The 'struct callchain_cursor_node' has a 'struct map_symbol' whose maps
-and map members are reference counted. Ensure these values use a _get
-routine to increment the reference counts and use map_symbol__exit() to
-release the reference counts.
-
-Do the same for 'struct thread's prev_lbr_cursor, but save the size of
-the prev_lbr_cursor array so that it may be iterated.
-
-Ensure that when stitch_nodes are placed on the free list the
-map_symbols are exited.
-
-Fix resolve_lbr_callchain_sample() by replacing list_replace_init() with
-list_splice_init(), so the whole list is moved and nodes aren't leaked.
-
-The memory leaks can be reproduced with a leak sanitizer build using
-the following perf record and report commands:
-
- ```
- $ perf record -e cycles --call-graph lbr perf test -w thloop
- $ perf report --stitch-lbr
- ```
-
-Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
-Fixes: ff165628d72644e3 ("perf callchain: Stitch LBR call stack")
-Signed-off-by: Ian Rogers <irogers@google.com>
-[ Basic tests after applying the patch, repeating the example above ]
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Cc: Adrian Hunter <adrian.hunter@intel.com>
-Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Anne Macedo <retpolanne@posteo.net>
-Cc: Changbin Du <changbin.du@huawei.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Jiri Olsa <jolsa@kernel.org>
-Cc: Mark Rutland <mark.rutland@arm.com>
-Cc: Namhyung Kim <namhyung@kernel.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20240808054644.1286065-1-irogers@google.com
-Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/perf/util/machine.c | 17 +++++++++++++++--
- tools/perf/util/thread.c | 4 ++++
- tools/perf/util/thread.h | 1 +
- 3 files changed, 20 insertions(+), 2 deletions(-)
-
---- a/tools/perf/util/machine.c
-+++ b/tools/perf/util/machine.c
-@@ -2270,8 +2270,12 @@ static void save_lbr_cursor_node(struct
- cursor->curr = cursor->first;
- else
- cursor->curr = cursor->curr->next;
-+
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
- memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
- sizeof(struct callchain_cursor_node));
-+ lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
-+ lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
-
- lbr_stitch->prev_lbr_cursor[idx].valid = true;
- cursor->pos++;
-@@ -2482,6 +2486,9 @@ static bool has_stitched_lbr(struct thre
- memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
- sizeof(struct callchain_cursor_node));
-
-+ stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
-+ stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
-+
- if (callee)
- list_add(&stitch_node->node, &lbr_stitch->lists);
- else
-@@ -2505,6 +2512,8 @@ static bool alloc_lbr_stitch(struct thre
- if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
- goto free_lbr_stitch;
-
-+ thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
-+
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
-
-@@ -2560,8 +2569,12 @@ static int resolve_lbr_callchain_sample(
- max_lbr, callee);
-
- if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
-- list_replace_init(&lbr_stitch->lists,
-- &lbr_stitch->free_lists);
-+ struct stitch_list *stitch_node;
-+
-+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
-+ map_symbol__exit(&stitch_node->cursor.ms);
-+
-+ list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
- }
- memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
- }
---- a/tools/perf/util/thread.c
-+++ b/tools/perf/util/thread.c
-@@ -476,6 +476,7 @@ void thread__free_stitch_list(struct thr
- return;
-
- list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
-+ map_symbol__exit(&pos->cursor.ms);
- list_del_init(&pos->node);
- free(pos);
- }
-@@ -485,6 +486,9 @@ void thread__free_stitch_list(struct thr
- free(pos);
- }
-
-+ for (unsigned int i = 0 ; i < lbr_stitch->prev_lbr_cursor_size; i++)
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
-+
- zfree(&lbr_stitch->prev_lbr_cursor);
- free(thread__lbr_stitch(thread));
- thread__set_lbr_stitch(thread, NULL);
---- a/tools/perf/util/thread.h
-+++ b/tools/perf/util/thread.h
-@@ -26,6 +26,7 @@ struct lbr_stitch {
- struct list_head free_lists;
- struct perf_sample prev_sample;
- struct callchain_cursor_node *prev_lbr_cursor;
-+ unsigned int prev_lbr_cursor_size;
- };
-
- DECLARE_RC_STRUCT(thread) {
perf-scripts-python-cs-etm-restore-first-sample-log-.patch
perf-bpf-move-bpf-disassembly-routines-to-separate-f.patch
perf-mem-free-the-allocated-sort-string-fixing-a-lea.patch
-perf-callchain-fix-stitch-lbr-memory-leaks.patch
perf-lock-contention-change-stack_id-type-to-s32.patch
perf-vendor-events-skx-clx-snr-uncore-cache-event-fi.patch
perf-inject-fix-leader-sampling-inserting-additional.patch
+++ /dev/null
-From 310257b8ea7ee5c9303a994559d1d9ea99c6cfb6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 7 Aug 2024 22:46:43 -0700
-Subject: perf callchain: Fix stitch LBR memory leaks
-
-From: Ian Rogers <irogers@google.com>
-
-[ Upstream commit 599c19397b17d197fc1184bbc950f163a292efc9 ]
-
-The 'struct callchain_cursor_node' has a 'struct map_symbol' whose maps
-and map members are reference counted. Ensure these values use a _get
-routine to increment the reference counts and use map_symbol__exit() to
-release the reference counts.
-
-Do the same for 'struct thread's prev_lbr_cursor, but save the size of
-the prev_lbr_cursor array so that it may be iterated.
-
-Ensure that when stitch_nodes are placed on the free list the
-map_symbols are exited.
-
-Fix resolve_lbr_callchain_sample() by replacing list_replace_init() with
-list_splice_init(), so the whole list is moved and nodes aren't leaked.
-
-The memory leaks can be reproduced with a leak sanitizer build using
-the following perf record and report commands:
-
- ```
- $ perf record -e cycles --call-graph lbr perf test -w thloop
- $ perf report --stitch-lbr
- ```
-
-Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
-Fixes: ff165628d72644e3 ("perf callchain: Stitch LBR call stack")
-Signed-off-by: Ian Rogers <irogers@google.com>
-[ Basic tests after applying the patch, repeating the example above ]
-Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Cc: Adrian Hunter <adrian.hunter@intel.com>
-Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Anne Macedo <retpolanne@posteo.net>
-Cc: Changbin Du <changbin.du@huawei.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Jiri Olsa <jolsa@kernel.org>
-Cc: Mark Rutland <mark.rutland@arm.com>
-Cc: Namhyung Kim <namhyung@kernel.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20240808054644.1286065-1-irogers@google.com
-Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/perf/util/machine.c | 17 +++++++++++++++--
- tools/perf/util/thread.c | 4 ++++
- tools/perf/util/thread.h | 1 +
- 3 files changed, 20 insertions(+), 2 deletions(-)
-
-diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
-index 7c6874804660e..24dead4e30656 100644
---- a/tools/perf/util/machine.c
-+++ b/tools/perf/util/machine.c
-@@ -2536,8 +2536,12 @@ static void save_lbr_cursor_node(struct thread *thread,
- cursor->curr = cursor->first;
- else
- cursor->curr = cursor->curr->next;
-+
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
- memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
- sizeof(struct callchain_cursor_node));
-+ lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
-+ lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
-
- lbr_stitch->prev_lbr_cursor[idx].valid = true;
- cursor->pos++;
-@@ -2748,6 +2752,9 @@ static bool has_stitched_lbr(struct thread *thread,
- memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
- sizeof(struct callchain_cursor_node));
-
-+ stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
-+ stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
-+
- if (callee)
- list_add(&stitch_node->node, &lbr_stitch->lists);
- else
-@@ -2771,6 +2778,8 @@ static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
- if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
- goto free_lbr_stitch;
-
-+ thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
-+
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
- INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
-
-@@ -2826,8 +2835,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
- max_lbr, callee);
-
- if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
-- list_replace_init(&lbr_stitch->lists,
-- &lbr_stitch->free_lists);
-+ struct stitch_list *stitch_node;
-+
-+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
-+ map_symbol__exit(&stitch_node->cursor.ms);
-+
-+ list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
- }
- memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
- }
-diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
-index 61e9f449c7258..6817b99e550ba 100644
---- a/tools/perf/util/thread.c
-+++ b/tools/perf/util/thread.c
-@@ -478,6 +478,7 @@ void thread__free_stitch_list(struct thread *thread)
- return;
-
- list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
-+ map_symbol__exit(&pos->cursor.ms);
- list_del_init(&pos->node);
- free(pos);
- }
-@@ -487,6 +488,9 @@ void thread__free_stitch_list(struct thread *thread)
- free(pos);
- }
-
-+ for (unsigned int i = 0 ; i < lbr_stitch->prev_lbr_cursor_size; i++)
-+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
-+
- zfree(&lbr_stitch->prev_lbr_cursor);
- free(thread__lbr_stitch(thread));
- thread__set_lbr_stitch(thread, NULL);
-diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
-index 0df775b5c1105..a5423f834dc9d 100644
---- a/tools/perf/util/thread.h
-+++ b/tools/perf/util/thread.h
-@@ -28,6 +28,7 @@ struct lbr_stitch {
- struct list_head free_lists;
- struct perf_sample prev_sample;
- struct callchain_cursor_node *prev_lbr_cursor;
-+ unsigned int prev_lbr_cursor_size;
- };
-
- struct thread_rb_node {
---
-2.43.0
-
bpf-improve-check_raw_mode_ok-test-for-mem_uninit-ta.patch
bpf-zero-former-arg_ptr_to_-long-int-args-in-case-of.patch
perf-mem-free-the-allocated-sort-string-fixing-a-lea.patch
-perf-callchain-fix-stitch-lbr-memory-leaks.patch
perf-inject-fix-leader-sampling-inserting-additional.patch
perf-annotate-split-branch-stack-cycles-info-from-st.patch
perf-annotate-move-some-source-code-related-fields-f.patch