--- /dev/null
+From 26efd79c4624294e553aeaa3439c646729bad084 Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian1@huawei.com>
+Date: Wed, 12 Jul 2023 14:04:52 +0800
+Subject: ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+commit 26efd79c4624294e553aeaa3439c646729bad084 upstream.
+
+As the comments in ftrace_process_locs() explain, there may be NULL
+pointers in the mcount_loc section:
+ > Some architecture linkers will pad between
+ > the different mcount_loc sections of different
+ > object files to satisfy alignments.
+ > Skip any NULL pointers.
+
+After commit 20e5227e9f55 ("ftrace: allow NULL pointers in mcount_loc"),
+NULL pointers are accounted for when allocating ftrace pages but are
+skipped before being added to the ftrace records, which may result in
+some pages not being used. Then, after commit 706c81f87f84 ("ftrace:
+Remove extra helper functions"), a warning may occur at:
+ WARN_ON(pg->next);
+
+To fix it, only warn in the case where no pointers were skipped but the
+pages were not used up, then free those unused pages after releasing
+ftrace_lock.
+
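+For illustration, below is a minimal user-space sketch of the same
+list-freeing pattern (hypothetical types and names, not the kernel
+implementation, which uses free_pages() and the ftrace page counters):
+
+  #include <stdlib.h>
+
+  struct page_group {                 /* stands in for struct ftrace_page */
+      struct page_group *next;
+      void *records;                  /* stands in for pg->records */
+  };
+
+  /* Walk the singly linked list, freeing each record buffer and node,
+   * as ftrace_free_pages() does with free_pages() and kfree(). */
+  static void free_groups(struct page_group *pg)
+  {
+      while (pg) {
+          struct page_group *next = pg->next;
+          free(pg->records);
+          free(pg);
+          pg = next;
+      }
+  }
+
+  int main(void)
+  {
+      /* Build a two-node list, then detach and free the unused tail,
+       * mirroring how ftrace_process_locs() now hands pg->next to
+       * ftrace_free_pages() after releasing ftrace_lock. */
+      struct page_group *used = calloc(1, sizeof(*used));
+      if (!used)
+          return 1;
+      used->next = calloc(1, sizeof(*used->next));
+      struct page_group *unused = used->next;
+      used->next = NULL;
+      free_groups(unused);
+      free_groups(used);
+      return 0;
+  }
+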
+Link: https://lore.kernel.org/linux-trace-kernel/20230712060452.3175675-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 706c81f87f84 ("ftrace: Remove extra helper functions")
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c | 45 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 31 insertions(+), 14 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3196,6 +3196,22 @@ static int ftrace_allocate_records(struc
+ return cnt;
+ }
+
++static void ftrace_free_pages(struct ftrace_page *pages)
++{
++ struct ftrace_page *pg = pages;
++
++ while (pg) {
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
++ pages = pg->next;
++ kfree(pg);
++ pg = pages;
++ ftrace_number_of_groups--;
++ }
++}
++
+ static struct ftrace_page *
+ ftrace_allocate_pages(unsigned long num_to_init)
+ {
+@@ -3234,17 +3250,7 @@ ftrace_allocate_pages(unsigned long num_
+ return start_pg;
+
+ free_pages:
+- pg = start_pg;
+- while (pg) {
+- if (pg->records) {
+- free_pages((unsigned long)pg->records, pg->order);
+- ftrace_number_of_pages -= 1 << pg->order;
+- }
+- start_pg = pg->next;
+- kfree(pg);
+- pg = start_pg;
+- ftrace_number_of_groups--;
+- }
++ ftrace_free_pages(start_pg);
+ pr_info("ftrace: FAILED to allocate memory for functions\n");
+ return NULL;
+ }
+@@ -6190,9 +6196,11 @@ static int ftrace_process_locs(struct mo
+ unsigned long *start,
+ unsigned long *end)
+ {
++ struct ftrace_page *pg_unuse = NULL;
+ struct ftrace_page *start_pg;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
++ unsigned long skipped = 0;
+ unsigned long count;
+ unsigned long *p;
+ unsigned long addr;
+@@ -6246,8 +6254,10 @@ static int ftrace_process_locs(struct mo
+ * object files to satisfy alignments.
+ * Skip any NULL pointers.
+ */
+- if (!addr)
++ if (!addr) {
++ skipped++;
+ continue;
++ }
+
+ end_offset = (pg->index+1) * sizeof(pg->records[0]);
+ if (end_offset > PAGE_SIZE << pg->order) {
+@@ -6261,8 +6271,10 @@ static int ftrace_process_locs(struct mo
+ rec->ip = addr;
+ }
+
+- /* We should have used all pages */
+- WARN_ON(pg->next);
++ if (pg->next) {
++ pg_unuse = pg->next;
++ pg->next = NULL;
++ }
+
+ /* Assign the last page to ftrace_pages */
+ ftrace_pages = pg;
+@@ -6284,6 +6296,11 @@ static int ftrace_process_locs(struct mo
+ out:
+ mutex_unlock(&ftrace_lock);
+
++ /* We should have used all pages unless we skipped some */
++ if (pg_unuse) {
++ WARN_ON(!skipped);
++ ftrace_free_pages(pg_unuse);
++ }
+ return ret;
+ }
+
--- /dev/null
+From db42523b4f3e83ff86b53cdda219a9767c8b047f Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 1 Apr 2021 16:14:17 -0400
+Subject: ftrace: Store the order of pages allocated in ftrace_page
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit db42523b4f3e83ff86b53cdda219a9767c8b047f upstream.
+
+Instead of saving the size of the records field of the ftrace_page,
+store the order used to allocate the pages, as that is what is needed
+to free the pages later. This simplifies the code.
+
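+As a rough worked example of the arithmetic this enables, the record
+capacity and the size handed back to the allocator both follow directly
+from the stored order (the ENTRY_SIZE of 32 bytes below is an assumption
+for illustration, not the real sizeof(struct dyn_ftrace)):
+
+  #include <stdio.h>
+
+  #define PAGE_SIZE  4096UL
+  #define ENTRY_SIZE 32UL    /* assumed record size for illustration */
+
+  int main(void)
+  {
+      /* cnt = (PAGE_SIZE << order) / ENTRY_SIZE, as in
+       * ftrace_allocate_records(); the same order later feeds
+       * free_pages() with no get_count_order() recomputation. */
+      for (int order = 0; order <= 3; order++) {
+          unsigned long bytes = PAGE_SIZE << order;
+          printf("order %d: %5lu bytes, %3lu records\n",
+                 order, bytes, bytes / ENTRY_SIZE);
+      }
+      return 0;
+  }
+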
+Link: https://lore.kernel.org/lkml/CAHk-=whyMxheOqXAORt9a7JK9gc9eHTgCJ55Pgs4p=X3RrQubQ@mail.gmail.com/
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ change log written by Steven Rostedt ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c | 35 +++++++++++++++++------------------
+ 1 file changed, 17 insertions(+), 18 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1091,7 +1091,7 @@ struct ftrace_page {
+ struct ftrace_page *next;
+ struct dyn_ftrace *records;
+ int index;
+- int size;
++ int order;
+ };
+
+ #define ENTRY_SIZE sizeof(struct dyn_ftrace)
+@@ -3188,7 +3188,7 @@ static int ftrace_allocate_records(struc
+ ftrace_number_of_groups++;
+
+ cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
+- pg->size = cnt;
++ pg->order = order;
+
+ if (cnt > count)
+ cnt = count;
+@@ -3201,7 +3201,6 @@ ftrace_allocate_pages(unsigned long num_
+ {
+ struct ftrace_page *start_pg;
+ struct ftrace_page *pg;
+- int order;
+ int cnt;
+
+ if (!num_to_init)
+@@ -3237,13 +3236,13 @@ ftrace_allocate_pages(unsigned long num_
+ free_pages:
+ pg = start_pg;
+ while (pg) {
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ start_pg = pg->next;
+ kfree(pg);
+ pg = start_pg;
+- ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
+ }
+ pr_info("ftrace: FAILED to allocate memory for functions\n");
+@@ -6239,6 +6238,7 @@ static int ftrace_process_locs(struct mo
+ p = start;
+ pg = start_pg;
+ while (p < end) {
++ unsigned long end_offset;
+ addr = ftrace_call_adjust(*p++);
+ /*
+ * Some architecture linkers will pad between
+@@ -6249,7 +6249,8 @@ static int ftrace_process_locs(struct mo
+ if (!addr)
+ continue;
+
+- if (pg->index == pg->size) {
++ end_offset = (pg->index+1) * sizeof(pg->records[0]);
++ if (end_offset > PAGE_SIZE << pg->order) {
+ /* We should have allocated enough */
+ if (WARN_ON(!pg->next))
+ break;
+@@ -6418,7 +6419,6 @@ void ftrace_release_mod(struct module *m
+ struct ftrace_page **last_pg;
+ struct ftrace_page *tmp_page = NULL;
+ struct ftrace_page *pg;
+- int order;
+
+ mutex_lock(&ftrace_lock);
+
+@@ -6469,12 +6469,12 @@ void ftrace_release_mod(struct module *m
+ /* Needs to be called outside of ftrace_lock */
+ clear_mod_from_hashes(pg);
+
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ tmp_page = pg->next;
+ kfree(pg);
+- ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
+ }
+ }
+@@ -6792,7 +6792,6 @@ void ftrace_free_mem(struct module *mod,
+ struct ftrace_mod_map *mod_map = NULL;
+ struct ftrace_init_func *func, *func_next;
+ struct list_head clear_hash;
+- int order;
+
+ INIT_LIST_HEAD(&clear_hash);
+
+@@ -6830,10 +6829,10 @@ void ftrace_free_mem(struct module *mod,
+ ftrace_update_tot_cnt--;
+ if (!pg->index) {
+ *last_pg = pg->next;
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
+- ftrace_number_of_pages -= 1 << order;
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ ftrace_number_of_groups--;
+ kfree(pg);
+ pg = container_of(last_pg, struct ftrace_page, next);
tcp-annotate-data-races-around-icsk-icsk_user_timeou.patch
tcp-annotate-data-races-around-fastopenq.max_qlen.patch
net-phy-prevent-stale-pointer-dereference-in-phy_ini.patch
+tracing-histograms-return-an-error-if-we-fail-to-add-histogram-to-hist_vars-list.patch
+tracing-fix-memory-leak-of-iter-temp-when-reading-trace_pipe.patch
+ftrace-store-the-order-of-pages-allocated-in-ftrace_page.patch
+ftrace-fix-possible-warning-on-checking-all-pages-used-in-ftrace_process_locs.patch
--- /dev/null
+From d5a821896360cc8b93a15bd888fabc858c038dc0 Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian1@huawei.com>
+Date: Thu, 13 Jul 2023 22:14:35 +0800
+Subject: tracing: Fix memory leak of iter->temp when reading trace_pipe
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+commit d5a821896360cc8b93a15bd888fabc858c038dc0 upstream.
+
+kmemleak reports:
+ unreferenced object 0xffff88814d14e200 (size 256):
+ comm "cat", pid 336, jiffies 4294871818 (age 779.490s)
+ hex dump (first 32 bytes):
+ 04 00 01 03 00 00 00 00 08 00 00 00 00 00 00 00 ................
+ 0c d8 c8 9b ff ff ff ff 04 5a ca 9b ff ff ff ff .........Z......
+ backtrace:
+ [<ffffffff9bdff18f>] __kmalloc+0x4f/0x140
+ [<ffffffff9bc9238b>] trace_find_next_entry+0xbb/0x1d0
+ [<ffffffff9bc9caef>] trace_print_lat_context+0xaf/0x4e0
+ [<ffffffff9bc94490>] print_trace_line+0x3e0/0x950
+ [<ffffffff9bc95499>] tracing_read_pipe+0x2d9/0x5a0
+ [<ffffffff9bf03a43>] vfs_read+0x143/0x520
+ [<ffffffff9bf04c2d>] ksys_read+0xbd/0x160
+ [<ffffffff9d0f0edf>] do_syscall_64+0x3f/0x90
+ [<ffffffff9d2000aa>] entry_SYSCALL_64_after_hwframe+0x6e/0xd8
+
+When reading the file 'trace_pipe', 'iter->temp' is allocated or
+reallocated in trace_find_next_entry() but is not freed before
+'trace_pipe' is closed.
+
+To fix it, free 'iter->temp' in tracing_release_pipe().
+
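+A minimal sketch of the allocate-on-demand / free-on-release pairing the
+fix restores (hypothetical names, user-space C rather than the tracing
+code itself):
+
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct iter {
+      char *temp;           /* lazily (re)allocated scratch buffer */
+      size_t temp_size;
+  };
+
+  /* Grow the scratch buffer on demand, as the trace_pipe read path
+   * does for iter->temp in trace_find_next_entry(). */
+  static char *iter_scratch(struct iter *it, size_t need)
+  {
+      if (need > it->temp_size) {
+          char *p = realloc(it->temp, need);
+          if (!p)
+              return NULL;
+          it->temp = p;
+          it->temp_size = need;
+      }
+      return it->temp;
+  }
+
+  /* The release path must free everything the read path may have
+   * allocated -- the missing kfree(iter->temp) was the leak. */
+  static void iter_release(struct iter *it)
+  {
+      free(it->temp);
+      free(it);
+  }
+
+  int main(void)
+  {
+      struct iter *it = calloc(1, sizeof(*it));
+      if (it && iter_scratch(it, 128))
+          memset(it->temp, 0, 128);
+      if (it)
+          iter_release(it);
+      return 0;
+  }
+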
+Link: https://lore.kernel.org/linux-trace-kernel/20230713141435.1133021-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: ff895103a84ab ("tracing: Save off entry when peeking at next entry")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+[Fix conflict due to lack of 649e72070cbbb8600eb823833e4748f5a0815116]
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6250,6 +6250,7 @@ static int tracing_release_pipe(struct i
+ mutex_unlock(&trace_types_lock);
+
+ free_cpumask_var(iter->started);
++ kfree(iter->temp);
+ mutex_destroy(&iter->mutex);
+ kfree(iter);
+
--- /dev/null
+From 4b8b3905165ef98386a3c06f196c85d21292d029 Mon Sep 17 00:00:00 2001
+From: Mohamed Khalfella <mkhalfella@purestorage.com>
+Date: Fri, 14 Jul 2023 20:33:41 +0000
+Subject: tracing/histograms: Return an error if we fail to add histogram to hist_vars list
+
+From: Mohamed Khalfella <mkhalfella@purestorage.com>
+
+commit 4b8b3905165ef98386a3c06f196c85d21292d029 upstream.
+
+Commit 6018b585e8c6 ("tracing/histograms: Add histograms to hist_vars if
+they have referenced variables") added a check to fail histogram creation
+if save_hist_vars() failed to add the histogram to the hist_vars list.
+But the commit did not set ret to the error return code before jumping
+to unregister the histogram; fix it.
+
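+The pattern at issue, sketched with hypothetical stand-in helpers: an
+error path that jumps to a cleanup label must set the return code
+first, or the caller sees a stale (here, success) value:
+
+  #include <stdio.h>
+
+  /* Hypothetical stand-ins for the trigger/histogram helpers. */
+  static int  register_thing(void)   { return 0; }
+  static int  save_vars(void)        { return -12; } /* pretend -ENOMEM */
+  static void unregister_thing(void) { }
+
+  static int setup(void)
+  {
+      int ret = register_thing();
+      if (ret)
+          return ret;
+
+      /* Before the fix: "if (save_vars()) goto out_unreg;" reached the
+       * cleanup label with ret still 0, so the failure was reported as
+       * success. Capturing the return code first fixes that. */
+      ret = save_vars();
+      if (ret)
+          goto out_unreg;
+
+      return 0;
+
+  out_unreg:
+      unregister_thing();
+      return ret;
+  }
+
+  int main(void)
+  {
+      printf("setup() = %d\n", setup());
+      return 0;
+  }
+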
+Link: https://lore.kernel.org/linux-trace-kernel/20230714203341.51396-1-mkhalfella@purestorage.com
+
+Cc: stable@vger.kernel.org
+Fixes: 6018b585e8c6 ("tracing/histograms: Add histograms to hist_vars if they have referenced variables")
+Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events_hist.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5822,7 +5822,8 @@ static int event_hist_trigger_func(struc
+ goto out_unreg;
+
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+- if (save_hist_vars(hist_data))
++ ret = save_hist_vars(hist_data);
++ if (ret)
+ goto out_unreg;
+ }
+