--- /dev/null
+From ca08d0eac020d48a3141dbec0a3cf64fbdb17cde Mon Sep 17 00:00:00 2001
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Date: Thu, 18 Aug 2022 21:50:44 +0800
+Subject: cifs: Fix memory leak on the deferred close
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+commit ca08d0eac020d48a3141dbec0a3cf64fbdb17cde upstream.
+
+xfstests on smb21 report kmemleak as below:
+
+ unreferenced object 0xffff8881767d6200 (size 64):
+ comm "xfs_io", pid 1284, jiffies 4294777434 (age 20.789s)
+ hex dump (first 32 bytes):
+ 80 5a d0 11 81 88 ff ff 78 8a aa 63 81 88 ff ff .Z......x..c....
+ 00 71 99 76 81 88 ff ff 00 00 00 00 00 00 00 00 .q.v............
+ backtrace:
+ [<00000000ad04e6ea>] cifs_close+0x92/0x2c0
+ [<0000000028b93c82>] __fput+0xff/0x3f0
+ [<00000000d8116851>] task_work_run+0x85/0xc0
+ [<0000000027e14f9e>] do_exit+0x5e5/0x1240
+ [<00000000fb492b95>] do_group_exit+0x58/0xe0
+ [<00000000129a32d9>] __x64_sys_exit_group+0x28/0x30
+ [<00000000e3f7d8e9>] do_syscall_64+0x35/0x80
+ [<00000000102e8a0b>] entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+When cancelling the deferred close work, we should also clean up the
+struct cifs_deferred_close.
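+
+Below is a minimal user-space sketch of the leak pattern (illustration
+only, with invented names -- it is not the cifs code): the deferred
+worker normally consumes and frees the bookkeeping record, so whoever
+cancels the pending work has to free that record instead.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* Stand-in for struct cifs_deferred_close. */
+  struct deferred_close {
+          int handle;
+  };
+
+  /* The delayed work normally runs this and frees the record. */
+  static void deferred_worker(struct deferred_close *dc)
+  {
+          printf("closing handle %d\n", dc->handle);
+          free(dc);
+  }
+
+  int main(void)
+  {
+          struct deferred_close *dc = malloc(sizeof(*dc));
+          int cancelled = 1;
+
+          if (!dc)
+                  return 1;
+          dc->handle = 42;
+          if (cancelled)
+                  free(dc);       /* analogous to cifs_del_deferred_close() */
+          else
+                  deferred_worker(dc);
+          return 0;
+  }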
+
+Fixes: 9e992755be8f2 ("cifs: Call close synchronously during unlink/rename/lease break.")
+Fixes: e3fc065682ebb ("cifs: Deferred close performance improvements")
+Cc: stable@vger.kernel.org
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/misc.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -736,6 +736,8 @@ cifs_close_deferred_file(struct cifsInod
+ list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
++ cifs_del_deferred_close(cfile);
++
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+@@ -767,6 +769,8 @@ cifs_close_all_deferred_files(struct cif
+ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
++ cifs_del_deferred_close(cfile);
++
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+@@ -802,6 +806,8 @@ cifs_close_deferred_file_under_dentry(st
+ if (strstr(full_path, path)) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
++ cifs_del_deferred_close(cfile);
++
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
btrfs-unset-reloc-control-if-transaction-commit-fails-in-prepare_to_relocate.patch
btrfs-reset-ro-counter-on-block-group-if-we-fail-to-relocate.patch
btrfs-fix-lost-error-handling-when-looking-up-extended-ref-on-log-replay.patch
+cifs-fix-memory-leak-on-the-deferred-close.patch
+x86-kprobes-fix-jng-jnle-emulation.patch
+tracing-perf-fix-double-put-of-trace-event-when-init-fails.patch
+tracing-eprobes-do-not-allow-eprobes-to-use-stack-or-for-regs.patch
+tracing-eprobes-do-not-hardcode-comm-as-a-string.patch
+tracing-eprobes-have-event-probes-be-consistent-with-kprobes-and-uprobes.patch
+tracing-probes-have-kprobes-and-uprobes-use-comm-too.patch
+tracing-have-filter-accept-common_cpu-to-be-consistent.patch
--- /dev/null
+From 2673c60ee67e71f2ebe34386e62d348f71edee47 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sat, 20 Aug 2022 09:43:17 -0400
+Subject: tracing/eprobes: Do not allow eprobes to use $stack, or % for regs
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 2673c60ee67e71f2ebe34386e62d348f71edee47 upstream.
+
+While playing with event probes (eprobes), I tried to see what would
+happen if I attempted to retrieve the instruction pointer (%rip) knowing
+that event probes do not use pt_regs. The result was:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000024
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 1 PID: 1847 Comm: trace-cmd Not tainted 5.19.0-rc5-test+ #309
+ Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01
+v03.03 07/14/2016
+ RIP: 0010:get_event_field.isra.0+0x0/0x50
+ Code: ff 48 c7 c7 c0 8f 74 a1 e8 3d 8b f5 ff e8 88 09 f6 ff 4c 89 e7 e8
+50 6a 13 00 48 89 ef 5b 5d 41 5c 41 5d e9 42 6a 13 00 66 90 <48> 63 47 24
+8b 57 2c 48 01 c6 8b 47 28 83 f8 02 74 0e 83 f8 04 74
+ RSP: 0018:ffff916c394bbaf0 EFLAGS: 00010086
+ RAX: ffff916c854041d8 RBX: ffff916c8d9fbf50 RCX: ffff916c255d2000
+ RDX: 0000000000000000 RSI: ffff916c255d2008 RDI: 0000000000000000
+ RBP: 0000000000000000 R08: ffff916c3a2a0c08 R09: ffff916c394bbda8
+ R10: 0000000000000000 R11: 0000000000000000 R12: ffff916c854041d8
+ R13: ffff916c854041b0 R14: 0000000000000000 R15: 0000000000000000
+ FS: 0000000000000000(0000) GS:ffff916c9ea40000(0000)
+knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000024 CR3: 000000011b60a002 CR4: 00000000001706e0
+ Call Trace:
+ <TASK>
+ get_eprobe_size+0xb4/0x640
+ ? __mod_node_page_state+0x72/0xc0
+ __eprobe_trace_func+0x59/0x1a0
+ ? __mod_lruvec_page_state+0xaa/0x1b0
+ ? page_remove_file_rmap+0x14/0x230
+ ? page_remove_rmap+0xda/0x170
+ event_triggers_call+0x52/0xe0
+ trace_event_buffer_commit+0x18f/0x240
+ trace_event_raw_event_sched_wakeup_template+0x7a/0xb0
+ try_to_wake_up+0x260/0x4c0
+ __wake_up_common+0x80/0x180
+ __wake_up_common_lock+0x7c/0xc0
+ do_notify_parent+0x1c9/0x2a0
+ exit_notify+0x1a9/0x220
+ do_exit+0x2ba/0x450
+ do_group_exit+0x2d/0x90
+ __x64_sys_exit_group+0x14/0x20
+ do_syscall_64+0x3b/0x90
+ entry_SYSCALL_64_after_hwframe+0x46/0xb0
+
+Obviously this is not the desired result.
+
+Move the test for TPARG_FL_TPOINT, which is only used for event probes,
+to the top of the "$" variable check, as none of the other variables
+apply to event probes. Also add a check to the register parsing ("%") so
+that it fails if an event probe is used.
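+
+A rough user-space sketch of the ordering idea (names and flag values are
+made up for illustration; this is not the trace_probe.c code): once the
+event-probe flag is set, every "$name" is treated as an event field and
+"%reg" is rejected, because there is no pt_regs to read from.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  #define FL_TPOINT 0x1   /* parsing arguments for an event probe */
+
+  static int parse_arg(const char *arg, unsigned int flags)
+  {
+          if (arg[0] == '$') {
+                  /* Check the event-probe case first: $retval, $stack,
+                   * $argN and friends all need registers. */
+                  if (flags & FL_TPOINT) {
+                          printf("event field: %s\n", arg + 1);
+                          return 0;
+                  }
+                  if (strcmp(arg, "$retval") == 0) {
+                          printf("return value\n");
+                          return 0;
+                  }
+                  printf("unsupported variable: %s\n", arg);
+                  return -1;
+          }
+          if (arg[0] == '%') {
+                  if (flags & FL_TPOINT) {
+                          printf("error: %s, eprobes have no regs\n", arg);
+                          return -1;
+                  }
+                  printf("register: %s\n", arg + 1);
+                  return 0;
+          }
+          return -1;
+  }
+
+  int main(void)
+  {
+          parse_arg("$ip", FL_TPOINT);    /* a field named "ip" */
+          parse_arg("%ax", FL_TPOINT);    /* rejected for eprobes */
+          parse_arg("%ax", 0);            /* fine for kprobes */
+          return 0;
+  }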
+
+Link: https://lkml.kernel.org/r/20220820134400.564426983@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events")
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_probe.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -279,7 +279,14 @@ static int parse_probe_vars(char *arg, c
+ int ret = 0;
+ int len;
+
+- if (strcmp(arg, "retval") == 0) {
++ if (flags & TPARG_FL_TPOINT) {
++ if (code->data)
++ return -EFAULT;
++ code->data = kstrdup(arg, GFP_KERNEL);
++ if (!code->data)
++ return -ENOMEM;
++ code->op = FETCH_OP_TP_ARG;
++ } else if (strcmp(arg, "retval") == 0) {
+ if (flags & TPARG_FL_RETURN) {
+ code->op = FETCH_OP_RETVAL;
+ } else {
+@@ -319,13 +326,6 @@ static int parse_probe_vars(char *arg, c
+ code->op = FETCH_OP_ARG;
+ code->param = (unsigned int)param - 1;
+ #endif
+- } else if (flags & TPARG_FL_TPOINT) {
+- if (code->data)
+- return -EFAULT;
+- code->data = kstrdup(arg, GFP_KERNEL);
+- if (!code->data)
+- return -ENOMEM;
+- code->op = FETCH_OP_TP_ARG;
+ } else
+ goto inval_var;
+
+@@ -380,6 +380,11 @@ parse_probe_arg(char *arg, const struct
+ break;
+
+ case '%': /* named register */
++ if (flags & TPARG_FL_TPOINT) {
++ /* eprobes do not handle registers */
++ trace_probe_log_err(offs, BAD_VAR);
++ break;
++ }
+ ret = regs_query_register_offset(arg + 1);
+ if (ret >= 0) {
+ code->op = FETCH_OP_REG;
--- /dev/null
+From 02333de90e5945e2fe7fc75b15b4eb9aee187f0a Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sat, 20 Aug 2022 09:43:18 -0400
+Subject: tracing/eprobes: Do not hardcode $comm as a string
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 02333de90e5945e2fe7fc75b15b4eb9aee187f0a upstream.
+
+The variable $comm is hardcoded as a string, which is correct for both
+kprobes and uprobes, but for event probes (eprobes) "comm" is a field name
+of the attached event. In most cases that "comm" field will be a string,
+but there is no guarantee of it.
+
+Do not assume that comm is a string. Not to mention, the assumption
+currently forces comm fields to fault, as string processing for event
+probes is broken.
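+
+A small stand-alone sketch of the idea (purely illustrative; the field
+table and names below are invented): resolve the type of "comm" from the
+attached event's field definition instead of assuming "string" from the
+name alone.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct field { const char *name; const char *type; };
+
+  /* Hypothetical field list of the event an eprobe is attached to. */
+  static const struct field fields[] = {
+          { "comm", "char[16]" },   /* not necessarily "string" */
+          { "pid",  "pid_t"    },
+  };
+
+  static const char *field_type(const char *name)
+  {
+          for (unsigned int i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
+                  if (strcmp(fields[i].name, name) == 0)
+                          return fields[i].type;
+          return NULL;
+  }
+
+  int main(void)
+  {
+          /* Treating this as "string" by name alone would mis-decode it. */
+          printf("comm is declared as: %s\n", field_type("comm"));
+          return 0;
+  }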
+
+Link: https://lkml.kernel.org/r/20220820134400.756152112@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_probe.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -618,9 +618,10 @@ static int traceprobe_parse_probe_arg_bo
+
+ /*
+ * Since $comm and immediate string can not be dereferenced,
+- * we can find those by strcmp.
++ * we can find those by strcmp. But ignore for eprobes.
+ */
+- if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
++ if (!(flags & TPARG_FL_TPOINT) &&
++ (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0)) {
+ /* The type of $comm must be "string", and not an array. */
+ if (parg->count || (t && strcmp(t, "string")))
+ goto out;
--- /dev/null
+From 6a832ec3d680b3a4f4fad5752672827d71bae501 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sat, 20 Aug 2022 09:43:20 -0400
+Subject: tracing/eprobes: Have event probes be consistent with kprobes and uprobes
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 6a832ec3d680b3a4f4fad5752672827d71bae501 upstream.
+
+Currently, if the symbol syntax "@" is used with an event probe (eprobe),
+it causes a NULL pointer dereference crash.
+
+Both kprobes and uprobes can reference data other than the main registers,
+such as immediate addresses, symbols and the current task name. Have
+eprobes do the same thing.
+
+For "comm", if "comm" is used and the event being attached to does not
+have a "comm" field, then make it the "$comm" that kprobes has. This is
+consistent with the way histograms and filters work.
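+
+The sketch below models the added dispatch in user space (a toy
+interpreter with invented op names, not the trace_eprobe.c code): each
+fetch instruction yields a value from the event record, an immediate, or
+the task name, and placeholder ops are skipped before processing.
+
+  #include <stdio.h>
+
+  enum op { OP_TP_ARG, OP_IMM, OP_COMM, OP_NOP_SYMBOL, OP_END };
+
+  struct insn {
+          enum op op;
+          unsigned long immediate;
+  };
+
+  static unsigned long fetch(const struct insn *code, unsigned long field,
+                             const char *comm)
+  {
+  retry:
+          switch (code->op) {
+          case OP_TP_ARG:         /* value comes from the event record */
+                  return field;
+          case OP_IMM:            /* literal from the probe definition */
+                  return code->immediate;
+          case OP_COMM:           /* current task name */
+                  return (unsigned long)comm;
+          case OP_NOP_SYMBOL:     /* placeholder: skip it and retry */
+                  code++;
+                  goto retry;
+          default:                /* unknown op: give up, like -EILSEQ */
+                  return 0;
+          }
+  }
+
+  int main(void)
+  {
+          struct insn prog[] = {
+                  { OP_NOP_SYMBOL, 0 }, { OP_IMM, 42 }, { OP_END, 0 }
+          };
+
+          /* Skips the placeholder and prints 42. */
+          printf("%lu\n", fetch(prog, 7, "sh"));
+          return 0;
+  }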
+
+Link: https://lkml.kernel.org/r/20220820134401.136924220@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_eprobe.c | 70 ++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 64 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -226,6 +226,7 @@ static int trace_eprobe_tp_arg_update(st
+ struct probe_arg *parg = &ep->tp.args[i];
+ struct ftrace_event_field *field;
+ struct list_head *head;
++ int ret = -ENOENT;
+
+ head = trace_get_fields(ep->event);
+ list_for_each_entry(field, head, link) {
+@@ -235,9 +236,20 @@ static int trace_eprobe_tp_arg_update(st
+ return 0;
+ }
+ }
++
++ /*
++ * Argument not found on event. But allow for comm and COMM
++ * to be used to get the current->comm.
++ */
++ if (strcmp(parg->code->data, "COMM") == 0 ||
++ strcmp(parg->code->data, "comm") == 0) {
++ parg->code->op = FETCH_OP_COMM;
++ ret = 0;
++ }
++
+ kfree(parg->code->data);
+ parg->code->data = NULL;
+- return -ENOENT;
++ return ret;
+ }
+
+ static int eprobe_event_define_fields(struct trace_event_call *event_call)
+@@ -339,16 +351,38 @@ static unsigned long get_event_field(str
+
+ static int get_eprobe_size(struct trace_probe *tp, void *rec)
+ {
++ struct fetch_insn *code;
+ struct probe_arg *arg;
+ int i, len, ret = 0;
+
+ for (i = 0; i < tp->nr_args; i++) {
+ arg = tp->args + i;
+- if (unlikely(arg->dynamic)) {
++ if (arg->dynamic) {
+ unsigned long val;
+
+- val = get_event_field(arg->code, rec);
+- len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
++ code = arg->code;
++ retry:
++ switch (code->op) {
++ case FETCH_OP_TP_ARG:
++ val = get_event_field(code, rec);
++ break;
++ case FETCH_OP_IMM:
++ val = code->immediate;
++ break;
++ case FETCH_OP_COMM:
++ val = (unsigned long)current->comm;
++ break;
++ case FETCH_OP_DATA:
++ val = (unsigned long)code->data;
++ break;
++ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
++ code++;
++ goto retry;
++ default:
++ continue;
++ }
++ code++;
++ len = process_fetch_insn_bottom(code, val, NULL, NULL);
+ if (len > 0)
+ ret += len;
+ }
+@@ -366,8 +400,28 @@ process_fetch_insn(struct fetch_insn *co
+ {
+ unsigned long val;
+
+- val = get_event_field(code, rec);
+- return process_fetch_insn_bottom(code + 1, val, dest, base);
++ retry:
++ switch (code->op) {
++ case FETCH_OP_TP_ARG:
++ val = get_event_field(code, rec);
++ break;
++ case FETCH_OP_IMM:
++ val = code->immediate;
++ break;
++ case FETCH_OP_COMM:
++ val = (unsigned long)current->comm;
++ break;
++ case FETCH_OP_DATA:
++ val = (unsigned long)code->data;
++ break;
++ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
++ code++;
++ goto retry;
++ default:
++ return -EILSEQ;
++ }
++ code++;
++ return process_fetch_insn_bottom(code, val, dest, base);
+ }
+ NOKPROBE_SYMBOL(process_fetch_insn)
+
+@@ -849,6 +903,10 @@ static int trace_eprobe_tp_update_arg(st
+ if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
+ ret = trace_eprobe_tp_arg_update(ep, i);
+
++ /* Handle symbols "@" */
++ if (!ret)
++ ret = traceprobe_update_arg(&ep->tp.args[i]);
++
+ return ret;
+ }
+
--- /dev/null
+From b2380577d4fe1c0ef3fa50417f1e441c016e4cbe Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sat, 20 Aug 2022 09:43:22 -0400
+Subject: tracing: Have filter accept "common_cpu" to be consistent
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit b2380577d4fe1c0ef3fa50417f1e441c016e4cbe upstream.
+
+Make filtering consistent with histograms. As "cpu" can be a field of an
+event, allow for "common_cpu" to keep it from being confused with the
+"cpu" field of the event.
+
+Link: https://lkml.kernel.org/r/20220820134401.513062765@goodmis.org
+Link: https://lore.kernel.org/all/20220820220920.e42fa32b70505b1904f0a0ad@kernel.org/
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 1e3bac71c5053 ("tracing/histogram: Rename "cpu" to "common_cpu"")
+Suggested-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -176,6 +176,7 @@ static int trace_define_generic_fields(v
+
+ __generic_field(int, CPU, FILTER_CPU);
+ __generic_field(int, cpu, FILTER_CPU);
++ __generic_field(int, common_cpu, FILTER_CPU);
+ __generic_field(char *, COMM, FILTER_COMM);
+ __generic_field(char *, comm, FILTER_COMM);
+
--- /dev/null
+From 7249921d94ff64f67b733eca0b68853a62032b3d Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 16 Aug 2022 19:28:17 -0400
+Subject: tracing/perf: Fix double put of trace event when init fails
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 7249921d94ff64f67b733eca0b68853a62032b3d upstream.
+
+If perf_trace_event_open() fails in perf_trace_event_init(), then
+perf_trace_event_init() will call perf_trace_event_unreg(), which will not
+only unregister the perf trace event but will also call the put() function
+of the tp_event.
+
+The problem here is that trace_event_try_get_ref() is called by the caller
+of perf_trace_event_init(), and if perf_trace_event_init() returns a
+failure, that caller will then call trace_event_put(). But since
+perf_trace_event_unreg() already called the trace_event_put() function,
+this triggers a WARN_ON():
+
+ WARNING: CPU: 1 PID: 30309 at kernel/trace/trace_dynevent.c:46 trace_event_dyn_put_ref+0x15/0x20
+
+If perf_trace_event_reg() does not call trace_event_try_get_ref(), then
+perf_trace_event_unreg() should not be calling trace_event_put(). This
+breaks the symmetry and causes bugs like this one.
+
+Pull the trace_event_put() out of perf_trace_event_unreg() and call it at
+the locations where perf_trace_event_unreg() is called. This not only
+fixes the bug but also brings back the proper symmetry of the reg/unreg
+vs get/put logic.
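+
+A toy user-space model of the symmetry rule (illustrative only, not the
+perf code): the caller that takes the reference is the one that drops it,
+and unreg() never drops a reference it did not take.
+
+  #include <stdio.h>
+
+  struct event { int refcount; };
+
+  static void get(struct event *e) { e->refcount++; }
+
+  static void put(struct event *e)
+  {
+          if (e->refcount <= 0) {         /* the WARN_ON() in the report */
+                  fprintf(stderr, "WARN: refcount underflow\n");
+                  return;
+          }
+          e->refcount--;
+  }
+
+  /* Tear down the registration only; do not call put() in here. */
+  static void unreg(struct event *e) { (void)e; }
+
+  int main(void)
+  {
+          struct event ev = { 0 };
+
+          get(&ev);       /* the caller takes the reference ...         */
+          unreg(&ev);     /* ... tears down the registration on failure */
+          put(&ev);       /* ... and drops the reference itself, once   */
+          return 0;
+  }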
+
+Link: https://lore.kernel.org/all/cover.1660347763.git.kjlx@templeofstupid.com/
+Link: https://lkml.kernel.org/r/20220816192817.43d5e17f@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 1d18538e6a092 ("tracing: Have dynamic events have a ref counter")
+Reported-by: Krister Johansen <kjlx@templeofstupid.com>
+Reviewed-by: Krister Johansen <kjlx@templeofstupid.com>
+Tested-by: Krister Johansen <kjlx@templeofstupid.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_event_perf.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
+index a114549720d6..61e3a2620fa3 100644
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -157,7 +157,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
+ int i;
+
+ if (--tp_event->perf_refcount > 0)
+- goto out;
++ return;
+
+ tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
+
+@@ -176,8 +176,6 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
+ perf_trace_buf[i] = NULL;
+ }
+ }
+-out:
+- trace_event_put_ref(tp_event);
+ }
+
+ static int perf_trace_event_open(struct perf_event *p_event)
+@@ -241,6 +239,7 @@ void perf_trace_destroy(struct perf_event *p_event)
+ mutex_lock(&event_mutex);
+ perf_trace_event_close(p_event);
+ perf_trace_event_unreg(p_event);
++ trace_event_put_ref(p_event->tp_event);
+ mutex_unlock(&event_mutex);
+ }
+
+@@ -292,6 +291,7 @@ void perf_kprobe_destroy(struct perf_event *p_event)
+ mutex_lock(&event_mutex);
+ perf_trace_event_close(p_event);
+ perf_trace_event_unreg(p_event);
++ trace_event_put_ref(p_event->tp_event);
+ mutex_unlock(&event_mutex);
+
+ destroy_local_trace_kprobe(p_event->tp_event);
+@@ -347,6 +347,7 @@ void perf_uprobe_destroy(struct perf_event *p_event)
+ mutex_lock(&event_mutex);
+ perf_trace_event_close(p_event);
+ perf_trace_event_unreg(p_event);
++ trace_event_put_ref(p_event->tp_event);
+ mutex_unlock(&event_mutex);
+ destroy_local_trace_uprobe(p_event->tp_event);
+ }
+--
+2.37.2
+
--- /dev/null
+From ab8384442ee512fc0fc72deeb036110843d0e7ff Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Sat, 20 Aug 2022 09:43:21 -0400
+Subject: tracing/probes: Have kprobes and uprobes use $COMM too
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit ab8384442ee512fc0fc72deeb036110843d0e7ff upstream.
+
+Both $comm and $COMM can be used to get current->comm in eprobes and the
+filtering and histogram logic. Make kprobes and uprobes consistent in this
+regard and allow both $comm and $COMM as well. Currently kprobes and
+uprobes only handle $comm, which is inconsistent with the other utilities,
+and can be confusing to users.
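+
+A trivial sketch of the accepted spellings (illustrative helper name, not
+the trace_probe.c code):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Accept both spellings, as eprobes, filters and histograms do. */
+  static int is_comm_var(const char *arg)
+  {
+          return strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0;
+  }
+
+  int main(void)
+  {
+          printf("%d %d %d\n", is_comm_var("$comm"), is_comm_var("$COMM"),
+                 is_comm_var("$retval"));
+          return 0;
+  }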
+
+Link: https://lkml.kernel.org/r/20220820134401.317014913@goodmis.org
+Link: https://lore.kernel.org/all/20220820220442.776e1ddaf8836e82edb34d01@kernel.org/
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
+Cc: Tom Zanussi <zanussi@kernel.org>
+Fixes: 533059281ee5 ("tracing: probeevent: Introduce new argument fetching code")
+Suggested-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_probe.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -310,7 +310,7 @@ static int parse_probe_vars(char *arg, c
+ }
+ } else
+ goto inval_var;
+- } else if (strcmp(arg, "comm") == 0) {
++ } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
+ code->op = FETCH_OP_COMM;
+ #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ } else if (((flags & TPARG_FL_MASK) ==
+@@ -621,7 +621,8 @@ static int traceprobe_parse_probe_arg_bo
+ * we can find those by strcmp. But ignore for eprobes.
+ */
+ if (!(flags & TPARG_FL_TPOINT) &&
+- (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0)) {
++ (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
++ strncmp(arg, "\\\"", 2) == 0)) {
+ /* The type of $comm must be "string", and not an array. */
+ if (parg->count || (t && strcmp(t, "string")))
+ goto out;
--- /dev/null
+From 8924779df820c53875abaeb10c648e9cb75b46d4 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Sat, 13 Aug 2022 15:59:43 -0700
+Subject: x86/kprobes: Fix JNG/JNLE emulation
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 8924779df820c53875abaeb10c648e9cb75b46d4 upstream.
+
+When kprobes emulates JNG/JNLE instructions on x86 it uses the wrong
+condition. For JNG (opcode: 0F 8E), according to Intel SDM, the jump is
+performed if (ZF == 1 or SF != OF). However the kernel emulation
+currently uses 'and' instead of 'or'.
+
+As a result, setting a kprobe on JNG/JNLE might cause the kernel to
+behave incorrectly whenever the kprobe is hit.
+
+Fix by changing the 'and' to 'or'.
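+
+A minimal user-space check of the condition (illustration only, not the
+kprobes emulation code itself): per the SDM, JNG/JLE is taken when ZF == 1
+or SF != OF, so the ZF term has to be OR'd in, not AND'd.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static bool jng_taken(bool zf, bool sf, bool of)
+  {
+          bool match = sf != of;
+
+          return match || zf;     /* the fix: 'or' instead of 'and' */
+  }
+
+  int main(void)
+  {
+          /* ZF=1 with SF==OF: JNG must be taken; 'match && zf' said no. */
+          printf("taken=%d\n", jng_taken(true, false, false));
+          return 0;
+  }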
+
+Fixes: 6256e668b7af ("x86/kprobes: Use int3 instead of debug trap for single-step")
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220813225943.143767-1-namit@vmware.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kprobes/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -495,7 +495,7 @@ static void kprobe_emulate_jcc(struct kp
+ match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+ ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+ if (p->ainsn.jcc.type >= 0xe)
+- match = match && (regs->flags & X86_EFLAGS_ZF);
++ match = match || (regs->flags & X86_EFLAGS_ZF);
+ }
+ __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
+ }