--- /dev/null
+From f890f89d9a80fffbfa7ca791b78927e5b8aba869 Mon Sep 17 00:00:00 2001
+From: Petr Vorel <petr.vorel@gmail.com>
+Date: Thu, 15 Apr 2021 21:39:13 +0200
+Subject: arm64: dts: qcom: msm8994-angler: Fix gpio-reserved-ranges 85-88
+
+From: Petr Vorel <petr.vorel@gmail.com>
+
+commit f890f89d9a80fffbfa7ca791b78927e5b8aba869 upstream.
+
+Reserve GPIO pins 85-88 as these aren't meant to be accessible from the
+application CPUs (accessing them causes a reboot). Yet another fix similar
+to commits 9134586715e3 and 5f8d3ab136d0, which is needed to allow angler
+to boot after 3edfb7bd76bd ("gpiolib: Show correct direction from the
+beginning").
+
+Fixes: feeaf56ac78d ("arm64: dts: msm8994 SoC and Huawei Angler (Nexus 6P) support")
+
+Signed-off-by: Petr Vorel <petr.vorel@gmail.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@somainline.org>
+Link: https://lore.kernel.org/r/20210415193913.1836153-1-petr.vorel@gmail.com
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
+@@ -32,3 +32,7 @@
+ };
+ };
+ };
++
++&tlmm {
++ gpio-reserved-ranges = <85 4>;
++};
--- /dev/null
+From e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 6 Aug 2021 18:24:15 +0800
+Subject: btrfs: fix NULL pointer dereference when deleting device by invalid id
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091 upstream.
+
+[BUG]
+It's easy to trigger a NULL pointer dereference just by removing a
+non-existent device id:
+
+ # mkfs.btrfs -f -m single -d single /dev/test/scratch1 \
+ /dev/test/scratch2
+ # mount /dev/test/scratch1 /mnt/btrfs
+ # btrfs device remove 3 /mnt/btrfs
+
+Then we have the following kernel NULL pointer dereference:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+ RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs]
+ btrfs_ioctl+0x18bb/0x3190 [btrfs]
+ ? lock_is_held_type+0xa5/0x120
+ ? find_held_lock.constprop.0+0x2b/0x80
+ ? do_user_addr_fault+0x201/0x6a0
+ ? lock_release+0xd2/0x2d0
+ ? __x64_sys_ioctl+0x83/0xb0
+ __x64_sys_ioctl+0x83/0xb0
+ do_syscall_64+0x3b/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+[CAUSE]
+Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return
+btrfs_device directly") moves the "missing" device path check into
+btrfs_rm_device().
+
+But btrfs_rm_device() itself has a case where it only receives
+@devid, with NULL as @device_path.
+
+In that case, calling strcmp() on NULL will trigger the NULL pointer
+dereference.
+
+Before that commit, the "missing" case was handled inside
+btrfs_find_device_by_devspec(), which does not check @device_path at all
+if @devid is provided, so there was no way to trigger the bug.
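+
+For illustration only, the devid-only path boils down to something like
+the following hypothetical sketch (not the actual kernel code):
+
+	const char *device_path = NULL;		/* removal by devid only */
+
+	if (strcmp(device_path, "missing") == 0)	/* NULL pointer dereference */
+		ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;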
+
+[FIX]
+Before calling strcmp(), also make sure @device_path is not NULL.
+
+Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly")
+CC: stable@vger.kernel.org # 5.4+
+Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2059,7 +2059,7 @@ int btrfs_rm_device(struct btrfs_fs_info
+
+ if (IS_ERR(device)) {
+ if (PTR_ERR(device) == -ENOENT &&
+- strcmp(device_path, "missing") == 0)
++ device_path && strcmp(device_path, "missing") == 0)
+ ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
+ else
+ ret = PTR_ERR(device);
--- /dev/null
+From 3a7956e25e1d7b3c148569e78895e1f3178122a9 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 20 Apr 2021 10:18:17 +0200
+Subject: kthread: Fix PF_KTHREAD vs to_kthread() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 3a7956e25e1d7b3c148569e78895e1f3178122a9 upstream.
+
+The kthread_is_per_cpu() construct relies on only being called on
+PF_KTHREAD tasks (per the WARN in to_kthread). This gives rise to the
+following usage pattern:
+
+ if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
+
+However, as reported by syzkaller, this is broken. The scenario is:
+
+ CPU0 CPU1 (running p)
+
+ (p->flags & PF_KTHREAD) // true
+
+ begin_new_exec()
+ me->flags &= ~(PF_KTHREAD|...);
+ kthread_is_per_cpu(p)
+ to_kthread(p)
+ WARN(!(p->flags & PF_KTHREAD) <-- *SPLAT*
+
+Introduce __to_kthread() that omits the WARN and is sure to check both
+values.
+
+Use this to remove the problematic pattern for kthread_is_per_cpu()
+and fix a number of other kthread_*() functions that have similar
+issues but are currently not used in ways that would expose the
+problem.
+
+Notably kthread_func() is only ever called on 'current', while
+kthread_probe_data() is only used for PF_WQ_WORKER, which implies the
+task is from kthread_create*().
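+
+For illustration, the intended calling pattern with the new helper is
+roughly the following sketch (kt and p are placeholder names):
+
+	struct kthread *kt = __to_kthread(p);
+
+	if (kt)		/* p is (still) a kthread */
+		... /* safe to look at kt->flags, kt->threadfn, ... */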
+
+Fixes: ac687e6e8c26 ("kthread: Extract KTHREAD_IS_PER_CPU")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
+Link: https://lkml.kernel.org/r/YH6WJc825C4P0FCK@hirez.programming.kicks-ass.net
+[ Drop the balance_push() hunk as it is not needed. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kthread.c | 33 +++++++++++++++++++++++++++------
+ kernel/sched/fair.c | 2 +-
+ 2 files changed, 28 insertions(+), 7 deletions(-)
+
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -84,6 +84,25 @@ static inline struct kthread *to_kthread
+ return (__force void *)k->set_child_tid;
+ }
+
++/*
++ * Variant of to_kthread() that doesn't assume @p is a kthread.
++ *
++ * Per construction; when:
++ *
++ * (p->flags & PF_KTHREAD) && p->set_child_tid
++ *
++ * the task is both a kthread and struct kthread is persistent. However
++ * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
++ * begin_new_exec()).
++ */
++static inline struct kthread *__to_kthread(struct task_struct *p)
++{
++ void *kthread = (__force void *)p->set_child_tid;
++ if (kthread && !(p->flags & PF_KTHREAD))
++ kthread = NULL;
++ return kthread;
++}
++
+ void free_kthread_struct(struct task_struct *k)
+ {
+ struct kthread *kthread;
+@@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_shou
+ */
+ void *kthread_func(struct task_struct *task)
+ {
+- if (task->flags & PF_KTHREAD)
+- return to_kthread(task)->threadfn;
++ struct kthread *kthread = __to_kthread(task);
++ if (kthread)
++ return kthread->threadfn;
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(kthread_func);
+@@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
+ */
+ void *kthread_probe_data(struct task_struct *task)
+ {
+- struct kthread *kthread = to_kthread(task);
++ struct kthread *kthread = __to_kthread(task);
+ void *data = NULL;
+
+- copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
++ if (kthread)
++ copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+ return data;
+ }
+
+@@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_str
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ }
+
+-bool kthread_is_per_cpu(struct task_struct *k)
++bool kthread_is_per_cpu(struct task_struct *p)
+ {
+- struct kthread *kthread = to_kthread(k);
++ struct kthread *kthread = __to_kthread(p);
+ if (!kthread)
+ return false;
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7569,7 +7569,7 @@ int can_migrate_task(struct task_struct
+ return 0;
+
+ /* Disregard pcpu kthreads; they are where they need to be. */
+- if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++ if (kthread_is_per_cpu(p))
+ return 0;
+
+ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
--- /dev/null
+From foo@baz Wed Sep 1 11:29:26 AM CEST 2021
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 23 Jun 2021 13:39:33 -0700
+Subject: lkdtm: Enable DOUBLE_FAULT on all architectures
+
+From: Kees Cook <keescook@chromium.org>
+
+commit f123c42bbeff26bfe8bdb08a01307e92d51eec39 upstream
+
+Where feasible, I prefer to have all tests visible on all architectures,
+but to have them wired to XFAIL. DOUBLE_FAULT was set up to XFAIL, but
+wasn't actually being added to the test list.
+
+Fixes: cea23efb4de2 ("lkdtm/bugs: Make double-fault test always available")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20210623203936.3151093-7-keescook@chromium.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[sudip: adjust context]
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/lkdtm/core.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/misc/lkdtm/core.c
++++ b/drivers/misc/lkdtm/core.c
+@@ -173,9 +173,7 @@ static const struct crashtype crashtypes
+ CRASHTYPE(USERCOPY_KERNEL),
+ CRASHTYPE(STACKLEAK_ERASING),
+ CRASHTYPE(CFI_FORWARD_PROTO),
+-#ifdef CONFIG_X86_32
+ CRASHTYPE(DOUBLE_FAULT),
+-#endif
+ };
+
+
--- /dev/null
+From 7428022b50d0fbb4846dd0f00639ea09d36dff02 Mon Sep 17 00:00:00 2001
+From: DENG Qingfang <dqfext@gmail.com>
+Date: Wed, 11 Aug 2021 17:50:43 +0800
+Subject: net: dsa: mt7530: fix VLAN traffic leaks again
+
+From: DENG Qingfang <dqfext@gmail.com>
+
+commit 7428022b50d0fbb4846dd0f00639ea09d36dff02 upstream.
+
+When a port leaves a VLAN-aware bridge, the current code does not clear
+other ports' matrix field bit. If the bridge is later set to VLAN-unaware
+mode, traffic in the bridge may leak to that port.
+
+Remove the VLAN filtering check in mt7530_port_bridge_leave.
+
+Fixes: 474a2ddaa192 ("net: dsa: mt7530: fix VLAN traffic leaks")
+Fixes: 83163f7dca56 ("net: dsa: mediatek: add VLAN support for MT7530")
+Signed-off-by: DENG Qingfang <dqfext@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/mt7530.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1161,11 +1161,8 @@ mt7530_port_bridge_leave(struct dsa_swit
+ /* Remove this port from the port matrix of the other ports
+ * in the same bridge. If the port is disabled, port matrix
+ * is kept and not being setup until the port becomes enabled.
+- * And the other port's port matrix cannot be broken when the
+- * other port is still a VLAN-aware port.
+ */
+- if (dsa_is_user_port(ds, i) && i != port &&
+- !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
++ if (dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
--- /dev/null
+From 1f0e6edcd968ff19211245f7da6039e983aa51e5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Martin=20Li=C5=A1ka?= <mliska@suse.cz>
+Date: Thu, 11 Feb 2021 13:37:55 +0100
+Subject: perf annotate: Fix jump parsing for C++ code.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Liška <mliska@suse.cz>
+
+commit 1f0e6edcd968ff19211245f7da6039e983aa51e5 upstream.
+
+Considering the following testcase:
+
+ int
+ foo(int a, int b)
+ {
+ for (unsigned i = 0; i < 1000000000; i++)
+ a += b;
+ return a;
+ }
+
+ int main()
+ {
+ foo (3, 4);
+ return 0;
+ }
+
+'perf annotate' displays:
+
+ 86.52 │40055e: → ja 40056c <foo(int, int)+0x26>
+ 13.37 │400560: mov -0x18(%rbp),%eax
+ │400563: add %eax,-0x14(%rbp)
+ │400566: addl $0x1,-0x4(%rbp)
+ 0.11 │40056a: → jmp 400557 <foo(int, int)+0x11>
+ │40056c: mov -0x14(%rbp),%eax
+ │40056f: pop %rbp
+
+and the 'ja 40056c' does not link to the location in the function. This
+is caused by the comma being parsed incorrectly: it is part of the
+function signature, not an operand separator.
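+
+For illustration, the raw jump operand produced by objdump for the C++
+build is:
+
+	ja 40056c <foo(int, int)+0x26>
+	              ^ comma inside the demangled signature, not an
+	                operand separator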
+
+With my patch I see:
+
+ 86.52 │ ┌──ja 26
+ 13.37 │ │ mov -0x18(%rbp),%eax
+ │ │ add %eax,-0x14(%rbp)
+ │ │ addl $0x1,-0x4(%rbp)
+ 0.11 │ │↑ jmp 11
+ │26:└─→mov -0x14(%rbp),%eax
+
+and 'o' output prints:
+
+ 86.52 │4005┌── ↓ ja 40056c <foo(int, int)+0x26>
+ 13.37 │4005│0: mov -0x18(%rbp),%eax
+ │4005│3: add %eax,-0x14(%rbp)
+ │4005│6: addl $0x1,-0x4(%rbp)
+ 0.11 │4005│a: ↑ jmp 400557 <foo(int, int)+0x11>
+ │4005└─→ mov -0x14(%rbp),%eax
+
+By contrast, when compiling the very same file with gcc -x c, the parsing
+is fine because function arguments are not displayed:
+
+ jmp 400543 <foo+0x1d>
+
+Committer testing:
+
+Before:
+
+ $ cat cpp_args_annotate.c
+ int
+ foo(int a, int b)
+ {
+ for (unsigned i = 0; i < 1000000000; i++)
+ a += b;
+ return a;
+ }
+
+ int main()
+ {
+ foo (3, 4);
+ return 0;
+ }
+ $ gcc --version |& head -1
+ gcc (GCC) 10.2.1 20201125 (Red Hat 10.2.1-9)
+ $ gcc -g cpp_args_annotate.c -o cpp_args_annotate
+ $ perf record ./cpp_args_annotate
+ [ perf record: Woken up 2 times to write data ]
+ [ perf record: Captured and wrote 0.275 MB perf.data (7188 samples) ]
+ $ perf annotate --stdio2 foo
+ Samples: 7K of event 'cycles:u', 4000 Hz, Event count (approx.): 7468429289, [percent: local period]
+ foo() /home/acme/c/cpp_args_annotate
+ Percent
+ 0000000000401106 <foo>:
+ foo():
+ int
+ foo(int a, int b)
+ {
+ push %rbp
+ mov %rsp,%rbp
+ mov %edi,-0x14(%rbp)
+ mov %esi,-0x18(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ movl $0x0,-0x4(%rbp)
+ ↓ jmp 1d
+ a += b;
+ 13.45 13: mov -0x18(%rbp),%eax
+ add %eax,-0x14(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ addl $0x1,-0x4(%rbp)
+ 0.09 1d: cmpl $0x3b9ac9ff,-0x4(%rbp)
+ 86.46 ↑ jbe 13
+ return a;
+ mov -0x14(%rbp),%eax
+ }
+ pop %rbp
+ ← retq
+ $
+
+I.e. it works for C; now let's switch to C++:
+
+ $ g++ -g cpp_args_annotate.c -o cpp_args_annotate
+ $ perf record ./cpp_args_annotate
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.268 MB perf.data (6976 samples) ]
+ $ perf annotate --stdio2 foo
+ Samples: 6K of event 'cycles:u', 4000 Hz, Event count (approx.): 7380681761, [percent: local period]
+ foo() /home/acme/c/cpp_args_annotate
+ Percent
+ 0000000000401106 <foo(int, int)>:
+ foo(int, int):
+ int
+ foo(int a, int b)
+ {
+ push %rbp
+ mov %rsp,%rbp
+ mov %edi,-0x14(%rbp)
+ mov %esi,-0x18(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ movl $0x0,-0x4(%rbp)
+ cmpl $0x3b9ac9ff,-0x4(%rbp)
+ 86.53 → ja 40112c <foo(int, int)+0x26>
+ a += b;
+ 13.32 mov -0x18(%rbp),%eax
+ 0.00 add %eax,-0x14(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ addl $0x1,-0x4(%rbp)
+ 0.15 → jmp 401117 <foo(int, int)+0x11>
+ return a;
+ mov -0x14(%rbp),%eax
+ }
+ pop %rbp
+ ← retq
+ $
+
+Reproduced.
+
+Now with this patch:
+
+Reusing the C++ built binary, as we can see here:
+
+ $ readelf -wi cpp_args_annotate | grep producer
+ <c> DW_AT_producer : (indirect string, offset: 0x2e): GNU C++14 10.2.1 20201125 (Red Hat 10.2.1-9) -mtune=generic -march=x86-64 -g
+ $
+
+And furthermore:
+
+ $ file cpp_args_annotate
+ cpp_args_annotate: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=4fe3cab260204765605ec630d0dc7a7e93c361a9, for GNU/Linux 3.2.0, with debug_info, not stripped
+ $ perf buildid-list -i cpp_args_annotate
+ 4fe3cab260204765605ec630d0dc7a7e93c361a9
+ $ perf buildid-list | grep cpp_args_annotate
+ 4fe3cab260204765605ec630d0dc7a7e93c361a9 /home/acme/c/cpp_args_annotate
+ $
+
+It now works:
+
+ $ perf annotate --stdio2 foo
+ Samples: 6K of event 'cycles:u', 4000 Hz, Event count (approx.): 7380681761, [percent: local period]
+ foo() /home/acme/c/cpp_args_annotate
+ Percent
+ 0000000000401106 <foo(int, int)>:
+ foo(int, int):
+ int
+ foo(int a, int b)
+ {
+ push %rbp
+ mov %rsp,%rbp
+ mov %edi,-0x14(%rbp)
+ mov %esi,-0x18(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ movl $0x0,-0x4(%rbp)
+ 11: cmpl $0x3b9ac9ff,-0x4(%rbp)
+ 86.53 ↓ ja 26
+ a += b;
+ 13.32 mov -0x18(%rbp),%eax
+ 0.00 add %eax,-0x14(%rbp)
+ for (unsigned i = 0; i < 1000000000; i++)
+ addl $0x1,-0x4(%rbp)
+ 0.15 ↑ jmp 11
+ return a;
+ 26: mov -0x14(%rbp),%eax
+ }
+ pop %rbp
+ ← retq
+ $
+
+Signed-off-by: Martin Liška <mliska@suse.cz>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Link: http://lore.kernel.org/lkml/13e1a405-edf9-e4c2-4327-a9b454353730@suse.cz
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/annotate.c | 8 ++++++++
+ tools/perf/util/annotate.h | 1 +
+ 2 files changed, 9 insertions(+)
+
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -317,12 +317,18 @@ bool ins__is_call(const struct ins *ins)
+ /*
+ * Prevents from matching commas in the comment section, e.g.:
+ * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
++ *
++ * and skip comma as part of function arguments, e.g.:
++ * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
+ */
+ static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+ {
+ if (ops->raw_comment && c > ops->raw_comment)
+ return NULL;
+
++ if (ops->raw_func_start && c > ops->raw_func_start)
++ return NULL;
++
+ return c;
+ }
+
+@@ -337,6 +343,8 @@ static int jump__parse(struct arch *arch
+ u64 start, end;
+
+ ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
++ ops->raw_func_start = strchr(ops->raw, '<');
++
+ c = validate_comma(c, ops);
+
+ /*
+--- a/tools/perf/util/annotate.h
++++ b/tools/perf/util/annotate.h
+@@ -32,6 +32,7 @@ struct ins {
+ struct ins_operands {
+ char *raw;
+ char *raw_comment;
++ char *raw_func_start;
+ struct {
+ char *raw;
+ char *name;
--- /dev/null
+From 67069a1f0fe5f9eeca86d954fff2087f5542a008 Mon Sep 17 00:00:00 2001
+From: Riccardo Mancini <rickyman7@gmail.com>
+Date: Thu, 3 Jun 2021 00:40:23 +0200
+Subject: perf env: Fix memory leak of bpf_prog_info_linear member
+
+From: Riccardo Mancini <rickyman7@gmail.com>
+
+commit 67069a1f0fe5f9eeca86d954fff2087f5542a008 upstream.
+
+ASan reported a memory leak caused by info_linear not being deallocated.
+
+The info_linear was allocated in perf_event__synthesize_one_bpf_prog().
+
+This patch adds the corresponding free() when bpf_prog_info_node
+is freed in perf_env__purge_bpf().
+
+ $ sudo ./perf record -- sleep 5
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.025 MB perf.data (8 samples) ]
+
+ =================================================================
+ ==297735==ERROR: LeakSanitizer: detected memory leaks
+
+ Direct leak of 7688 byte(s) in 19 object(s) allocated from:
+ #0 0x4f420f in malloc (/home/user/linux/tools/perf/perf+0x4f420f)
+ #1 0xc06a74 in bpf_program__get_prog_info_linear /home/user/linux/tools/lib/bpf/libbpf.c:11113:16
+ #2 0xb426fe in perf_event__synthesize_one_bpf_prog /home/user/linux/tools/perf/util/bpf-event.c:191:16
+ #3 0xb42008 in perf_event__synthesize_bpf_events /home/user/linux/tools/perf/util/bpf-event.c:410:9
+ #4 0x594596 in record__synthesize /home/user/linux/tools/perf/builtin-record.c:1490:8
+ #5 0x58c9ac in __cmd_record /home/user/linux/tools/perf/builtin-record.c:1798:8
+ #6 0x58990b in cmd_record /home/user/linux/tools/perf/builtin-record.c:2901:8
+ #7 0x7b2a20 in run_builtin /home/user/linux/tools/perf/perf.c:313:11
+ #8 0x7b12ff in handle_internal_command /home/user/linux/tools/perf/perf.c:365:8
+ #9 0x7b2583 in run_argv /home/user/linux/tools/perf/perf.c:409:2
+ #10 0x7b0d79 in main /home/user/linux/tools/perf/perf.c:539:3
+ #11 0x7fa357ef6b74 in __libc_start_main /usr/src/debug/glibc-2.33-8.fc34.x86_64/csu/../csu/libc-start.c:332:16
+
+Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Yonghong Song <yhs@fb.com>
+Link: http://lore.kernel.org/lkml/20210602224024.300485-1-rickyman7@gmail.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/env.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -142,6 +142,7 @@ static void perf_env__purge_bpf(struct p
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
++ free(node->info_linear);
+ free(node);
+ }
+
--- /dev/null
+From 41d585411311abf187e5f09042978fe7073a9375 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Mon, 15 Mar 2021 13:56:41 +0900
+Subject: perf record: Fix memory leak in vDSO found using ASAN
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit 41d585411311abf187e5f09042978fe7073a9375 upstream.
+
+I got several memory leak reports from ASan with a simple command. They
+happen because the VDSO dso is not released due to its refcount. Like in
+__dsos_addnew_id(), it should put the refcount after adding to the list.
+
+ $ perf record true
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.030 MB perf.data (10 samples) ]
+
+ =================================================================
+ ==692599==ERROR: LeakSanitizer: detected memory leaks
+
+ Direct leak of 439 byte(s) in 1 object(s) allocated from:
+ #0 0x7fea52341037 in __interceptor_calloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:154
+ #1 0x559bce4aa8ee in dso__new_id util/dso.c:1256
+ #2 0x559bce59245a in __machine__addnew_vdso util/vdso.c:132
+ #3 0x559bce59245a in machine__findnew_vdso util/vdso.c:347
+ #4 0x559bce50826c in map__new util/map.c:175
+ #5 0x559bce503c92 in machine__process_mmap2_event util/machine.c:1787
+ #6 0x559bce512f6b in machines__deliver_event util/session.c:1481
+ #7 0x559bce515107 in perf_session__deliver_event util/session.c:1551
+ #8 0x559bce51d4d2 in do_flush util/ordered-events.c:244
+ #9 0x559bce51d4d2 in __ordered_events__flush util/ordered-events.c:323
+ #10 0x559bce519bea in __perf_session__process_events util/session.c:2268
+ #11 0x559bce519bea in perf_session__process_events util/session.c:2297
+ #12 0x559bce2e7a52 in process_buildids /home/namhyung/project/linux/tools/perf/builtin-record.c:1017
+ #13 0x559bce2e7a52 in record__finish_output /home/namhyung/project/linux/tools/perf/builtin-record.c:1234
+ #14 0x559bce2ed4f6 in __cmd_record /home/namhyung/project/linux/tools/perf/builtin-record.c:2026
+ #15 0x559bce2ed4f6 in cmd_record /home/namhyung/project/linux/tools/perf/builtin-record.c:2858
+ #16 0x559bce422db4 in run_builtin /home/namhyung/project/linux/tools/perf/perf.c:313
+ #17 0x559bce2acac8 in handle_internal_command /home/namhyung/project/linux/tools/perf/perf.c:365
+ #18 0x559bce2acac8 in run_argv /home/namhyung/project/linux/tools/perf/perf.c:409
+ #19 0x559bce2acac8 in main /home/namhyung/project/linux/tools/perf/perf.c:539
+ #20 0x7fea51e76d09 in __libc_start_main ../csu/libc-start.c:308
+
+ Indirect leak of 32 byte(s) in 1 object(s) allocated from:
+ #0 0x7fea52341037 in __interceptor_calloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:154
+ #1 0x559bce520907 in nsinfo__copy util/namespaces.c:169
+ #2 0x559bce50821b in map__new util/map.c:168
+ #3 0x559bce503c92 in machine__process_mmap2_event util/machine.c:1787
+ #4 0x559bce512f6b in machines__deliver_event util/session.c:1481
+ #5 0x559bce515107 in perf_session__deliver_event util/session.c:1551
+ #6 0x559bce51d4d2 in do_flush util/ordered-events.c:244
+ #7 0x559bce51d4d2 in __ordered_events__flush util/ordered-events.c:323
+ #8 0x559bce519bea in __perf_session__process_events util/session.c:2268
+ #9 0x559bce519bea in perf_session__process_events util/session.c:2297
+ #10 0x559bce2e7a52 in process_buildids /home/namhyung/project/linux/tools/perf/builtin-record.c:1017
+ #11 0x559bce2e7a52 in record__finish_output /home/namhyung/project/linux/tools/perf/builtin-record.c:1234
+ #12 0x559bce2ed4f6 in __cmd_record /home/namhyung/project/linux/tools/perf/builtin-record.c:2026
+ #13 0x559bce2ed4f6 in cmd_record /home/namhyung/project/linux/tools/perf/builtin-record.c:2858
+ #14 0x559bce422db4 in run_builtin /home/namhyung/project/linux/tools/perf/perf.c:313
+ #15 0x559bce2acac8 in handle_internal_command /home/namhyung/project/linux/tools/perf/perf.c:365
+ #16 0x559bce2acac8 in run_argv /home/namhyung/project/linux/tools/perf/perf.c:409
+ #17 0x559bce2acac8 in main /home/namhyung/project/linux/tools/perf/perf.c:539
+ #18 0x7fea51e76d09 in __libc_start_main ../csu/libc-start.c:308
+
+ SUMMARY: AddressSanitizer: 471 byte(s) leaked in 2 allocation(s).
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20210315045641.700430-1-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/vdso.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/perf/util/vdso.c
++++ b/tools/perf/util/vdso.c
+@@ -133,6 +133,8 @@ static struct dso *__machine__addnew_vds
+ if (dso != NULL) {
+ __dsos__add(&machine->dsos, dso);
+ dso__set_long_name(dso, long_name, false);
++ /* Put dso here because __dsos_add already got it */
++ dso__put(dso);
+ }
+
+ return dso;
--- /dev/null
+From 69c9ffed6cede9c11697861f654946e3ae95a930 Mon Sep 17 00:00:00 2001
+From: Riccardo Mancini <rickyman7@gmail.com>
+Date: Thu, 3 Jun 2021 00:08:33 +0200
+Subject: perf symbol-elf: Fix memory leak by freeing sdt_note.args
+
+From: Riccardo Mancini <rickyman7@gmail.com>
+
+commit 69c9ffed6cede9c11697861f654946e3ae95a930 upstream.
+
+Reported by ASan.
+
+Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Fabian Hemmer <copy@copy.sh>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Remi Bernon <rbernon@codeweavers.com>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Link: http://lore.kernel.org/lkml/20210602220833.285226-1-rickyman7@gmail.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/symbol-elf.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -2360,6 +2360,7 @@ int cleanup_sdt_note_list(struct list_he
+
+ list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
+ list_del_init(&pos->note_list);
++ zfree(&pos->args);
+ zfree(&pos->name);
+ zfree(&pos->provider);
+ free(pos);
--- /dev/null
+From 067012974c8ae31a8886046df082aeba93592972 Mon Sep 17 00:00:00 2001
+From: Jianlin Lv <Jianlin.Lv@arm.com>
+Date: Thu, 18 Feb 2021 11:12:45 +0800
+Subject: perf tools: Fix arm64 build error with gcc-11
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jianlin Lv <Jianlin.Lv@arm.com>
+
+commit 067012974c8ae31a8886046df082aeba93592972 upstream.
+
+gcc version: 11.0.0 20210208 (experimental) (GCC)
+
+The following build error is seen on arm64:
+
+.......
+In function ‘printf’,
+ inlined from ‘regs_dump__printf’ at util/session.c:1141:3,
+ inlined from ‘regs__printf’ at util/session.c:1169:2:
+/usr/include/aarch64-linux-gnu/bits/stdio2.h:107:10: \
+ error: ‘%-5s’ directive argument is null [-Werror=format-overflow=]
+
+107 | return __printf_chk (__USE_FORTIFY_LEVEL - 1, __fmt, \
+ __va_arg_pack ());
+
+......
+In function ‘fprintf’,
+ inlined from ‘perf_sample__fprintf_regs.isra’ at \
+ builtin-script.c:622:14:
+/usr/include/aarch64-linux-gnu/bits/stdio2.h:100:10: \
+ error: ‘%5s’ directive argument is null [-Werror=format-overflow=]
+ 100 | return __fprintf_chk (__stream, __USE_FORTIFY_LEVEL - 1, __fmt,
+ 101 | __va_arg_pack ());
+
+cc1: all warnings being treated as errors
+.......
+
+This patch fixes the -Wformat-overflow warnings by adding a helper
+function that converts NULL to "unknown".
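+
+For illustration, callers such as regs_dump__printf() can then keep their
+existing format strings (sketch only):
+
+	printf("%-5s", perf_reg_name(id));	/* never passes NULL to %s */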
+
+Signed-off-by: Jianlin Lv <Jianlin.Lv@arm.com>
+Reviewed-by: John Garry <john.garry@huawei.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Anju T Sudhakar <anju@linux.vnet.ibm.com>
+Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
+Cc: Guo Ren <guoren@kernel.org>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: iecedge@gmail.com
+Cc: linux-csky@vger.kernel.org
+Cc: linux-riscv@lists.infradead.org
+Link: http://lore.kernel.org/lkml/20210218031245.2078492-1-Jianlin.Lv@arm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/arch/arm/include/perf_regs.h | 2 +-
+ tools/perf/arch/arm64/include/perf_regs.h | 2 +-
+ tools/perf/arch/csky/include/perf_regs.h | 2 +-
+ tools/perf/arch/powerpc/include/perf_regs.h | 2 +-
+ tools/perf/arch/riscv/include/perf_regs.h | 2 +-
+ tools/perf/arch/s390/include/perf_regs.h | 2 +-
+ tools/perf/arch/x86/include/perf_regs.h | 2 +-
+ tools/perf/util/perf_regs.h | 7 +++++++
+ 8 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/tools/perf/arch/arm/include/perf_regs.h
++++ b/tools/perf/arch/arm/include/perf_regs.h
+@@ -15,7 +15,7 @@ void perf_regs_load(u64 *regs);
+ #define PERF_REG_IP PERF_REG_ARM_PC
+ #define PERF_REG_SP PERF_REG_ARM_SP
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_ARM_R0:
+--- a/tools/perf/arch/arm64/include/perf_regs.h
++++ b/tools/perf/arch/arm64/include/perf_regs.h
+@@ -15,7 +15,7 @@ void perf_regs_load(u64 *regs);
+ #define PERF_REG_IP PERF_REG_ARM64_PC
+ #define PERF_REG_SP PERF_REG_ARM64_SP
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_ARM64_X0:
+--- a/tools/perf/arch/csky/include/perf_regs.h
++++ b/tools/perf/arch/csky/include/perf_regs.h
+@@ -15,7 +15,7 @@
+ #define PERF_REG_IP PERF_REG_CSKY_PC
+ #define PERF_REG_SP PERF_REG_CSKY_SP
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_CSKY_A0:
+--- a/tools/perf/arch/powerpc/include/perf_regs.h
++++ b/tools/perf/arch/powerpc/include/perf_regs.h
+@@ -73,7 +73,7 @@ static const char *reg_names[] = {
+ [PERF_REG_POWERPC_SIER3] = "sier3",
+ };
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ return reg_names[id];
+ }
+--- a/tools/perf/arch/riscv/include/perf_regs.h
++++ b/tools/perf/arch/riscv/include/perf_regs.h
+@@ -19,7 +19,7 @@
+ #define PERF_REG_IP PERF_REG_RISCV_PC
+ #define PERF_REG_SP PERF_REG_RISCV_SP
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_RISCV_PC:
+--- a/tools/perf/arch/s390/include/perf_regs.h
++++ b/tools/perf/arch/s390/include/perf_regs.h
+@@ -14,7 +14,7 @@ void perf_regs_load(u64 *regs);
+ #define PERF_REG_IP PERF_REG_S390_PC
+ #define PERF_REG_SP PERF_REG_S390_R15
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_S390_R0:
+--- a/tools/perf/arch/x86/include/perf_regs.h
++++ b/tools/perf/arch/x86/include/perf_regs.h
+@@ -23,7 +23,7 @@ void perf_regs_load(u64 *regs);
+ #define PERF_REG_IP PERF_REG_X86_IP
+ #define PERF_REG_SP PERF_REG_X86_SP
+
+-static inline const char *perf_reg_name(int id)
++static inline const char *__perf_reg_name(int id)
+ {
+ switch (id) {
+ case PERF_REG_X86_AX:
+--- a/tools/perf/util/perf_regs.h
++++ b/tools/perf/util/perf_regs.h
+@@ -33,6 +33,13 @@ extern const struct sample_reg sample_re
+
+ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+
++static inline const char *perf_reg_name(int id)
++{
++ const char *reg_name = __perf_reg_name(id);
++
++ return reg_name ?: "unknown";
++}
++
+ #else
+ #define PERF_REGS_MASK 0
+ #define PERF_REGS_MAX 0
--- /dev/null
+From f66de7ac4849eb42a7b18e26b8ee49e08130fd27 Mon Sep 17 00:00:00 2001
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+Date: Tue, 1 Dec 2020 04:28:00 -0500
+Subject: powerpc/perf: Invoke per-CPU variable access with disabled interrupts
+
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+
+commit f66de7ac4849eb42a7b18e26b8ee49e08130fd27 upstream.
+
+The power_pmu_event_init() callback accesses the per-cpu variable
+(cpu_hw_events) to check for event constraints and Branch Stack
+(BHRB). Current usage is to disable preemption when accessing the
+per-cpu variable, but this does not prevent a timer callback from
+interrupting event_init. Fix this by using 'local_irq_save/restore'
+to make sure the code path is invoked with disabled interrupts.
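+
+For illustration, the access pattern changes roughly as follows (sketch
+taken from the diff below):
+
+	/* before: only disables preemption */
+	cpuhw = &get_cpu_var(cpu_hw_events);
+	...
+	put_cpu_var(cpu_hw_events);
+
+	/* after: also prevents the timer interrupt from running in between */
+	local_irq_save(irq_flags);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
+	...
+	local_irq_restore(irq_flags);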
+
+This change is tested in the mambo simulator to ensure that, if a timer
+interrupt comes in during the per-cpu access in event_init, it will be
+soft masked and replayed later. For testing purposes, a udelay() was
+introduced in power_pmu_event_init() to make sure a timer interrupt
+arrives while in the per-cpu variable access code between
+local_irq_save/restore. As expected, the timer interrupt was replayed
+later during the local_irq_restore called from power_pmu_event_init.
+This was confirmed by adding a breakpoint in mambo and checking the
+backtrace when timer_interrupt was hit.
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/1606814880-1720-1-git-send-email-atrajeev@linux.vnet.ibm.com
+Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/perf/core-book3s.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1884,7 +1884,7 @@ static bool is_event_blacklisted(u64 ev)
+ static int power_pmu_event_init(struct perf_event *event)
+ {
+ u64 ev;
+- unsigned long flags;
++ unsigned long flags, irq_flags;
+ struct perf_event *ctrs[MAX_HWEVENTS];
+ u64 events[MAX_HWEVENTS];
+ unsigned int cflags[MAX_HWEVENTS];
+@@ -1992,7 +1992,9 @@ static int power_pmu_event_init(struct p
+ if (check_excludes(ctrs, cflags, n, 1))
+ return -EINVAL;
+
+- cpuhw = &get_cpu_var(cpu_hw_events);
++ local_irq_save(irq_flags);
++ cpuhw = this_cpu_ptr(&cpu_hw_events);
++
+ err = power_check_constraints(cpuhw, events, cflags, n + 1);
+
+ if (has_branch_stack(event)) {
+@@ -2003,13 +2005,13 @@ static int power_pmu_event_init(struct p
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter == -1) {
+- put_cpu_var(cpu_hw_events);
++ local_irq_restore(irq_flags);
+ return -EOPNOTSUPP;
+ }
+ cpuhw->bhrb_filter = bhrb_filter;
+ }
+
+- put_cpu_var(cpu_hw_events);
++ local_irq_restore(irq_flags);
+ if (err)
+ return -EINVAL;
+
--- /dev/null
+From 5ad84adf5456313e285734102367c861c436c5ed Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Thu, 17 Dec 2020 16:01:40 +0000
+Subject: riscv: Fixup patch_text panic in ftrace
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit 5ad84adf5456313e285734102367c861c436c5ed upstream.
+
+Just like arm64, we can't trace the function in the patch_text path.
+
+Here is the bug log:
+
+[ 45.234334] Unable to handle kernel paging request at virtual address ffffffd38ae80900
+[ 45.242313] Oops [#1]
+[ 45.244600] Modules linked in:
+[ 45.247678] CPU: 0 PID: 11 Comm: migration/0 Not tainted 5.9.0-00025-g9b7db83-dirty #215
+[ 45.255797] epc: ffffffe00021689a ra : ffffffe00021718e sp : ffffffe01afabb58
+[ 45.262955] gp : ffffffe00136afa0 tp : ffffffe01af94d00 t0 : 0000000000000002
+[ 45.270200] t1 : 0000000000000000 t2 : 0000000000000001 s0 : ffffffe01afabc08
+[ 45.277443] s1 : ffffffe0013718a8 a0 : 0000000000000000 a1 : ffffffe01afabba8
+[ 45.284686] a2 : 0000000000000000 a3 : 0000000000000000 a4 : c4c16ad38ae80900
+[ 45.291929] a5 : 0000000000000000 a6 : 0000000000000000 a7 : 0000000052464e43
+[ 45.299173] s2 : 0000000000000001 s3 : ffffffe000206a60 s4 : ffffffe000206a60
+[ 45.306415] s5 : 00000000000009ec s6 : ffffffe0013718a8 s7 : c4c16ad38ae80900
+[ 45.313658] s8 : 0000000000000004 s9 : 0000000000000001 s10: 0000000000000001
+[ 45.320902] s11: 0000000000000003 t3 : 0000000000000001 t4 : ffffffffd192fe79
+[ 45.328144] t5 : ffffffffb8f80000 t6 : 0000000000040000
+[ 45.333472] status: 0000000200000100 badaddr: ffffffd38ae80900 cause: 000000000000000f
+[ 45.341514] ---[ end trace d95102172248fdcf ]---
+[ 45.346176] note: migration/0[11] exited with preempt_count 1
+
+(gdb) x /2i $pc
+=> 0xffffffe00021689a <__do_proc_dointvec+196>: sd zero,0(s7)
+ 0xffffffe00021689e <__do_proc_dointvec+200>: li s11,0
+
+(gdb) bt
+0 __do_proc_dointvec (tbl_data=0x0, table=0xffffffe01afabba8,
+write=0, buffer=0x0, lenp=0x7bf897061f9a0800, ppos=0x4, conv=0x0,
+data=0x52464e43) at kernel/sysctl.c:581
+1 0xffffffe00021718e in do_proc_dointvec (data=<optimized out>,
+conv=<optimized out>, ppos=<optimized out>, lenp=<optimized out>,
+buffer=<optimized out>, write=<optimized out>, table=<optimized out>)
+at kernel/sysctl.c:964
+2 proc_dointvec_minmax (ppos=<optimized out>, lenp=<optimized out>,
+buffer=<optimized out>, write=<optimized out>, table=<optimized out>)
+at kernel/sysctl.c:964
+3 proc_do_static_key (table=<optimized out>, write=1, buffer=0x0,
+lenp=0x0, ppos=0x7bf897061f9a0800) at kernel/sysctl.c:1643
+4 0xffffffe000206792 in ftrace_make_call (rec=<optimized out>,
+addr=<optimized out>) at arch/riscv/kernel/ftrace.c:109
+5 0xffffffe0002c9c04 in __ftrace_replace_code
+(rec=0xffffffe01ae40c30, enable=3) at kernel/trace/ftrace.c:2503
+6 0xffffffe0002ca0b2 in ftrace_replace_code (mod_flags=<optimized
+out>) at kernel/trace/ftrace.c:2530
+7 0xffffffe0002ca26a in ftrace_modify_all_code (command=5) at
+kernel/trace/ftrace.c:2677
+8 0xffffffe0002ca30e in __ftrace_modify_code (data=<optimized out>)
+at kernel/trace/ftrace.c:2703
+9 0xffffffe0002c13b0 in multi_cpu_stop (data=0x0) at kernel/stop_machine.c:224
+10 0xffffffe0002c0fde in cpu_stopper_thread (cpu=<optimized out>) at
+kernel/stop_machine.c:491
+11 0xffffffe0002343de in smpboot_thread_fn (data=0x0) at kernel/smpboot.c:165
+12 0xffffffe00022f8b4 in kthread (_create=0xffffffe01af0c040) at
+kernel/kthread.c:292
+13 0xffffffe000201fac in handle_exception () at arch/riscv/kernel/entry.S:236
+
+ 0xffffffe00020678a <+114>: auipc ra,0xffffe
+ 0xffffffe00020678e <+118>: jalr -118(ra) # 0xffffffe000204714 <patch_text_nosync>
+ 0xffffffe000206792 <+122>: snez a0,a0
+
+(gdb) disassemble patch_text_nosync
+Dump of assembler code for function patch_text_nosync:
+ 0xffffffe000204714 <+0>: addi sp,sp,-32
+ 0xffffffe000204716 <+2>: sd s0,16(sp)
+ 0xffffffe000204718 <+4>: sd ra,24(sp)
+ 0xffffffe00020471a <+6>: addi s0,sp,32
+ 0xffffffe00020471c <+8>: auipc ra,0x0
+ 0xffffffe000204720 <+12>: jalr -384(ra) # 0xffffffe00020459c <patch_insn_write>
+ 0xffffffe000204724 <+16>: beqz a0,0xffffffe00020472e <patch_text_nosync+26>
+ 0xffffffe000204726 <+18>: ld ra,24(sp)
+ 0xffffffe000204728 <+20>: ld s0,16(sp)
+ 0xffffffe00020472a <+22>: addi sp,sp,32
+ 0xffffffe00020472c <+24>: ret
+ 0xffffffe00020472e <+26>: sd a0,-24(s0)
+ 0xffffffe000204732 <+30>: auipc ra,0x4
+ 0xffffffe000204736 <+34>: jalr -1464(ra) # 0xffffffe00020817a <flush_icache_all>
+ 0xffffffe00020473a <+38>: ld a0,-24(s0)
+ 0xffffffe00020473e <+42>: ld ra,24(sp)
+ 0xffffffe000204740 <+44>: ld s0,16(sp)
+ 0xffffffe000204742 <+46>: addi sp,sp,32
+ 0xffffffe000204744 <+48>: ret
+
+(gdb) disassemble flush_icache_all-4
+Dump of assembler code for function flush_icache_all:
+ 0xffffffe00020817a <+0>: addi sp,sp,-8
+ 0xffffffe00020817c <+2>: sd ra,0(sp)
+ 0xffffffe00020817e <+4>: auipc ra,0xfffff
+ 0xffffffe000208182 <+8>: jalr -1822(ra) # 0xffffffe000206a60 <ftrace_caller>
+ 0xffffffe000208186 <+12>: ld ra,0(sp)
+ 0xffffffe000208188 <+14>: addi sp,sp,8
+ 0xffffffe00020818a <+0>: addi sp,sp,-16
+ 0xffffffe00020818c <+2>: sd s0,0(sp)
+ 0xffffffe00020818e <+4>: sd ra,8(sp)
+ 0xffffffe000208190 <+6>: addi s0,sp,16
+ 0xffffffe000208192 <+8>: li a0,0
+ 0xffffffe000208194 <+10>: auipc ra,0xfffff
+ 0xffffffe000208198 <+14>: jalr -410(ra) # 0xffffffe000206ffa <sbi_remote_fence_i>
+ 0xffffffe00020819c <+18>: ld s0,0(sp)
+ 0xffffffe00020819e <+20>: ld ra,8(sp)
+ 0xffffffe0002081a0 <+22>: addi sp,sp,16
+ 0xffffffe0002081a2 <+24>: ret
+
+(gdb) frame 5
+(rec=0xffffffe01ae40c30, enable=3) at kernel/trace/ftrace.c:2503
+2503 return ftrace_make_call(rec, ftrace_addr);
+(gdb) p /x rec->ip
+$2 = 0xffffffe00020817a -> flush_icache_all !
+
+When we modify flush_icache_all's patchable entry to call ftrace_caller:
+ - ftrace_caller is inserted at the flush_icache_all prologue.
+ - flush_icache_all is then called to sync the I/D-cache, but at that
+point flush_icache_all itself has only been half modified.
+
+Link: https://lore.kernel.org/linux-riscv/CAJF2gTT=oDWesWe0JVWvTpGi60-gpbNhYLdFWN_5EbyeqoEDdw@mail.gmail.com/T/#t
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Reviewed-by: Atish Patra <atish.patra@wdc.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/Makefile | 1 +
+ arch/riscv/mm/Makefile | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -6,6 +6,7 @@
+ ifdef CONFIG_FTRACE
+ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ extra-y += head.o
+--- a/arch/riscv/mm/Makefile
++++ b/arch/riscv/mm/Makefile
+@@ -3,6 +3,7 @@
+ CFLAGS_init.o := -mcmodel=medany
+ ifdef CONFIG_FTRACE
+ CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ KCOV_INSTRUMENT_init.o := n
--- /dev/null
+From 67d945778099b14324811fe67c5aff2cda7a7ad5 Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Thu, 17 Dec 2020 16:01:39 +0000
+Subject: riscv: Fixup wrong ftrace remove cflag
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit 67d945778099b14324811fe67c5aff2cda7a7ad5 upstream.
+
+We must use $(CC_FLAGS_FTRACE) instead of directly using -pg, otherwise
+it will cause a -fpatchable-function-entry error.
+
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/Makefile | 4 ++--
+ arch/riscv/mm/Makefile | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -4,8 +4,8 @@
+ #
+
+ ifdef CONFIG_FTRACE
+-CFLAGS_REMOVE_ftrace.o = -pg
+-CFLAGS_REMOVE_patch.o = -pg
++CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ extra-y += head.o
+--- a/arch/riscv/mm/Makefile
++++ b/arch/riscv/mm/Makefile
+@@ -2,7 +2,7 @@
+
+ CFLAGS_init.o := -mcmodel=medany
+ ifdef CONFIG_FTRACE
+-CFLAGS_REMOVE_init.o = -pg
++CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ KCOV_INSTRUMENT_init.o := n
tipc-call-tipc_wait_for_connect-only-when-dlen-is-not-0.patch
vt_kdsetmode-extend-console-locking.patch
bluetooth-btusb-check-conditions-before-enabling-usb-alt-3-for-wbs.patch
+riscv-fixup-wrong-ftrace-remove-cflag.patch
+riscv-fixup-patch_text-panic-in-ftrace.patch
+perf-env-fix-memory-leak-of-bpf_prog_info_linear-member.patch
+perf-symbol-elf-fix-memory-leak-by-freeing-sdt_note.args.patch
+perf-record-fix-memory-leak-in-vdso-found-using-asan.patch
+perf-tools-fix-arm64-build-error-with-gcc-11.patch
+perf-annotate-fix-jump-parsing-for-c-code.patch
+powerpc-perf-invoke-per-cpu-variable-access-with-disabled-interrupts.patch
+srcu-provide-internal-interface-to-start-a-tree-srcu-grace-period.patch
+srcu-provide-polling-interfaces-for-tree-srcu-grace-periods.patch
+srcu-provide-internal-interface-to-start-a-tiny-srcu-grace-period.patch
+srcu-make-tiny-srcu-use-multi-bit-grace-period-counter.patch
+srcu-provide-polling-interfaces-for-tiny-srcu-grace-periods.patch
+tracepoint-use-rcu-get-state-and-cond-sync-for-static-call-updates.patch
+usb-typec-ucsi-acpi-always-decode-connector-change-information.patch
+usb-typec-ucsi-work-around-ppm-losing-change-information.patch
+usb-typec-ucsi-clear-pending-after-acking-connector-change.patch
+net-dsa-mt7530-fix-vlan-traffic-leaks-again.patch
+lkdtm-enable-double_fault-on-all-architectures.patch
+arm64-dts-qcom-msm8994-angler-fix-gpio-reserved-ranges-85-88.patch
+btrfs-fix-null-pointer-dereference-when-deleting-device-by-invalid-id.patch
+kthread-fix-pf_kthread-vs-to_kthread-race.patch
--- /dev/null
+From 74612a07b83fc46c2b2e6f71a541d55b024ebefc Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Thu, 12 Nov 2020 16:34:09 -0800
+Subject: srcu: Make Tiny SRCU use multi-bit grace-period counter
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 74612a07b83fc46c2b2e6f71a541d55b024ebefc upstream.
+
+There is a need for a polling interface for SRCU grace periods. This
+polling needs to distinguish between an SRCU instance being idle on the
+one hand or in the middle of a grace period on the other. This commit
+therefore converts the Tiny SRCU srcu_struct structure's srcu_idx from
+a de facto boolean to a free-running counter, using the bottom bit to
+indicate that a grace period is in progress. The second-from-bottom
+bit is thus used as the index returned by srcu_read_lock().
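+
+For illustration, with the reader index computed as
+((srcu_idx + 1) & 0x2) >> 1 (see the diff below), the counter cycles as:
+
+	srcu_idx = 0:  idle,           readers use array element 0
+	srcu_idx = 1:  GP in progress, readers use array element 1
+	srcu_idx = 2:  idle,           readers use array element 1
+	srcu_idx = 3:  GP in progress, readers use array element 0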
+
+Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
+Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
+[ paulmck: Fix ->srcu_lock_nesting[] indexing per Neeraj Upadhyay. ]
+Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/srcutiny.h | 6 +++---
+ kernel/rcu/srcutiny.c | 5 +++--
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/include/linux/srcutiny.h
++++ b/include/linux/srcutiny.h
+@@ -15,7 +15,7 @@
+
+ struct srcu_struct {
+ short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
+- short srcu_idx; /* Current reader array element. */
++ unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
+ u8 srcu_gp_running; /* GP workqueue running? */
+ u8 srcu_gp_waiting; /* GP waiting for readers? */
+ struct swait_queue_head srcu_wq;
+@@ -59,7 +59,7 @@ static inline int __srcu_read_lock(struc
+ {
+ int idx;
+
+- idx = READ_ONCE(ssp->srcu_idx);
++ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+ return idx;
+ }
+@@ -80,7 +80,7 @@ static inline void srcu_torture_stats_pr
+ {
+ int idx;
+
+- idx = READ_ONCE(ssp->srcu_idx) & 0x1;
++ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ tt, tf, idx,
+ READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -124,11 +124,12 @@ void srcu_drive_gp(struct work_struct *w
+ ssp->srcu_cb_head = NULL;
+ ssp->srcu_cb_tail = &ssp->srcu_cb_head;
+ local_irq_enable();
+- idx = ssp->srcu_idx;
+- WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
++ idx = (ssp->srcu_idx & 0x2) / 2;
++ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
+ WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
+ swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
++ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
+
+ /* Invoke the callbacks we removed above. */
+ while (lh) {
--- /dev/null
+From 1a893c711a600ab57526619b56e6f6b7be00956e Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Fri, 13 Nov 2020 09:37:39 -0800
+Subject: srcu: Provide internal interface to start a Tiny SRCU grace period
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 1a893c711a600ab57526619b56e6f6b7be00956e upstream.
+
+There is a need for a polling interface for SRCU grace periods.
+This polling needs to initiate an SRCU grace period without
+having to queue (and manage) a callback. This commit therefore
+splits the Tiny SRCU call_srcu() function into callback-queuing and
+start-grace-period portions, with the latter in a new function named
+srcu_gp_start_if_needed().
+
+Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
+Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
+Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/srcutiny.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -151,6 +151,16 @@ void srcu_drive_gp(struct work_struct *w
+ }
+ EXPORT_SYMBOL_GPL(srcu_drive_gp);
+
++static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
++{
++ if (!READ_ONCE(ssp->srcu_gp_running)) {
++ if (likely(srcu_init_done))
++ schedule_work(&ssp->srcu_work);
++ else if (list_empty(&ssp->srcu_work.entry))
++ list_add(&ssp->srcu_work.entry, &srcu_boot_list);
++ }
++}
++
+ /*
+ * Enqueue an SRCU callback on the specified srcu_struct structure,
+ * initiating grace-period processing if it is not already running.
+@@ -166,12 +176,7 @@ void call_srcu(struct srcu_struct *ssp,
+ *ssp->srcu_cb_tail = rhp;
+ ssp->srcu_cb_tail = &rhp->next;
+ local_irq_restore(flags);
+- if (!READ_ONCE(ssp->srcu_gp_running)) {
+- if (likely(srcu_init_done))
+- schedule_work(&ssp->srcu_work);
+- else if (list_empty(&ssp->srcu_work.entry))
+- list_add(&ssp->srcu_work.entry, &srcu_boot_list);
+- }
++ srcu_gp_start_if_needed(ssp);
+ }
+ EXPORT_SYMBOL_GPL(call_srcu);
+
--- /dev/null
+From 29d2bb94a8a126ce80ffbb433b648b32fdea524e Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Fri, 13 Nov 2020 10:08:09 -0800
+Subject: srcu: Provide internal interface to start a Tree SRCU grace period
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 29d2bb94a8a126ce80ffbb433b648b32fdea524e upstream.
+
+There is a need for a polling interface for SRCU grace periods.
+This polling needs to initiate an SRCU grace period without having
+to queue (and manage) a callback. This commit therefore splits the
+Tree SRCU __call_srcu() function into callback-initialization and
+queuing/start-grace-period portions, with the latter in a new function
+named srcu_gp_start_if_needed(). This function may be passed a NULL
+callback pointer, in which case it will refrain from queuing anything.
+
+Why have the new function mess with queuing? Locking considerations,
+of course!
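+
+For illustration, a later polling interface can then start a grace period
+without queuing a callback (sketch relying on the NULL-callback behaviour
+described above):
+
+	srcu_gp_start_if_needed(ssp, NULL, true);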
+
+Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
+Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
+Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/srcutree.c | 66 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 29 deletions(-)
+
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -809,6 +809,42 @@ static void srcu_leak_callback(struct rc
+ }
+
+ /*
++ * Start an SRCU grace period, and also queue the callback if non-NULL.
++ */
++static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm)
++{
++ unsigned long flags;
++ int idx;
++ bool needexp = false;
++ bool needgp = false;
++ unsigned long s;
++ struct srcu_data *sdp;
++
++ idx = srcu_read_lock(ssp);
++ sdp = raw_cpu_ptr(ssp->sda);
++ spin_lock_irqsave_rcu_node(sdp, flags);
++ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++ rcu_segcblist_advance(&sdp->srcu_cblist,
++ rcu_seq_current(&ssp->srcu_gp_seq));
++ s = rcu_seq_snap(&ssp->srcu_gp_seq);
++ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
++ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
++ sdp->srcu_gp_seq_needed = s;
++ needgp = true;
++ }
++ if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
++ sdp->srcu_gp_seq_needed_exp = s;
++ needexp = true;
++ }
++ spin_unlock_irqrestore_rcu_node(sdp, flags);
++ if (needgp)
++ srcu_funnel_gp_start(ssp, sdp, s, do_norm);
++ else if (needexp)
++ srcu_funnel_exp_start(ssp, sdp->mynode, s);
++ srcu_read_unlock(ssp, idx);
++}
++
++/*
+ * Enqueue an SRCU callback on the srcu_data structure associated with
+ * the current CPU and the specified srcu_struct structure, initiating
+ * grace-period processing if it is not already running.
+@@ -839,13 +875,6 @@ static void srcu_leak_callback(struct rc
+ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+ rcu_callback_t func, bool do_norm)
+ {
+- unsigned long flags;
+- int idx;
+- bool needexp = false;
+- bool needgp = false;
+- unsigned long s;
+- struct srcu_data *sdp;
+-
+ check_init_srcu_struct(ssp);
+ if (debug_rcu_head_queue(rhp)) {
+ /* Probable double call_srcu(), so leak the callback. */
+@@ -854,28 +883,7 @@ static void __call_srcu(struct srcu_stru
+ return;
+ }
+ rhp->func = func;
+- idx = srcu_read_lock(ssp);
+- sdp = raw_cpu_ptr(ssp->sda);
+- spin_lock_irqsave_rcu_node(sdp, flags);
+- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+- rcu_segcblist_advance(&sdp->srcu_cblist,
+- rcu_seq_current(&ssp->srcu_gp_seq));
+- s = rcu_seq_snap(&ssp->srcu_gp_seq);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+- if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+- sdp->srcu_gp_seq_needed = s;
+- needgp = true;
+- }
+- if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+- sdp->srcu_gp_seq_needed_exp = s;
+- needexp = true;
+- }
+- spin_unlock_irqrestore_rcu_node(sdp, flags);
+- if (needgp)
+- srcu_funnel_gp_start(ssp, sdp, s, do_norm);
+- else if (needexp)
+- srcu_funnel_exp_start(ssp, sdp->mynode, s);
+- srcu_read_unlock(ssp, idx);
++ srcu_gp_start_if_needed(ssp, rhp, do_norm);
+ }
+
+ /**
--- /dev/null
+From 8b5bd67cf6422b63ee100d76d8de8960ca2df7f0 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Fri, 13 Nov 2020 12:54:48 -0800
+Subject: srcu: Provide polling interfaces for Tiny SRCU grace periods
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 8b5bd67cf6422b63ee100d76d8de8960ca2df7f0 upstream.
+
+There is a need for a polling interface for SRCU grace
+periods, so this commit supplies get_state_synchronize_srcu(),
+start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() for this
+purpose. The first can be used if future grace periods are inevitable
+(perhaps due to a later call_srcu() invocation), the second if future
+grace periods might not otherwise happen, and the third to check if a
+grace period has elapsed since the corresponding call to either of the
+first two.
+
+As with get_state_synchronize_rcu() and cond_synchronize_rcu(),
+the return value from either get_state_synchronize_srcu() or
+start_poll_synchronize_srcu() must be passed in to a later call to
+poll_state_synchronize_srcu().
+
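+As a rough usage sketch for the "future grace period is inevitable"
+case (the names below are illustrative and not part of this patch): a
+cookie can be recorded cheaply before the call_srcu() that guarantees
+the grace period, and then polled later from any context:
+
+	DEFINE_STATIC_SRCU(my_srcu);	/* hypothetical SRCU domain */
+
+	static unsigned long my_cookie;
+
+	static void my_start(struct rcu_head *rhp, rcu_callback_t func)
+	{
+		/* Snapshot the grace-period state; nothing is started here. */
+		my_cookie = get_state_synchronize_srcu(&my_srcu);
+		/* This call_srcu() guarantees a future grace period. */
+		call_srcu(&my_srcu, rhp, func);
+	}
+
+	static bool my_done(void)
+	{
+		/* True once a full grace period has elapsed since my_start(). */
+		return poll_state_synchronize_srcu(&my_srcu, my_cookie);
+	}
+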
+Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
+Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
+[ paulmck: Add EXPORT_SYMBOL_GPL() per kernel test robot feedback. ]
+[ paulmck: Apply feedback from Neeraj Upadhyay. ]
+Link: https://lore.kernel.org/lkml/20201117004017.GA7444@paulmck-ThinkPad-P72/
+Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rcupdate.h | 2 +
+ include/linux/srcu.h | 3 ++
+ include/linux/srcutiny.h | 1
+ kernel/rcu/srcutiny.c | 55 +++++++++++++++++++++++++++++++++++++++++++++--
+ 4 files changed, 59 insertions(+), 2 deletions(-)
+
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -33,6 +33,8 @@
+ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+ #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+ #define ulong2long(a) (*(long *)(&(a)))
++#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
++#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
+
+ /* Exported common interfaces */
+ void call_rcu(struct rcu_head *head, rcu_callback_t func);
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -60,6 +60,9 @@ void cleanup_srcu_struct(struct srcu_str
+ int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+ void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+ void synchronize_srcu(struct srcu_struct *ssp);
++unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
++unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
++bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+--- a/include/linux/srcutiny.h
++++ b/include/linux/srcutiny.h
+@@ -16,6 +16,7 @@
+ struct srcu_struct {
+ short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
+ unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
++ unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
+ u8 srcu_gp_running; /* GP workqueue running? */
+ u8 srcu_gp_waiting; /* GP waiting for readers? */
+ struct swait_queue_head srcu_wq;
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -34,6 +34,7 @@ static int init_srcu_struct_fields(struc
+ ssp->srcu_gp_running = false;
+ ssp->srcu_gp_waiting = false;
+ ssp->srcu_idx = 0;
++ ssp->srcu_idx_max = 0;
+ INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
+ INIT_LIST_HEAD(&ssp->srcu_work.entry);
+ return 0;
+@@ -84,6 +85,8 @@ void cleanup_srcu_struct(struct srcu_str
+ WARN_ON(ssp->srcu_gp_waiting);
+ WARN_ON(ssp->srcu_cb_head);
+ WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
++ WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
++ WARN_ON(ssp->srcu_idx & 0x1);
+ }
+ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
+
+@@ -114,7 +117,7 @@ void srcu_drive_gp(struct work_struct *w
+ struct srcu_struct *ssp;
+
+ ssp = container_of(wp, struct srcu_struct, srcu_work);
+- if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
++ if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+ return; /* Already running or nothing to do. */
+
+ /* Remove recently arrived callbacks and wait for readers. */
+@@ -147,13 +150,19 @@ void srcu_drive_gp(struct work_struct *w
+ * straighten that out.
+ */
+ WRITE_ONCE(ssp->srcu_gp_running, false);
+- if (READ_ONCE(ssp->srcu_cb_head))
++ if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+ schedule_work(&ssp->srcu_work);
+ }
+ EXPORT_SYMBOL_GPL(srcu_drive_gp);
+
+ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
+ {
++ unsigned short cookie;
++
++ cookie = get_state_synchronize_srcu(ssp);
++ if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
++ return;
++ WRITE_ONCE(ssp->srcu_idx_max, cookie);
+ if (!READ_ONCE(ssp->srcu_gp_running)) {
+ if (likely(srcu_init_done))
+ schedule_work(&ssp->srcu_work);
+@@ -196,6 +205,48 @@ void synchronize_srcu(struct srcu_struct
+ }
+ EXPORT_SYMBOL_GPL(synchronize_srcu);
+
++/*
++ * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
++ */
++unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
++{
++ unsigned long ret;
++
++ barrier();
++ ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
++ barrier();
++ return ret & USHRT_MAX;
++}
++EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
++
++/*
++ * start_poll_synchronize_srcu - Provide cookie and start grace period
++ *
++ * The difference between this and get_state_synchronize_srcu() is that
++ * this function ensures that poll_state_synchronize_srcu() will
++ * eventually return the value true.
++ */
++unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
++{
++ unsigned long ret = get_state_synchronize_srcu(ssp);
++
++ srcu_gp_start_if_needed(ssp);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
++
++/*
++ * poll_state_synchronize_srcu - Has cookie's grace period ended?
++ */
++bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
++{
++ bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);
++
++ barrier();
++ return ret;
++}
++EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
++
+ /* Lockdep diagnostics. */
+ void __init rcu_scheduler_starting(void)
+ {
--- /dev/null
+From 5358c9fa54b09b5d3d7811b033aa0838c1bbaaf2 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Fri, 13 Nov 2020 17:31:55 -0800
+Subject: srcu: Provide polling interfaces for Tree SRCU grace periods
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 5358c9fa54b09b5d3d7811b033aa0838c1bbaaf2 upstream.
+
+There is a need for a polling interface for SRCU grace
+periods, so this commit supplies get_state_synchronize_srcu(),
+start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() for this
+purpose. The first can be used if future grace periods are inevitable
+(perhaps due to a later call_srcu() invocation), the second if future
+grace periods might not otherwise happen, and the third to check if a
+grace period has elapsed since the corresponding call to either of the
+first two.
+
+As with get_state_synchronize_rcu() and cond_synchronize_rcu(),
+the return value from either get_state_synchronize_srcu() or
+start_poll_synchronize_srcu() must be passed in to a later call to
+poll_state_synchronize_srcu().
+
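+As a rough usage sketch for the case where no later call_srcu() is
+guaranteed (the names below are illustrative and not part of this
+patch), start_poll_synchronize_srcu() both returns the cookie and makes
+sure the needed grace period gets going:
+
+	static unsigned long my_cookie;
+
+	static void my_record(struct srcu_struct *ssp)
+	{
+		/* Starts a grace period if one is needed and not yet running. */
+		my_cookie = start_poll_synchronize_srcu(ssp);
+	}
+
+	static void my_wait_if_needed(struct srcu_struct *ssp)
+	{
+		/* Sleep only if the recorded grace period has not yet elapsed. */
+		if (!poll_state_synchronize_srcu(ssp, my_cookie))
+			synchronize_srcu(ssp);
+	}
+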
+Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
+Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
+[ paulmck: Add EXPORT_SYMBOL_GPL() per kernel test robot feedback. ]
+[ paulmck: Apply feedback from Neeraj Upadhyay. ]
+Link: https://lore.kernel.org/lkml/20201117004017.GA7444@paulmck-ThinkPad-P72/
+Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/srcutree.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 63 insertions(+), 4 deletions(-)
+
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -811,7 +811,8 @@ static void srcu_leak_callback(struct rc
+ /*
+ * Start an SRCU grace period, and also queue the callback if non-NULL.
+ */
+-static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm)
++static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
++ struct rcu_head *rhp, bool do_norm)
+ {
+ unsigned long flags;
+ int idx;
+@@ -820,10 +821,12 @@ static void srcu_gp_start_if_needed(stru
+ unsigned long s;
+ struct srcu_data *sdp;
+
++ check_init_srcu_struct(ssp);
+ idx = srcu_read_lock(ssp);
+ sdp = raw_cpu_ptr(ssp->sda);
+ spin_lock_irqsave_rcu_node(sdp, flags);
+- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++ if (rhp)
++ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_gp_seq));
+ s = rcu_seq_snap(&ssp->srcu_gp_seq);
+@@ -842,6 +845,7 @@ static void srcu_gp_start_if_needed(stru
+ else if (needexp)
+ srcu_funnel_exp_start(ssp, sdp->mynode, s);
+ srcu_read_unlock(ssp, idx);
++ return s;
+ }
+
+ /*
+@@ -875,7 +879,6 @@ static void srcu_gp_start_if_needed(stru
+ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+ rcu_callback_t func, bool do_norm)
+ {
+- check_init_srcu_struct(ssp);
+ if (debug_rcu_head_queue(rhp)) {
+ /* Probable double call_srcu(), so leak the callback. */
+ WRITE_ONCE(rhp->func, srcu_leak_callback);
+@@ -883,7 +886,7 @@ static void __call_srcu(struct srcu_stru
+ return;
+ }
+ rhp->func = func;
+- srcu_gp_start_if_needed(ssp, rhp, do_norm);
++ (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
+ }
+
+ /**
+@@ -1012,6 +1015,62 @@ void synchronize_srcu(struct srcu_struct
+ }
+ EXPORT_SYMBOL_GPL(synchronize_srcu);
+
++/**
++ * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
++ * @ssp: srcu_struct to provide cookie for.
++ *
++ * This function returns a cookie that can be passed to
++ * poll_state_synchronize_srcu(), which will return true if a full grace
++ * period has elapsed in the meantime. It is the caller's responsibility
++ * to make sure that grace period happens, for example, by invoking
++ * call_srcu() after return from get_state_synchronize_srcu().
++ */
++unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
++{
++ // Any prior manipulation of SRCU-protected data must happen
++ // before the load from ->srcu_gp_seq.
++ smp_mb();
++ return rcu_seq_snap(&ssp->srcu_gp_seq);
++}
++EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
++
++/**
++ * start_poll_synchronize_srcu - Provide cookie and start grace period
++ * @ssp: srcu_struct to provide cookie for.
++ *
++ * This function returns a cookie that can be passed to
++ * poll_state_synchronize_srcu(), which will return true if a full grace
++ * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
++ * this function also ensures that any needed SRCU grace period will be
++ * started. This convenience does come at a cost in terms of CPU overhead.
++ */
++unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
++{
++ return srcu_gp_start_if_needed(ssp, NULL, true);
++}
++EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
++
++/**
++ * poll_state_synchronize_srcu - Has cookie's grace period ended?
++ * @ssp: srcu_struct to provide cookie for.
++ * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
++ *
++ * This function takes the cookie that was returned from either
++ * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
++ * returns @true if an SRCU grace period elapsed since the time that the
++ * cookie was created.
++ */
++bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
++{
++ if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
++ return false;
++ // Ensure that the end of the SRCU grace period happens before
++ // any subsequent code that the caller might execute.
++ smp_mb(); // ^^^
++ return true;
++}
++EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
++
+ /*
+ * Callback function for srcu_barrier() use.
+ */
--- /dev/null
+From 7b40066c97ec66a44e388f82fcf694987451768f Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Thu, 5 Aug 2021 15:29:54 -0400
+Subject: tracepoint: Use rcu get state and cond sync for static call updates
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit 7b40066c97ec66a44e388f82fcf694987451768f upstream.
+
+State transitions from 1->0->1 and N->2->1 callbacks require RCU
+synchronization. Rather than performing the RCU synchronization every
+time such a state change occurs, which is quite slow when many
+tracepoints are registered in batch, instead keep a snapshot of the RCU
+state on the most recent transitions which belong to a chain, and
+conditionally wait for a grace period on the last transition of the
+chain only if a grace period has not already elapsed since the last
+snapshot.
+
+This applies to both RCU and SRCU.
+
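+In miniature, and with made-up names (the patch itself wraps both the
+RCU and SRCU variants of this in the tp_rcu_get_state() and
+tp_rcu_cond_sync() helpers below), the classic-RCU side of the
+snapshot/conditional-wait pattern is:
+
+	static unsigned long my_rcu_snap;
+
+	static void my_take_snapshot(void)
+	{
+		/* Cheap: records the current grace-period state, no waiting. */
+		my_rcu_snap = get_state_synchronize_rcu();
+	}
+
+	static void my_cond_sync(void)
+	{
+		/* Blocks only if no grace period has elapsed since the snapshot. */
+		cond_synchronize_rcu(my_rcu_snap);
+	}
+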
+This brings the performance back to what it was before the regression
+introduced by commit 231264d6927f ("Fix: tracepoint: static call
+function vs data state mismatch").
+
+Before this commit:
+
+ # trace-cmd start -e all
+ # time trace-cmd start -p nop
+
+ real 0m10.593s
+ user 0m0.017s
+ sys 0m0.259s
+
+After this commit:
+
+ # trace-cmd start -e all
+ # time trace-cmd start -p nop
+
+ real 0m0.878s
+ user 0m0.000s
+ sys 0m0.103s
+
+Link: https://lkml.kernel.org/r/20210805192954.30688-1-mathieu.desnoyers@efficios.com
+Link: https://lore.kernel.org/io-uring/4ebea8f0-58c9-e571-fd30-0ce4f6f09c70@samba.org/
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Stefan Metzmacher <metze@samba.org>
+Fixes: 231264d6927f ("Fix: tracepoint: static call function vs data state mismatch")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/tracepoint.c | 81 +++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 67 insertions(+), 14 deletions(-)
+
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -28,6 +28,44 @@ extern tracepoint_ptr_t __stop___tracepo
+ DEFINE_SRCU(tracepoint_srcu);
+ EXPORT_SYMBOL_GPL(tracepoint_srcu);
+
++enum tp_transition_sync {
++ TP_TRANSITION_SYNC_1_0_1,
++ TP_TRANSITION_SYNC_N_2_1,
++
++ _NR_TP_TRANSITION_SYNC,
++};
++
++struct tp_transition_snapshot {
++ unsigned long rcu;
++ unsigned long srcu;
++ bool ongoing;
++};
++
++/* Protected by tracepoints_mutex */
++static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
++
++static void tp_rcu_get_state(enum tp_transition_sync sync)
++{
++ struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
++
++ /* Keep the latest get_state snapshot. */
++ snapshot->rcu = get_state_synchronize_rcu();
++ snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
++ snapshot->ongoing = true;
++}
++
++static void tp_rcu_cond_sync(enum tp_transition_sync sync)
++{
++ struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];
++
++ if (!snapshot->ongoing)
++ return;
++ cond_synchronize_rcu(snapshot->rcu);
++ if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
++ synchronize_srcu(&tracepoint_srcu);
++ snapshot->ongoing = false;
++}
++
+ /* Set to 1 to enable tracepoint debug output */
+ static const int tracepoint_debug;
+
+@@ -332,6 +370,11 @@ static int tracepoint_add_func(struct tr
+ */
+ switch (nr_func_state(tp_funcs)) {
+ case TP_FUNC_1: /* 0->1 */
++ /*
++ * Make sure new static func never uses old data after a
++ * 1->0->1 transition sequence.
++ */
++ tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
+ /* Set static call to first function */
+ tracepoint_update_call(tp, tp_funcs);
+ /* Both iterator and static call handle NULL tp->funcs */
+@@ -346,10 +389,15 @@ static int tracepoint_add_func(struct tr
+ * Requires ordering between RCU assign/dereference and
+ * static call update/call.
+ */
+- rcu_assign_pointer(tp->funcs, tp_funcs);
+- break;
++ fallthrough;
+ case TP_FUNC_N: /* N->N+1 (N>1) */
+ rcu_assign_pointer(tp->funcs, tp_funcs);
++ /*
++ * Make sure static func never uses incorrect data after a
++ * N->...->2->1 (N>1) transition sequence.
++ */
++ if (tp_funcs[0].data != old[0].data)
++ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+@@ -393,24 +441,23 @@ static int tracepoint_remove_func(struct
+ /* Both iterator and static call handle NULL tp->funcs */
+ rcu_assign_pointer(tp->funcs, NULL);
+ /*
+- * Make sure new func never uses old data after a 1->0->1
+- * transition sequence.
+- * Considering that transition 0->1 is the common case
+- * and don't have rcu-sync, issue rcu-sync after
+- * transition 1->0 to break that sequence by waiting for
+- * readers to be quiescent.
++ * Make sure new static func never uses old data after a
++ * 1->0->1 transition sequence.
+ */
+- tracepoint_synchronize_unregister();
++ tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
+ break;
+ case TP_FUNC_1: /* 2->1 */
+ rcu_assign_pointer(tp->funcs, tp_funcs);
+ /*
+- * On 2->1 transition, RCU sync is needed before setting
+- * static call to first callback, because the observer
+- * may have loaded any prior tp->funcs after the last one
+- * associated with an rcu-sync.
++ * Make sure static func never uses incorrect data after a
++ * N->...->2->1 (N>2) transition sequence. If the first
++ * element's data has changed, then force the synchronization
++ * to prevent current readers that have loaded the old data
++ * from calling the new function.
+ */
+- tracepoint_synchronize_unregister();
++ if (tp_funcs[0].data != old[0].data)
++ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
++ tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
+ /* Set static call to first function */
+ tracepoint_update_call(tp, tp_funcs);
+ break;
+@@ -418,6 +465,12 @@ static int tracepoint_remove_func(struct
+ fallthrough;
+ case TP_FUNC_N:
+ rcu_assign_pointer(tp->funcs, tp_funcs);
++ /*
++ * Make sure static func never uses incorrect data after a
++ * N->...->2->1 (N>2) transition sequence.
++ */
++ if (tp_funcs[0].data != old[0].data)
++ tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
--- /dev/null
+From 47ea2929d58c35598e681212311d35b240c373ce Mon Sep 17 00:00:00 2001
+From: Benjamin Berg <bberg@redhat.com>
+Date: Fri, 9 Oct 2020 16:40:46 +0200
+Subject: usb: typec: ucsi: acpi: Always decode connector change information
+
+From: Benjamin Berg <bberg@redhat.com>
+
+commit 47ea2929d58c35598e681212311d35b240c373ce upstream.
+
+Normal commands may report that a connector has changed. Always call
+the ucsi_connector_change() handler and let it take care of scheduling
+the work when needed. Doing this makes the ACPI code path identical to
+the CCG one.
+
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Benjamin Berg <bberg@redhat.com>
+Link: https://lore.kernel.org/r/20201009144047.505957-2-benjamin@sipsolutions.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi_acpi.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -103,11 +103,12 @@ static void ucsi_acpi_notify(acpi_handle
+ if (ret)
+ return;
+
++ if (UCSI_CCI_CONNECTOR(cci))
++ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
++
+ if (test_bit(COMMAND_PENDING, &ua->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&ua->complete);
+- else if (UCSI_CCI_CONNECTOR(cci))
+- ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
+ }
+
+ static int ucsi_acpi_probe(struct platform_device *pdev)
--- /dev/null
+From 8c9b3caab3ac26db1da00b8117901640c55a69dd Mon Sep 17 00:00:00 2001
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+Date: Sat, 15 May 2021 21:09:53 -0700
+Subject: usb: typec: ucsi: Clear pending after acking connector change
+
+From: Bjorn Andersson <bjorn.andersson@linaro.org>
+
+commit 8c9b3caab3ac26db1da00b8117901640c55a69dd upstream.
+
+It's possible that the interrupt handler for the UCSI driver signals a
+connector change after the handler clears the PENDING bit, but before
+it has sent the acknowledge request. The result is that the handler is
+invoked yet again, to ack the same connector change.
+
+At least some versions of the Qualcomm UCSI firmware will not handle the
+second - "spurious" - acknowledgment gracefully. So make sure to not
+clear the pending flag until the change is acknowledged.
+
+Any connector change coming in after the acknowledgment, whose pending
+flag would otherwise be incorrectly cleared, should as far as I can
+tell be covered by the subsequent connector status check.
+
+Fixes: 217504a05532 ("usb: typec: ucsi: Work around PPM losing change information")
+Cc: stable <stable@vger.kernel.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Acked-By: Benjamin Berg <bberg@redhat.com>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20210516040953.622409-1-bjorn.andersson@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -703,8 +703,8 @@ static void ucsi_handle_connector_change
+ ucsi_send_command(con->ucsi, command, NULL, 0);
+
+ /* 3. ACK connector change */
+- clear_bit(EVENT_PENDING, &ucsi->flags);
+ ret = ucsi_acknowledge_connector_change(ucsi);
++ clear_bit(EVENT_PENDING, &ucsi->flags);
+ if (ret) {
+ dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+ goto out_unlock;
--- /dev/null
+From 217504a055325fe76ec1142aa15f14d3db77f94f Mon Sep 17 00:00:00 2001
+From: Benjamin Berg <bberg@redhat.com>
+Date: Fri, 9 Oct 2020 16:40:47 +0200
+Subject: usb: typec: ucsi: Work around PPM losing change information
+
+From: Benjamin Berg <bberg@redhat.com>
+
+commit 217504a055325fe76ec1142aa15f14d3db77f94f upstream.
+
+Some/many PPMs simply clear the change bitfield when a notification on
+a port is acknowledged. Unfortunately, doing so means that any changes
+between the GET_CONNECTOR_STATUS and ACK_CC_CI commands are simply
+lost.
+
+Work around this by re-fetching the connector status afterwards. We can
+then infer any changes that we see have happened but that may not be
+respresented in the change bitfield.
+
+We end up with the following actions:
+ 1. UCSI_GET_CONNECTOR_STATUS, store result, update unprocessed_changes
+ 2. UCSI_GET_CAM_SUPPORTED, discard result
+ 3. ACK connector change
+ 4. UCSI_GET_CONNECTOR_STATUS, store result
+ 5. Infer lost changes by comparing UCSI_GET_CONNECTOR_STATUS results
+ 6. If PPM reported a new change, then restart in order to ACK
+ 7. Process everything as usual.
+
+The worker is also changed to re-schedule itself if a new change
+notification happened while it was running.
+
+Doing this fixes quite commonly occurring issues where e.g. the UCSI
+power supply would remain online even though the Thunderbolt cable had
+been unplugged.
+
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Benjamin Berg <bberg@redhat.com>
+Link: https://lore.kernel.org/r/20201009144047.505957-3-benjamin@sipsolutions.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi.c | 125 +++++++++++++++++++++++++++++++++++-------
+ drivers/usb/typec/ucsi/ucsi.h | 2
+ 2 files changed, 107 insertions(+), 20 deletions(-)
+
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -53,7 +53,7 @@ static int ucsi_acknowledge_connector_ch
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
+
+- return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
++ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+ }
+
+ static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
+@@ -648,21 +648,113 @@ static void ucsi_handle_connector_change
+ struct ucsi_connector *con = container_of(work, struct ucsi_connector,
+ work);
+ struct ucsi *ucsi = con->ucsi;
++ struct ucsi_connector_status pre_ack_status;
++ struct ucsi_connector_status post_ack_status;
+ enum typec_role role;
++ u16 inferred_changes;
++ u16 changed_flags;
+ u64 command;
+ int ret;
+
+ mutex_lock(&con->lock);
+
++ /*
++ * Some/many PPMs have an issue where all fields in the change bitfield
++ * are cleared when an ACK is sent. This causes any change
++ * between GET_CONNECTOR_STATUS and ACK to be lost.
++ *
++ * We work around this by re-fetching the connector status afterwards.
++ * We then infer any changes that we see have happened but that may not
++ * be represented in the change bitfield.
++ *
++ * Also, even though we don't need to know the currently supported alt
++ * modes, we run the GET_CAM_SUPPORTED command to ensure the PPM does
++ * not get stuck in case it assumes we do.
++ * Always do this, rather than relying on UCSI_CONSTAT_CAM_CHANGE to be
++ * set in the change bitfield.
++ *
++ * We end up with the following actions:
++ * 1. UCSI_GET_CONNECTOR_STATUS, store result, update unprocessed_changes
++ * 2. UCSI_GET_CAM_SUPPORTED, discard result
++ * 3. ACK connector change
++ * 4. UCSI_GET_CONNECTOR_STATUS, store result
++ * 5. Infer lost changes by comparing UCSI_GET_CONNECTOR_STATUS results
++ * 6. If PPM reported a new change, then restart in order to ACK
++ * 7. Process everything as usual.
++ *
++ * We may end up seeing a change twice, but we can only miss extremely
++ * short transitional changes.
++ */
++
++ /* 1. First UCSI_GET_CONNECTOR_STATUS */
++ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
++ ret = ucsi_send_command(ucsi, command, &pre_ack_status,
++ sizeof(pre_ack_status));
++ if (ret < 0) {
++ dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
++ __func__, ret);
++ goto out_unlock;
++ }
++ con->unprocessed_changes |= pre_ack_status.change;
++
++ /* 2. Run UCSI_GET_CAM_SUPPORTED and discard the result. */
++ command = UCSI_GET_CAM_SUPPORTED;
++ command |= UCSI_CONNECTOR_NUMBER(con->num);
++ ucsi_send_command(con->ucsi, command, NULL, 0);
++
++ /* 3. ACK connector change */
++ clear_bit(EVENT_PENDING, &ucsi->flags);
++ ret = ucsi_acknowledge_connector_change(ucsi);
++ if (ret) {
++ dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
++ goto out_unlock;
++ }
++
++ /* 4. Second UCSI_GET_CONNECTOR_STATUS */
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+- ret = ucsi_send_command(ucsi, command, &con->status,
+- sizeof(con->status));
++ ret = ucsi_send_command(ucsi, command, &post_ack_status,
++ sizeof(post_ack_status));
+ if (ret < 0) {
+ dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ __func__, ret);
+ goto out_unlock;
+ }
+
++ /* 5. Infer any missing changes */
++ changed_flags = pre_ack_status.flags ^ post_ack_status.flags;
++ inferred_changes = 0;
++ if (UCSI_CONSTAT_PWR_OPMODE(changed_flags) != 0)
++ inferred_changes |= UCSI_CONSTAT_POWER_OPMODE_CHANGE;
++
++ if (changed_flags & UCSI_CONSTAT_CONNECTED)
++ inferred_changes |= UCSI_CONSTAT_CONNECT_CHANGE;
++
++ if (changed_flags & UCSI_CONSTAT_PWR_DIR)
++ inferred_changes |= UCSI_CONSTAT_POWER_DIR_CHANGE;
++
++ if (UCSI_CONSTAT_PARTNER_FLAGS(changed_flags) != 0)
++ inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
++
++ if (UCSI_CONSTAT_PARTNER_TYPE(changed_flags) != 0)
++ inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
++
++ /* Mask out anything that was correctly notified in the later call. */
++ inferred_changes &= ~post_ack_status.change;
++ if (inferred_changes)
++ dev_dbg(ucsi->dev, "%s: Inferred changes that would have been lost: 0x%04x\n",
++ __func__, inferred_changes);
++
++ con->unprocessed_changes |= inferred_changes;
++
++ /* 6. If PPM reported a new change, then restart in order to ACK */
++ if (post_ack_status.change)
++ goto out_unlock;
++
++ /* 7. Continue as if nothing happened */
++ con->status = post_ack_status;
++ con->status.change = con->unprocessed_changes;
++ con->unprocessed_changes = 0;
++
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
+@@ -703,28 +795,19 @@ static void ucsi_handle_connector_change
+ ucsi_port_psy_changed(con);
+ }
+
+- if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) {
+- /*
+- * We don't need to know the currently supported alt modes here.
+- * Running GET_CAM_SUPPORTED command just to make sure the PPM
+- * does not get stuck in case it assumes we do so.
+- */
+- command = UCSI_GET_CAM_SUPPORTED;
+- command |= UCSI_CONNECTOR_NUMBER(con->num);
+- ucsi_send_command(con->ucsi, command, NULL, 0);
+- }
+-
+ if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
+ ucsi_partner_change(con);
+
+- ret = ucsi_acknowledge_connector_change(ucsi);
+- if (ret)
+- dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+-
+ trace_ucsi_connector_change(con->num, &con->status);
+
+ out_unlock:
+- clear_bit(EVENT_PENDING, &ucsi->flags);
++ if (test_and_clear_bit(EVENT_PENDING, &ucsi->flags)) {
++ schedule_work(&con->work);
++ mutex_unlock(&con->lock);
++ return;
++ }
++
++ clear_bit(EVENT_PROCESSING, &ucsi->flags);
+ mutex_unlock(&con->lock);
+ }
+
+@@ -742,7 +825,9 @@ void ucsi_connector_change(struct ucsi *
+ return;
+ }
+
+- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
++ set_bit(EVENT_PENDING, &ucsi->flags);
++
++ if (!test_and_set_bit(EVENT_PROCESSING, &ucsi->flags))
+ schedule_work(&con->work);
+ }
+ EXPORT_SYMBOL_GPL(ucsi_connector_change);
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -299,6 +299,7 @@ struct ucsi {
+ #define EVENT_PENDING 0
+ #define COMMAND_PENDING 1
+ #define ACK_PENDING 2
++#define EVENT_PROCESSING 3
+ };
+
+ #define UCSI_MAX_SVID 5
+@@ -324,6 +325,7 @@ struct ucsi_connector {
+
+ struct typec_capability typec_cap;
+
++ u16 unprocessed_changes;
+ struct ucsi_connector_status status;
+ struct ucsi_connector_capability cap;
+ struct power_supply *psy;