--- /dev/null
+From 010bfbe768f7ecc876ffba92db30432de4997e2a Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Fri, 16 Apr 2021 09:42:14 +0200
+Subject: cfg80211: scan: drop entry from hidden_list on overflow
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 010bfbe768f7ecc876ffba92db30432de4997e2a upstream.
+
+If we overflow the maximum number of BSS entries and free the
+new entry, drop it from any hidden_list that it may have been
+added to in the code above or in cfg80211_combine_bsses().
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Link: https://lore.kernel.org/r/20210416094212.5de7d1676ad7.Ied283b0bc5f504845e7d6ab90626bdfa68bb3dc0@changeid
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/scan.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -956,6 +956,8 @@ cfg80211_bss_update(struct cfg80211_regi
+
+ if (rdev->bss_entries >= bss_entries_limit &&
+ !cfg80211_bss_expire_oldest(rdev)) {
++ if (!list_empty(&new->hidden_list))
++ list_del(&new->hidden_list);
+ kfree(new);
+ goto drop;
+ }
--- /dev/null
+From 260a9ad9446723d4063ed802989758852809714d Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 14 Apr 2021 11:29:55 +0300
+Subject: ipw2x00: potential buffer overflow in libipw_wx_set_encodeext()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 260a9ad9446723d4063ed802989758852809714d upstream.
+
+The "ext->key_len" is a u16 that comes from the user. If it's over
+SCM_KEY_LEN (32) that could lead to memory corruption.
+
+Fixes: e0d369d1d969 ("[PATCH] ieee82011: Added WE-18 support to default wireless extension handler")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Stanislav Yakovlev <stas.yakovlev@gmail.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/YHaoA1i+8uT4ir4h@mwanda
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/ipw2x00/libipw_wx.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
++++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+@@ -649,8 +649,10 @@ int libipw_wx_set_encodeext(struct libip
+ }
+
+ if (ext->alg != IW_ENCODE_ALG_NONE) {
+- memcpy(sec.keys[idx], ext->key, ext->key_len);
+- sec.key_sizes[idx] = ext->key_len;
++ int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
++
++ memcpy(sec.keys[idx], ext->key, key_len);
++ sec.key_sizes[idx] = key_len;
+ sec.flags |= (1 << idx);
+ if (ext->alg == IW_ENCODE_ALG_WEP) {
+ sec.encode_alg[idx] = SEC_ALG_WEP;
--- /dev/null
+From f7c7a2f9a23e5b6e0f5251f29648d0238bb7757e Mon Sep 17 00:00:00 2001
+From: Heming Zhao <heming.zhao@suse.com>
+Date: Thu, 8 Apr 2021 15:44:15 +0800
+Subject: md-cluster: fix use-after-free issue when removing rdev
+
+From: Heming Zhao <heming.zhao@suse.com>
+
+commit f7c7a2f9a23e5b6e0f5251f29648d0238bb7757e upstream.
+
+md_kick_rdev_from_array will remove rdev, so we should
+use rdev_for_each_safe to search list.
+
+How to trigger:
+
+env: Two nodes on kvm-qemu x86_64 VMs (2C2G with 2 iscsi luns).
+
+```
+node2=192.168.0.3
+
+for i in {1..20}; do
+ echo ==== $i `date` ====;
+
+ mdadm -Ss && ssh ${node2} "mdadm -Ss"
+ wipefs -a /dev/sda /dev/sdb
+
+ mdadm -CR /dev/md0 -b clustered -e 1.2 -n 2 -l 1 /dev/sda \
+ /dev/sdb --assume-clean
+ ssh ${node2} "mdadm -A /dev/md0 /dev/sda /dev/sdb"
+ mdadm --wait /dev/md0
+ ssh ${node2} "mdadm --wait /dev/md0"
+
+ mdadm --manage /dev/md0 --fail /dev/sda --remove /dev/sda
+ sleep 1
+done
+```
+
+Crash stack:
+
+```
+stack segment: 0000 [#1] SMP
+... ...
+RIP: 0010:md_check_recovery+0x1e8/0x570 [md_mod]
+... ...
+RSP: 0018:ffffb149807a7d68 EFLAGS: 00010207
+RAX: 0000000000000000 RBX: ffff9d494c180800 RCX: ffff9d490fc01e50
+RDX: fffff047c0ed8308 RSI: 0000000000000246 RDI: 0000000000000246
+RBP: 6b6b6b6b6b6b6b6b R08: ffff9d490fc01e40 R09: 0000000000000000
+R10: 0000000000000001 R11: 0000000000000001 R12: 0000000000000000
+R13: ffff9d494c180818 R14: ffff9d493399ef38 R15: ffff9d4933a1d800
+FS: 0000000000000000(0000) GS:ffff9d494f700000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fe68cab9010 CR3: 000000004c6be001 CR4: 00000000003706e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ raid1d+0x5c/0xd40 [raid1]
+ ? finish_task_switch+0x75/0x2a0
+ ? lock_timer_base+0x67/0x80
+ ? try_to_del_timer_sync+0x4d/0x80
+ ? del_timer_sync+0x41/0x50
+ ? schedule_timeout+0x254/0x2d0
+ ? md_start_sync+0xe0/0xe0 [md_mod]
+ ? md_thread+0x127/0x160 [md_mod]
+ md_thread+0x127/0x160 [md_mod]
+ ? wait_woken+0x80/0x80
+ kthread+0x10d/0x130
+ ? kthread_park+0xa0/0xa0
+ ret_from_fork+0x1f/0x40
+```
+
+Fixes: dbb64f8635f5d ("md-cluster: Fix adding of new disk with new reload code")
+Fixes: 659b254fa7392 ("md-cluster: remove a disk asynchronously from cluster environment")
+Cc: stable@vger.kernel.org
+Reviewed-by: Gang He <ghe@suse.com>
+Signed-off-by: Heming Zhao <heming.zhao@suse.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8462,11 +8462,11 @@ void md_check_recovery(struct mddev *mdd
+ }
+
+ if (mddev_is_clustered(mddev)) {
+- struct md_rdev *rdev;
++ struct md_rdev *rdev, *tmp;
+ /* kick the device if another node issued a
+ * remove disk.
+ */
+- rdev_for_each(rdev, mddev) {
++ rdev_for_each_safe(rdev, tmp, mddev) {
+ if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+ rdev->raid_disk < 0)
+ md_kick_rdev_from_array(rdev);
+@@ -8775,12 +8775,12 @@ err_wq:
+ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+- struct md_rdev *rdev2;
++ struct md_rdev *rdev2, *tmp;
+ int role, ret;
+ char b[BDEVNAME_SIZE];
+
+ /* Check for change of roles in the active devices */
+- rdev_for_each(rdev2, mddev) {
++ rdev_for_each_safe(rdev2, tmp, mddev) {
+ if (test_bit(Faulty, &rdev2->flags))
+ continue;
+
--- /dev/null
+From 8b57251f9a91f5e5a599de7549915d2d226cc3af Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Sat, 3 Apr 2021 18:15:28 +0200
+Subject: md: factor out a mddev_find_locked helper from mddev_find
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 8b57251f9a91f5e5a599de7549915d2d226cc3af upstream.
+
+Factor out a self-contained helper to just lookup a mddev by the dev_t
+"unit".
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Heming Zhao <heming.zhao@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 32 +++++++++++++++++++-------------
+ 1 file changed, 19 insertions(+), 13 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -508,6 +508,17 @@ void mddev_init(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(mddev_init);
+
++static struct mddev *mddev_find_locked(dev_t unit)
++{
++ struct mddev *mddev;
++
++ list_for_each_entry(mddev, &all_mddevs, all_mddevs)
++ if (mddev->unit == unit)
++ return mddev;
++
++ return NULL;
++}
++
+ static struct mddev *mddev_find(dev_t unit)
+ {
+ struct mddev *mddev, *new = NULL;
+@@ -519,13 +530,13 @@ static struct mddev *mddev_find(dev_t un
+ spin_lock(&all_mddevs_lock);
+
+ if (unit) {
+- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+- if (mddev->unit == unit) {
+- mddev_get(mddev);
+- spin_unlock(&all_mddevs_lock);
+- kfree(new);
+- return mddev;
+- }
++ mddev = mddev_find_locked(unit);
++ if (mddev) {
++ mddev_get(mddev);
++ spin_unlock(&all_mddevs_lock);
++ kfree(new);
++ return mddev;
++ }
+
+ if (new) {
+ list_add(&new->all_mddevs, &all_mddevs);
+@@ -551,12 +562,7 @@ static struct mddev *mddev_find(dev_t un
+ return NULL;
+ }
+
+- is_free = 1;
+- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+- if (mddev->unit == dev) {
+- is_free = 0;
+- break;
+- }
++ is_free = !mddev_find_locked(dev);
+ }
+ new->unit = dev;
+ new->md_minor = MINOR(dev);
--- /dev/null
+From 6a4db2a60306eb65bfb14ccc9fde035b74a4b4e7 Mon Sep 17 00:00:00 2001
+From: Zhao Heming <heming.zhao@suse.com>
+Date: Sat, 3 Apr 2021 11:01:25 +0800
+Subject: md: md_open returns -EBUSY when entering racing area
+
+From: Zhao Heming <heming.zhao@suse.com>
+
+commit 6a4db2a60306eb65bfb14ccc9fde035b74a4b4e7 upstream.
+
+commit d3374825ce57 ("md: make devices disappear when they are no longer
+needed.") introduced protection between mddev creating & removing. The
+md_open shouldn't create mddev when all_mddevs list doesn't contain
+mddev. With the current code logic, it is very easy to trigger a
+soft lockup in a non-preempt env.
+
+This patch changes md_open returning from -ERESTARTSYS to -EBUSY, which
+will break the infinitely retry when md_open enter racing area.
+
+This patch partly fixes the soft lockup issue; the full fix needs
+mddev_find to be split into two functions: mddev_find & mddev_find_or_alloc.
+And md_open should call the new mddev_find (it only does the searching job).
+
+For more detail, please refer with Christoph's "split mddev_find" patch
+in later commits.
+
+---
+ drivers/md/md.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7112,8 +7112,7 @@ static int md_open(struct block_device *
+ /* Wait until bdev->bd_disk is definitely gone */
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
+- /* Then retry the open from the top */
+- return -ERESTARTSYS;
++ return -EBUSY;
+ }
+ BUG_ON(mddev != bdev->bd_disk->private_data);
+
--- /dev/null
+From c49f71f60754acbff37505e1d16ca796bf8a8140 Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@orcam.me.uk>
+Date: Tue, 20 Apr 2021 04:50:40 +0200
+Subject: MIPS: Reinstate platform `__div64_32' handler
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit c49f71f60754acbff37505e1d16ca796bf8a8140 upstream.
+
+Our current MIPS platform `__div64_32' handler is inactive, because it
+is incorrectly only enabled for 64-bit configurations, for which generic
+`do_div' code does not call it anyway.
+
+The handler is not suitable for being called from there though as it
+only calculates 32 bits of the quotient under the assumption the 64-bit
+divident has been suitably reduced. Code for such reduction used to be
+there, however it has been incorrectly removed with commit c21004cd5b4c
+("MIPS: Rewrite <asm/div64.h> to work with gcc 4.4.0."), which should
+have only updated an obsoleted constraint for an inline asm involving
+$hi and $lo register outputs, while possibly wiring the original MIPS
+variant of the `do_div' macro as `__div64_32' handler for the generic
+`do_div' implementation.
+
+Correct the handler as follows then:
+
+- Revert most of the commit referred, however retaining the current
+ formatting, except for the final two instructions of the inline asm
+ sequence, which the original commit missed. Omit the original 64-bit
+ parts though.
+
+- Rename the original `do_div' macro to `__div64_32'. Use the combined
+ `x' constraint referring to the MD accumulator as a whole, replacing
+ the original individual `h' and `l' constraints used for $hi and $lo
+ registers respectively, of which `h' has been obsoleted with GCC 4.4.
+ Update surrounding code accordingly.
+
+ We have since removed support for GCC versions before 4.9, so no need
+ for a special arrangement here; GCC has supported the `x' constraint
+ since forever anyway, or at least going back to 1991.
+
+- Rename the `__base' local variable in `__div64_32' to `__radix' to
+ avoid a conflict with a local variable in `do_div'.
+
+- Actually enable this code for 32-bit rather than 64-bit configurations
+ by qualifying it with BITS_PER_LONG being 32 instead of 64. Include
+ <asm/bitsperlong.h> for this macro rather than <linux/types.h> as we
+ don't need anything else.
+
+- Finally include <asm-generic/div64.h> last rather than first.
+
+This has passed correctness verification with test_div64 and reduced the
+module's average execution time down to 1.0668s and 0.2629s from 2.1529s
+and 0.5647s respectively for an R3400 CPU @40MHz and a 5Kc CPU @160MHz.
+For a reference 64-bit `do_div' code where we have the DDIVU instruction
+available to do the whole calculation right away averages at 0.0660s for
+the latter CPU.
+
+Fixes: c21004cd5b4c ("MIPS: Rewrite <asm/div64.h> to work with gcc 4.4.0.")
+Reported-by: Huacai Chen <chenhuacai@kernel.org>
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v2.6.30+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/include/asm/div64.h | 57 ++++++++++++++++++++++++++++++------------
+ 1 file changed, 41 insertions(+), 16 deletions(-)
+
+--- a/arch/mips/include/asm/div64.h
++++ b/arch/mips/include/asm/div64.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2000, 2004 Maciej W. Rozycki
++ * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
+ * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+@@ -9,25 +9,18 @@
+ #ifndef __ASM_DIV64_H
+ #define __ASM_DIV64_H
+
+-#include <asm-generic/div64.h>
+-
+-#if BITS_PER_LONG == 64
++#include <asm/bitsperlong.h>
+
+-#include <linux/types.h>
++#if BITS_PER_LONG == 32
+
+ /*
+ * No traps on overflows for any of these...
+ */
+
+-#define __div64_32(n, base) \
+-({ \
++#define do_div64_32(res, high, low, base) ({ \
+ unsigned long __cf, __tmp, __tmp2, __i; \
+ unsigned long __quot32, __mod32; \
+- unsigned long __high, __low; \
+- unsigned long long __n; \
+ \
+- __high = *__n >> 32; \
+- __low = __n; \
+ __asm__( \
+ " .set push \n" \
+ " .set noat \n" \
+@@ -51,18 +44,50 @@
+ " subu %0, %0, %z6 \n" \
+ " addiu %2, %2, 1 \n" \
+ "3: \n" \
+- " bnez %4, 0b\n\t" \
+- " srl %5, %1, 0x1f\n\t" \
++ " bnez %4, 0b \n" \
++ " srl %5, %1, 0x1f \n" \
+ " .set pop" \
+ : "=&r" (__mod32), "=&r" (__tmp), \
+ "=&r" (__quot32), "=&r" (__cf), \
+ "=&r" (__i), "=&r" (__tmp2) \
+- : "Jr" (base), "0" (__high), "1" (__low)); \
++ : "Jr" (base), "0" (high), "1" (low)); \
+ \
+- (__n) = __quot32; \
++ (res) = __quot32; \
+ __mod32; \
+ })
+
+-#endif /* BITS_PER_LONG == 64 */
++#define __div64_32(n, base) ({ \
++ unsigned long __upper, __low, __high, __radix; \
++ unsigned long long __modquot; \
++ unsigned long long __quot; \
++ unsigned long long __div; \
++ unsigned long __mod; \
++ \
++ __div = (*n); \
++ __radix = (base); \
++ \
++ __high = __div >> 32; \
++ __low = __div; \
++ __upper = __high; \
++ \
++ if (__high) { \
++ __asm__("divu $0, %z1, %z2" \
++ : "=x" (__modquot) \
++ : "Jr" (__high), "Jr" (__radix)); \
++ __upper = __modquot >> 32; \
++ __high = __modquot; \
++ } \
++ \
++ __mod = do_div64_32(__low, __upper, __low, __radix); \
++ \
++ __quot = __high; \
++ __quot = __quot << 32 | __low; \
++ (*n) = __quot; \
++ __mod; \
++})
++
++#endif /* BITS_PER_LONG == 32 */
++
++#include <asm-generic/div64.h>
+
+ #endif /* __ASM_DIV64_H */
misc-lis3lv02d-fix-false-positive-warn-on-various-hp-models.patch
misc-vmw_vmci-explicitly-initialize-vmci_notify_bm_set_msg-struct.patch
misc-vmw_vmci-explicitly-initialize-vmci_datagram-payload.patch
+tracing-treat-recording-comm-for-idle-task-as-a-success.patch
+tracing-use-strlcpy-instead-of-strcpy-in-__trace_find_cmdline.patch
+tracing-map-all-pids-to-command-lines.patch
+tracing-restructure-trace_clock_global-to-never-block.patch
+md-cluster-fix-use-after-free-issue-when-removing-rdev.patch
+md-factor-out-a-mddev_find_locked-helper-from-mddev_find.patch
+md-md_open-returns-ebusy-when-entering-racing-area.patch
+mips-reinstate-platform-__div64_32-handler.patch
+ipw2x00-potential-buffer-overflow-in-libipw_wx_set_encodeext.patch
+cfg80211-scan-drop-entry-from-hidden_list-on-overflow.patch
--- /dev/null
+From 785e3c0a3a870e72dc530856136ab4c8dd207128 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Tue, 27 Apr 2021 11:32:07 -0400
+Subject: tracing: Map all PIDs to command lines
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 785e3c0a3a870e72dc530856136ab4c8dd207128 upstream.
+
+The default max PID is set by PID_MAX_DEFAULT, and the tracing
+infrastructure uses this number to map PIDs to the comm names of the
+tasks, such output of the trace can show names from the recorded PIDs in
+the ring buffer. This mapping is also exported to user space via the
+"saved_cmdlines" file in the tracefs directory.
+
+But currently the mapping expects the PIDs to be less than
+PID_MAX_DEFAULT, which is the default maximum and not the real maximum.
+Recently, systemd has increased the maximum value of a PID on the system,
+and when tasks that have a PID higher than PID_MAX_DEFAULT are traced, their
+comm is not recorded. This causes the entire trace to have "<...>" as
+the comm name, which is pretty useless.
+
+Instead, keep the array mapping the size of PID_MAX_DEFAULT, but instead
+of just mapping the index to the comm, map a mask of the PID
+(PID_MAX_DEFAULT - 1) to the comm, and find the full PID from the
+map_cmdline_to_pid array (that already exists).
+
+This bug goes back to the beginning of ftrace, but hasn't been an issue
+until user space started increasing the maximum value of PIDs.
+
+Link: https://lkml.kernel.org/r/20210427113207.3c601884@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: bc0c38d139ec7 ("ftrace: latency tracer infrastructure")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 41 +++++++++++++++--------------------------
+ 1 file changed, 15 insertions(+), 26 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1809,14 +1809,13 @@ void trace_stop_cmdline_recording(void);
+
+ static int trace_save_cmdline(struct task_struct *tsk)
+ {
+- unsigned pid, idx;
++ unsigned tpid, idx;
+
+ /* treat recording of idle task as a success */
+ if (!tsk->pid)
+ return 1;
+
+- if (unlikely(tsk->pid > PID_MAX_DEFAULT))
+- return 0;
++ tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
+
+ /*
+ * It's not the end of the world if we don't get
+@@ -1827,26 +1826,15 @@ static int trace_save_cmdline(struct tas
+ if (!arch_spin_trylock(&trace_cmdline_lock))
+ return 0;
+
+- idx = savedcmd->map_pid_to_cmdline[tsk->pid];
++ idx = savedcmd->map_pid_to_cmdline[tpid];
+ if (idx == NO_CMDLINE_MAP) {
+ idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
+
+- /*
+- * Check whether the cmdline buffer at idx has a pid
+- * mapped. We are going to overwrite that entry so we
+- * need to clear the map_pid_to_cmdline. Otherwise we
+- * would read the new comm for the old pid.
+- */
+- pid = savedcmd->map_cmdline_to_pid[idx];
+- if (pid != NO_CMDLINE_MAP)
+- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+-
+- savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+- savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
+-
++ savedcmd->map_pid_to_cmdline[tpid] = idx;
+ savedcmd->cmdline_idx = idx;
+ }
+
++ savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+ set_cmdline(idx, tsk->comm);
+
+ arch_spin_unlock(&trace_cmdline_lock);
+@@ -1857,6 +1845,7 @@ static int trace_save_cmdline(struct tas
+ static void __trace_find_cmdline(int pid, char comm[])
+ {
+ unsigned map;
++ int tpid;
+
+ if (!pid) {
+ strcpy(comm, "<idle>");
+@@ -1868,16 +1857,16 @@ static void __trace_find_cmdline(int pid
+ return;
+ }
+
+- if (pid > PID_MAX_DEFAULT) {
+- strcpy(comm, "<...>");
+- return;
++ tpid = pid & (PID_MAX_DEFAULT - 1);
++ map = savedcmd->map_pid_to_cmdline[tpid];
++ if (map != NO_CMDLINE_MAP) {
++ tpid = savedcmd->map_cmdline_to_pid[map];
++ if (tpid == pid) {
++ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
++ return;
++ }
+ }
+-
+- map = savedcmd->map_pid_to_cmdline[pid];
+- if (map != NO_CMDLINE_MAP)
+- strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+- else
+- strcpy(comm, "<...>");
++ strcpy(comm, "<...>");
+ }
+
+ void trace_find_cmdline(int pid, char comm[])
--- /dev/null
+From aafe104aa9096827a429bc1358f8260ee565b7cc Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 30 Apr 2021 12:17:58 -0400
+Subject: tracing: Restructure trace_clock_global() to never block
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit aafe104aa9096827a429bc1358f8260ee565b7cc upstream.
+
+It was reported that a fix to the ring buffer recursion detection would
+cause a hung machine when performing suspend / resume testing. The
+following backtrace was extracted from debugging that case:
+
+Call Trace:
+ trace_clock_global+0x91/0xa0
+ __rb_reserve_next+0x237/0x460
+ ring_buffer_lock_reserve+0x12a/0x3f0
+ trace_buffer_lock_reserve+0x10/0x50
+ __trace_graph_return+0x1f/0x80
+ trace_graph_return+0xb7/0xf0
+ ? trace_clock_global+0x91/0xa0
+ ftrace_return_to_handler+0x8b/0xf0
+ ? pv_hash+0xa0/0xa0
+ return_to_handler+0x15/0x30
+ ? ftrace_graph_caller+0xa0/0xa0
+ ? trace_clock_global+0x91/0xa0
+ ? __rb_reserve_next+0x237/0x460
+ ? ring_buffer_lock_reserve+0x12a/0x3f0
+ ? trace_event_buffer_lock_reserve+0x3c/0x120
+ ? trace_event_buffer_reserve+0x6b/0xc0
+ ? trace_event_raw_event_device_pm_callback_start+0x125/0x2d0
+ ? dpm_run_callback+0x3b/0xc0
+ ? pm_ops_is_empty+0x50/0x50
+ ? platform_get_irq_byname_optional+0x90/0x90
+ ? trace_device_pm_callback_start+0x82/0xd0
+ ? dpm_run_callback+0x49/0xc0
+
+With the following RIP:
+
+RIP: 0010:native_queued_spin_lock_slowpath+0x69/0x200
+
+Since the fix to the recursion detection would allow a single recursion to
+happen while tracing, this led to trace_clock_global() taking a spin
+lock and then trying to take it again:
+
+ring_buffer_lock_reserve() {
+ trace_clock_global() {
+ arch_spin_lock() {
+ queued_spin_lock_slowpath() {
+ /* lock taken */
+ (something else gets traced by function graph tracer)
+ ring_buffer_lock_reserve() {
+ trace_clock_global() {
+ arch_spin_lock() {
+ queued_spin_lock_slowpath() {
+ /* DEAD LOCK! */
+
+Tracing should *never* block, as it can lead to strange lockups like the
+above.
+
+Restructure the trace_clock_global() code to instead of simply taking a
+lock to update the recorded "prev_time" simply use it, as two events
+happening on two different CPUs that calls this at the same time, really
+doesn't matter which one goes first. Use a trylock to grab the lock for
+updating the prev_time, and if it fails, simply try again the next time.
+If it failed to be taken, that means something else is already updating
+it.
+
+Link: https://lkml.kernel.org/r/20210430121758.650b6e8a@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Tested-by: Konstantin Kharlamov <hi-angel@yandex.ru>
+Tested-by: Todd Brandt <todd.e.brandt@linux.intel.com>
+Fixes: b02414c8f045 ("ring-buffer: Fix recursion protection transitions between interrupt context") # started showing the problem
+Fixes: 14131f2f98ac3 ("tracing: implement trace_clock_*() APIs") # where the bug happened
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=212761
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_clock.c | 48 ++++++++++++++++++++++++++++++---------------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -93,33 +93,49 @@ u64 notrace trace_clock_global(void)
+ {
+ unsigned long flags;
+ int this_cpu;
+- u64 now;
++ u64 now, prev_time;
+
+ local_irq_save(flags);
+
+ this_cpu = raw_smp_processor_id();
+- now = sched_clock_cpu(this_cpu);
++
+ /*
+- * If in an NMI context then dont risk lockups and return the
+- * cpu_clock() time:
++ * The global clock "guarantees" that the events are ordered
++ * between CPUs. But if two events on two different CPUS call
++ * trace_clock_global at roughly the same time, it really does
++ * not matter which one gets the earlier time. Just make sure
++ * that the same CPU will always show a monotonic clock.
++ *
++ * Use a read memory barrier to get the latest written
++ * time that was recorded.
+ */
+- if (unlikely(in_nmi()))
+- goto out;
++ smp_rmb();
++ prev_time = READ_ONCE(trace_clock_struct.prev_time);
++ now = sched_clock_cpu(this_cpu);
+
+- arch_spin_lock(&trace_clock_struct.lock);
++ /* Make sure that now is always greater than prev_time */
++ if ((s64)(now - prev_time) < 0)
++ now = prev_time + 1;
+
+ /*
+- * TODO: if this happens often then maybe we should reset
+- * my_scd->clock to prev_time+1, to make sure
+- * we start ticking with the local clock from now on?
++ * If in an NMI context then dont risk lockups and simply return
++ * the current time.
+ */
+- if ((s64)(now - trace_clock_struct.prev_time) < 0)
+- now = trace_clock_struct.prev_time + 1;
+-
+- trace_clock_struct.prev_time = now;
+-
+- arch_spin_unlock(&trace_clock_struct.lock);
++ if (unlikely(in_nmi()))
++ goto out;
+
++ /* Tracing can cause strange recursion, always use a try lock */
++ if (arch_spin_trylock(&trace_clock_struct.lock)) {
++ /* Reread prev_time in case it was already updated */
++ prev_time = READ_ONCE(trace_clock_struct.prev_time);
++ if ((s64)(now - prev_time) < 0)
++ now = prev_time + 1;
++
++ trace_clock_struct.prev_time = now;
++
++ /* The unlock acts as the wmb for the above rmb */
++ arch_spin_unlock(&trace_clock_struct.lock);
++ }
+ out:
+ local_irq_restore(flags);
+
--- /dev/null
+From eaf260ac04d9b4cf9f458d5c97555bfff2da526e Mon Sep 17 00:00:00 2001
+From: Joel Fernandes <joelaf@google.com>
+Date: Thu, 6 Jul 2017 16:00:21 -0700
+Subject: tracing: Treat recording comm for idle task as a success
+
+From: Joel Fernandes <joelaf@google.com>
+
+commit eaf260ac04d9b4cf9f458d5c97555bfff2da526e upstream.
+
+Currently we stop recording comm for non-idle tasks when switching from/to idle
+task since we treat that as a record failure. Fix that by treating recording of
+comm for idle task as a success.
+
+Link: http://lkml.kernel.org/r/20170706230023.17942-1-joelaf@google.com
+
+Cc: kernel-team@android.com
+Cc: Ingo Molnar <mingo@redhat.com>
+Reported-by: Michael Sartain <mikesart@gmail.com>
+Signed-off-by: Joel Fernandes <joelaf@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1811,7 +1811,11 @@ static int trace_save_cmdline(struct tas
+ {
+ unsigned pid, idx;
+
+- if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
++ /* treat recording of idle task as a success */
++ if (!tsk->pid)
++ return 1;
++
++ if (unlikely(tsk->pid > PID_MAX_DEFAULT))
+ return 0;
+
+ /*
--- /dev/null
+From e09e28671cda63e6308b31798b997639120e2a21 Mon Sep 17 00:00:00 2001
+From: Amey Telawane <ameyt@codeaurora.org>
+Date: Wed, 3 May 2017 15:41:14 +0530
+Subject: tracing: Use strlcpy() instead of strcpy() in __trace_find_cmdline()
+
+From: Amey Telawane <ameyt@codeaurora.org>
+
+commit e09e28671cda63e6308b31798b997639120e2a21 upstream.
+
+Strcpy is inherently not safe, and strlcpy() should be used instead.
+__trace_find_cmdline() uses strcpy() because the comms saved must have a
+terminating nul character, but it doesn't hurt to add the extra protection
+of using strlcpy() instead of strcpy().
+
+Link: http://lkml.kernel.org/r/1493806274-13936-1-git-send-email-amit.pundir@linaro.org
+
+Signed-off-by: Amey Telawane <ameyt@codeaurora.org>
+[AmitP: Cherry-picked this commit from CodeAurora kernel/msm-3.10
+https://source.codeaurora.org/quic/la/kernel/msm-3.10/commit/?id=2161ae9a70b12cf18ac8e5952a20161ffbccb477]
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+[ Updated change log and removed the "- 1" from len parameter ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1875,7 +1875,7 @@ static void __trace_find_cmdline(int pid
+
+ map = savedcmd->map_pid_to_cmdline[pid];
+ if (map != NO_CMDLINE_MAP)
+- strcpy(comm, get_saved_cmdlines(map));
++ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+ else
+ strcpy(comm, "<...>");
+ }