--- /dev/null
+From 77251e41f89a813b4090f5199442f217bbf11297 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 4 Jun 2020 11:52:53 -0700
+Subject: crypto: algboss - don't wait during notifier callback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 77251e41f89a813b4090f5199442f217bbf11297 upstream.
+
+When a crypto template needs to be instantiated, CRYPTO_MSG_ALG_REQUEST
+is sent to crypto_chain. cryptomgr_schedule_probe() handles this by
+starting a thread to instantiate the template, then waiting for this
+thread to complete via crypto_larval::completion.
+
+This can deadlock because instantiating the template may require loading
+modules, and this (apparently depending on userspace) may need to wait
+for the crc-t10dif module (lib/crc-t10dif.c) to be loaded. But
+crc-t10dif's module_init function uses crypto_register_notifier() and
+therefore takes crypto_chain.rwsem for write. That can't proceed until
+the notifier callback has finished, as it holds this semaphore for read.
+
+Fix this by removing the wait on crypto_larval::completion from within
+cryptomgr_schedule_probe(). It's actually unnecessary because
+crypto_alg_mod_lookup() calls crypto_larval_wait() itself after sending
+CRYPTO_MSG_ALG_REQUEST.
+
+This only actually became a problem in v4.20 due to commit b76377543b73
+("crc-t10dif: Pick better transform if one becomes available"), but the
+unnecessary wait was much older.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=207159
+Reported-by: Mike Gerow <gerow@google.com>
+Fixes: 398710379f51 ("crypto: algapi - Move larval completion into algboss")
+Cc: <stable@vger.kernel.org> # v3.6+
+Cc: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reported-by: Kai Lüke <kai@kinvolk.io>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algboss.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/crypto/algboss.c
++++ b/crypto/algboss.c
+@@ -178,8 +178,6 @@ static int cryptomgr_schedule_probe(stru
+ if (IS_ERR(thread))
+ goto err_put_larval;
+
+- wait_for_completion_interruptible(&larval->completion);
+-
+ return NOTIFY_STOP;
+
+ err_put_larval:
--- /dev/null
+From 7cf81954705b7e5b057f7dc39a7ded54422ab6e1 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 29 May 2020 14:54:43 +1000
+Subject: crypto: algif_skcipher - Cap recv SG list at ctx->used
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 7cf81954705b7e5b057f7dc39a7ded54422ab6e1 upstream.
+
+Somewhere along the line the cap on the SG list length for receive
+was lost. This patch restores it and removes the subsequent test
+which is now redundant.
+
+Fixes: 2d97591ef43d ("crypto: af_alg - consolidation of...")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Stephan Mueller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_skcipher.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct sock
+ return PTR_ERR(areq);
+
+ /* convert iovecs of output buffers into RX SGL */
+- err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
++ err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
+ if (err)
+ goto free;
+
+- /* Process only as much RX buffers for which we have TX data */
+- if (len > ctx->used)
+- len = ctx->used;
+-
+ /*
+ * If more buffers are to be expected to be processed, process only
+ * full block size buffers.
--- /dev/null
+From 63d0f3ea8ebb67160eca281320d255c72b0cb51a Mon Sep 17 00:00:00 2001
+From: Swathi Dhanavanthri <swathi.dhanavanthri@intel.com>
+Date: Thu, 26 Mar 2020 16:49:55 -0700
+Subject: drm/i915/tgl: Make Wa_14010229206 permanent
+
+From: Swathi Dhanavanthri <swathi.dhanavanthri@intel.com>
+
+commit 63d0f3ea8ebb67160eca281320d255c72b0cb51a upstream.
+
+This workaround now applies to all steppings, not just A0.
+Wa_1409085225 is a temporary A0-only W/A however it is
+identical to Wa_14010229206 and hence the combined workaround
+is made permanent.
+Bspec: 52890
+
+Signed-off-by: Swathi Dhanavanthri <swathi.dhanavanthri@intel.com>
+Tested-by: Rafael Antognolli <rafael.antognolli@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+[mattrope: added missing blank line]
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200326234955.16155-1-swathi.dhanavanthri@intel.com
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_workarounds.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -1620,12 +1620,6 @@ rcs_engine_wa_init(struct intel_engine_c
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+- /*
+- * Wa_1409085225:tgl
+- * Wa_14010229206:tgl
+- */
+- wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+-
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+@@ -1643,6 +1637,12 @@ rcs_engine_wa_init(struct intel_engine_c
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
++
++ /*
++ * Wa_1409085225:tgl
++ * Wa_14010229206:tgl
++ */
++ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+ }
+
+ if (IS_GEN(i915, 11)) {
--- /dev/null
+From 6bf6be1127f7e6d4bf39f84d56854e944d045d74 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 22 May 2020 01:59:00 +0800
+Subject: e1000e: Do not wake up the system via WOL if device wakeup is disabled
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit 6bf6be1127f7e6d4bf39f84d56854e944d045d74 upstream.
+
+Currently the system will be woken up via WOL(Wake On LAN) even if the
+device wakeup ability has been disabled via sysfs:
+ cat /sys/devices/pci0000:00/0000:00:1f.6/power/wakeup
+ disabled
+
+The system should not be woken up if the user has explicitly
+disabled the wake up ability for this device.
+
+This patch clears the WOL ability of this network device if the
+user has disabled the wake up ability in sysfs.
+
+Fixes: bc7f75fa9788 ("[E1000E]: New pci-express e1000 driver")
+Reported-by: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/intel/e1000e/netdev.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6518,11 +6518,17 @@ static int __e1000_shutdown(struct pci_d
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+- u32 ctrl, ctrl_ext, rctl, status;
+- /* Runtime suspend should only enable wakeup for link changes */
+- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
++ u32 ctrl, ctrl_ext, rctl, status, wufc;
+ int retval = 0;
+
++ /* Runtime suspend should only enable wakeup for link changes */
++ if (runtime)
++ wufc = E1000_WUFC_LNKC;
++ else if (device_may_wakeup(&pdev->dev))
++ wufc = adapter->wol;
++ else
++ wufc = 0;
++
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+@@ -6579,7 +6585,7 @@ static int __e1000_shutdown(struct pci_d
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
+ e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if (hw->mac.type >= e1000_pch_lpt) {
+- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
--- /dev/null
+From 1a0aa991a6274161c95a844c58cfb801d681eb59 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Tue, 12 May 2020 17:02:56 +0900
+Subject: kprobes: Fix to protect kick_kprobe_optimizer() by kprobe_mutex
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 1a0aa991a6274161c95a844c58cfb801d681eb59 upstream.
+
+In kprobe_optimizer() kick_kprobe_optimizer() is called
+without kprobe_mutex, but this can race with other caller
+which is protected by kprobe_mutex.
+
+To fix that, expand kprobe_mutex protected area to protect
+kick_kprobe_optimizer() call.
+
+Link: http://lkml.kernel.org/r/158927057586.27680.5036330063955940456.stgit@devnote2
+
+Fixes: cd7ebe2298ff ("kprobes: Use text_poke_smp_batch for optimizing")
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Gustavo A . R . Silva" <gustavoars@kernel.org>
+Cc: Anders Roxell <anders.roxell@linaro.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: David Miller <davem@davemloft.net>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ziqian SUN <zsun@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kprobes.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -586,11 +586,12 @@ static void kprobe_optimizer(struct work
+ mutex_unlock(&module_mutex);
+ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+- mutex_unlock(&kprobe_mutex);
+
+ /* Step 5: Kick optimizer again if needed */
+ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
+ kick_kprobe_optimizer();
++
++ mutex_unlock(&kprobe_mutex);
+ }
+
+ /* Wait for completing optimization and unoptimization */
--- /dev/null
+From 9b38cc704e844e41d9cf74e647bff1d249512cb3 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Tue, 12 May 2020 17:03:18 +0900
+Subject: kretprobe: Prevent triggering kretprobe from within kprobe_flush_task
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 9b38cc704e844e41d9cf74e647bff1d249512cb3 upstream.
+
+Ziqian reported lockup when adding retprobe on _raw_spin_lock_irqsave.
+My test was also able to trigger lockdep output:
+
+ ============================================
+ WARNING: possible recursive locking detected
+ 5.6.0-rc6+ #6 Not tainted
+ --------------------------------------------
+ sched-messaging/2767 is trying to acquire lock:
+ ffffffff9a492798 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_hash_lock+0x52/0xa0
+
+ but task is already holding lock:
+ ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&(kretprobe_table_locks[i].lock));
+ lock(&(kretprobe_table_locks[i].lock));
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 1 lock held by sched-messaging/2767:
+ #0: ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50
+
+ stack backtrace:
+ CPU: 3 PID: 2767 Comm: sched-messaging Not tainted 5.6.0-rc6+ #6
+ Call Trace:
+ dump_stack+0x96/0xe0
+ __lock_acquire.cold.57+0x173/0x2b7
+ ? native_queued_spin_lock_slowpath+0x42b/0x9e0
+ ? lockdep_hardirqs_on+0x590/0x590
+ ? __lock_acquire+0xf63/0x4030
+ lock_acquire+0x15a/0x3d0
+ ? kretprobe_hash_lock+0x52/0xa0
+ _raw_spin_lock_irqsave+0x36/0x70
+ ? kretprobe_hash_lock+0x52/0xa0
+ kretprobe_hash_lock+0x52/0xa0
+ trampoline_handler+0xf8/0x940
+ ? kprobe_fault_handler+0x380/0x380
+ ? find_held_lock+0x3a/0x1c0
+ kretprobe_trampoline+0x25/0x50
+ ? lock_acquired+0x392/0xbc0
+ ? _raw_spin_lock_irqsave+0x50/0x70
+ ? __get_valid_kprobe+0x1f0/0x1f0
+ ? _raw_spin_unlock_irqrestore+0x3b/0x40
+ ? finish_task_switch+0x4b9/0x6d0
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+
+The code within the kretprobe handler checks for probe reentrancy,
+so we won't trigger any _raw_spin_lock_irqsave probe in there.
+
+The problem is in outside kprobe_flush_task, where we call:
+
+ kprobe_flush_task
+ kretprobe_table_lock
+ raw_spin_lock_irqsave
+ _raw_spin_lock_irqsave
+
+where _raw_spin_lock_irqsave triggers the kretprobe and installs
+kretprobe_trampoline handler on _raw_spin_lock_irqsave return.
+
+The kretprobe_trampoline handler is then executed with already
+locked kretprobe_table_locks, and first thing it does is to
+lock kretprobe_table_locks ;-) the whole lockup path like:
+
+ kprobe_flush_task
+ kretprobe_table_lock
+ raw_spin_lock_irqsave
+ _raw_spin_lock_irqsave ---> probe triggered, kretprobe_trampoline installed
+
+ ---> kretprobe_table_locks locked
+
+ kretprobe_trampoline
+ trampoline_handler
+ kretprobe_hash_lock(current, &head, &flags); <--- deadlock
+
+Adding kprobe_busy_begin/end helpers that mark code with fake
+probe installed to prevent triggering of another kprobe within
+this code.
+
+Using these helpers in kprobe_flush_task, so the probe recursion
+protection check is hit and the probe is never set to prevent
+above lockup.
+
+Link: http://lkml.kernel.org/r/158927059835.27680.7011202830041561604.stgit@devnote2
+
+Fixes: ef53d9c5e4da ("kprobes: improve kretprobe scalability with hashed locking")
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Gustavo A . R . Silva" <gustavoars@kernel.org>
+Cc: Anders Roxell <anders.roxell@linaro.org>
+Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: David Miller <davem@davemloft.net>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Reported-by: "Ziqian SUN (Zamir)" <zsun@redhat.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/core.c | 16 +++-------------
+ include/linux/kprobes.h | 4 ++++
+ kernel/kprobes.c | 24 ++++++++++++++++++++++++
+ 3 files changed, 31 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -753,16 +753,11 @@ asm(
+ NOKPROBE_SYMBOL(kretprobe_trampoline);
+ STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
+
+-static struct kprobe kretprobe_kprobe = {
+- .addr = (void *)kretprobe_trampoline,
+-};
+-
+ /*
+ * Called from kretprobe_trampoline
+ */
+ __used __visible void *trampoline_handler(struct pt_regs *regs)
+ {
+- struct kprobe_ctlblk *kcb;
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+@@ -772,16 +767,12 @@ __used __visible void *trampoline_handle
+ void *frame_pointer;
+ bool skipped = false;
+
+- preempt_disable();
+-
+ /*
+ * Set a dummy kprobe for avoiding kretprobe recursion.
+ * Since kretprobe never run in kprobe handler, kprobe must not
+ * be running at this point.
+ */
+- kcb = get_kprobe_ctlblk();
+- __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
++ kprobe_busy_begin();
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+@@ -857,7 +848,7 @@ __used __visible void *trampoline_handle
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+- __this_cpu_write(current_kprobe, &kretprobe_kprobe);
++ __this_cpu_write(current_kprobe, &kprobe_busy);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+@@ -873,8 +864,7 @@ __used __visible void *trampoline_handle
+
+ kretprobe_hash_unlock(current, &flags);
+
+- __this_cpu_write(current_kprobe, NULL);
+- preempt_enable();
++ kprobe_busy_end();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_
+ return this_cpu_ptr(&kprobe_ctlblk);
+ }
+
++extern struct kprobe kprobe_busy;
++void kprobe_busy_begin(void);
++void kprobe_busy_end(void);
++
+ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+ int register_kprobe(struct kprobe *p);
+ void unregister_kprobe(struct kprobe *p);
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1237,6 +1237,26 @@ __releases(hlist_lock)
+ }
+ NOKPROBE_SYMBOL(kretprobe_table_unlock);
+
++struct kprobe kprobe_busy = {
++ .addr = (void *) get_kprobe,
++};
++
++void kprobe_busy_begin(void)
++{
++ struct kprobe_ctlblk *kcb;
++
++ preempt_disable();
++ __this_cpu_write(current_kprobe, &kprobe_busy);
++ kcb = get_kprobe_ctlblk();
++ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
++}
++
++void kprobe_busy_end(void)
++{
++ __this_cpu_write(current_kprobe, NULL);
++ preempt_enable();
++}
++
+ /*
+ * This function is called from finish_task_switch when task tk becomes dead,
+ * so that we can recycle any function-return probe instances associated
+@@ -1254,6 +1274,8 @@ void kprobe_flush_task(struct task_struc
+ /* Early boot. kretprobe_table_locks not yet initialized. */
+ return;
+
++ kprobe_busy_begin();
++
+ INIT_HLIST_HEAD(&empty_rp);
+ hash = hash_ptr(tk, KPROBE_HASH_BITS);
+ head = &kretprobe_inst_table[hash];
+@@ -1267,6 +1289,8 @@ void kprobe_flush_task(struct task_struc
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
++
++ kprobe_busy_end();
+ }
+ NOKPROBE_SYMBOL(kprobe_flush_task);
+
--- /dev/null
+From 11d6011c2cf29f7c8181ebde6c8bc0c4d83adcd7 Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Wed, 3 Jun 2020 16:49:44 +0200
+Subject: net: core: device_rename: Use rwsem instead of a seqcount
+
+From: Ahmed S. Darwish <a.darwish@linutronix.de>
+
+commit 11d6011c2cf29f7c8181ebde6c8bc0c4d83adcd7 upstream.
+
+Sequence counters write paths are critical sections that must never be
+preempted, and blocking, even for CONFIG_PREEMPTION=n, is not allowed.
+
+Commit 5dbe7c178d3f ("net: fix kernel deadlock with interface rename and
+netdev name retrieval.") handled a deadlock, observed with
+CONFIG_PREEMPTION=n, where the devnet_rename seqcount read side was
+infinitely spinning: it got scheduled after the seqcount write side
+blocked inside its own critical section.
+
+To fix that deadlock, among other issues, the commit added a
+cond_resched() inside the read side section. While this will get the
+non-preemptible kernel eventually unstuck, the seqcount reader is fully
+exhausting its slice just spinning -- until TIF_NEED_RESCHED is set.
+
+The fix is also still broken: if the seqcount reader belongs to a
+real-time scheduling policy, it can spin forever and the kernel will
+livelock.
+
+Disabling preemption over the seqcount write side critical section will
+not work: inside it are a number of GFP_KERNEL allocations and mutex
+locking through the drivers/base/ :: device_rename() call chain.
+
+>From all the above, replace the seqcount with a rwsem.
+
+Fixes: 5dbe7c178d3f (net: fix kernel deadlock with interface rename and netdev name retrieval.)
+Fixes: 30e6c9fa93cf (net: devnet_rename_seq should be a seqcount)
+Fixes: c91f6df2db49 (sockopt: Change getsockopt() of SO_BINDTODEVICE to return an interface name)
+Cc: <stable@vger.kernel.org>
+Reported-by: kbuild test robot <lkp@intel.com> [ v1 missing up_read() on error exit ]
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com> [ v1 missing up_read() on error exit ]
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/dev.c | 40 ++++++++++++++++++----------------------
+ 1 file changed, 18 insertions(+), 22 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -79,6 +79,7 @@
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/mutex.h>
++#include <linux/rwsem.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/socket.h>
+@@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
+ static unsigned int napi_gen_id = NR_CPUS;
+ static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
+
+-static seqcount_t devnet_rename_seq;
++static DECLARE_RWSEM(devnet_rename_sem);
+
+ static inline void dev_base_seq_inc(struct net *net)
+ {
+@@ -930,33 +931,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
+ * @net: network namespace
+ * @name: a pointer to the buffer where the name will be stored.
+ * @ifindex: the ifindex of the interface to get the name from.
+- *
+- * The use of raw_seqcount_begin() and cond_resched() before
+- * retrying is required as we want to give the writers a chance
+- * to complete when CONFIG_PREEMPTION is not set.
+ */
+ int netdev_get_name(struct net *net, char *name, int ifindex)
+ {
+ struct net_device *dev;
+- unsigned int seq;
++ int ret;
+
+-retry:
+- seq = raw_seqcount_begin(&devnet_rename_seq);
++ down_read(&devnet_rename_sem);
+ rcu_read_lock();
++
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+- rcu_read_unlock();
+- return -ENODEV;
++ ret = -ENODEV;
++ goto out;
+ }
+
+ strcpy(name, dev->name);
+- rcu_read_unlock();
+- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
+- cond_resched();
+- goto retry;
+- }
+
+- return 0;
++ ret = 0;
++out:
++ rcu_read_unlock();
++ up_read(&devnet_rename_sem);
++ return ret;
+ }
+
+ /**
+@@ -1228,10 +1224,10 @@ int dev_change_name(struct net_device *d
+ likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
+ return -EBUSY;
+
+- write_seqcount_begin(&devnet_rename_seq);
++ down_write(&devnet_rename_sem);
+
+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+- write_seqcount_end(&devnet_rename_seq);
++ up_write(&devnet_rename_sem);
+ return 0;
+ }
+
+@@ -1239,7 +1235,7 @@ int dev_change_name(struct net_device *d
+
+ err = dev_get_valid_name(net, dev, newname);
+ if (err < 0) {
+- write_seqcount_end(&devnet_rename_seq);
++ up_write(&devnet_rename_sem);
+ return err;
+ }
+
+@@ -1254,11 +1250,11 @@ rollback:
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
+- write_seqcount_end(&devnet_rename_seq);
++ up_write(&devnet_rename_sem);
+ return ret;
+ }
+
+- write_seqcount_end(&devnet_rename_seq);
++ up_write(&devnet_rename_sem);
+
+ netdev_adjacent_rename_links(dev, oldname);
+
+@@ -1279,7 +1275,7 @@ rollback:
+ /* err >= 0 after dev_alloc_name() or stores the first errno */
+ if (err >= 0) {
+ err = ret;
+- write_seqcount_begin(&devnet_rename_seq);
++ down_write(&devnet_rename_sem);
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ memcpy(oldname, newname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
--- /dev/null
+From 0c34bb598c510e070160029f34efeeb217000f8d Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Date: Fri, 29 May 2020 14:17:10 +0200
+Subject: net: octeon: mgmt: Repair filling of RX ring
+
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+
+commit 0c34bb598c510e070160029f34efeeb217000f8d upstream.
+
+The removal of mips_swiotlb_ops exposed a problem in octeon_mgmt Ethernet
+driver. mips_swiotlb_ops had an mb() after most of the operations and the
+removal of the ops had broken the receive functionality of the driver.
+My code inspection has shown no other places except
+octeon_mgmt_rx_fill_ring() where an explicit barrier would be obviously
+missing. The latter function however has to make sure that "ringing the
+bell" doesn't happen before RX ring entry is really written.
+
+The patch has been successfully tested on Octeon II.
+
+Fixes: a999933db9ed ("MIPS: remove mips_swiotlb_ops")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+@@ -234,6 +234,11 @@ static void octeon_mgmt_rx_fill_ring(str
+
+ /* Put it in the ring. */
+ p->rx_ring[p->rx_next_fill] = re.d64;
++ /* Make sure there is no reorder of filling the ring and ringing
++ * the bell
++ */
++ wmb();
++
+ dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
--- /dev/null
+From c3829285b2e6a0d5461078d7f6cbb2c2b4bf8c4e Mon Sep 17 00:00:00 2001
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Mon, 8 Jun 2020 10:50:29 +0200
+Subject: netfilter: nft_set_pipapo: Disable preemption before getting per-CPU pointer
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+commit c3829285b2e6a0d5461078d7f6cbb2c2b4bf8c4e upstream.
+
+The lkp kernel test robot reports, with CONFIG_DEBUG_PREEMPT enabled:
+
+ [ 165.316525] BUG: using smp_processor_id() in preemptible [00000000] code: nft/6247
+ [ 165.319547] caller is nft_pipapo_insert+0x464/0x610 [nf_tables]
+ [ 165.321846] CPU: 1 PID: 6247 Comm: nft Not tainted 5.6.0-rc5-01595-ge32a4dc6512ce3 #1
+ [ 165.332128] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ [ 165.334892] Call Trace:
+ [ 165.336435] dump_stack+0x8f/0xcb
+ [ 165.338128] debug_smp_processor_id+0xb2/0xc0
+ [ 165.340117] nft_pipapo_insert+0x464/0x610 [nf_tables]
+ [ 165.342290] ? nft_trans_alloc_gfp+0x1c/0x60 [nf_tables]
+ [ 165.344420] ? rcu_read_lock_sched_held+0x52/0x80
+ [ 165.346460] ? nft_trans_alloc_gfp+0x1c/0x60 [nf_tables]
+ [ 165.348543] ? __mmu_interval_notifier_insert+0xa0/0xf0
+ [ 165.350629] nft_add_set_elem+0x5ff/0xa90 [nf_tables]
+ [ 165.352699] ? __lock_acquire+0x241/0x1400
+ [ 165.354573] ? __lock_acquire+0x241/0x1400
+ [ 165.356399] ? reacquire_held_locks+0x12f/0x200
+ [ 165.358384] ? nf_tables_valid_genid+0x1f/0x40 [nf_tables]
+ [ 165.360502] ? nla_strcmp+0x10/0x50
+ [ 165.362199] ? nft_table_lookup+0x4f/0xa0 [nf_tables]
+ [ 165.364217] ? nla_strcmp+0x10/0x50
+ [ 165.365891] ? nf_tables_newsetelem+0xd5/0x150 [nf_tables]
+ [ 165.367997] nf_tables_newsetelem+0xd5/0x150 [nf_tables]
+ [ 165.370083] nfnetlink_rcv_batch+0x4fd/0x790 [nfnetlink]
+ [ 165.372205] ? __lock_acquire+0x241/0x1400
+ [ 165.374058] ? __nla_validate_parse+0x57/0x8a0
+ [ 165.375989] ? cap_inode_getsecurity+0x230/0x230
+ [ 165.377954] ? security_capable+0x38/0x50
+ [ 165.379795] nfnetlink_rcv+0x11d/0x140 [nfnetlink]
+ [ 165.381779] netlink_unicast+0x1b2/0x280
+ [ 165.383612] netlink_sendmsg+0x351/0x470
+ [ 165.385439] sock_sendmsg+0x5b/0x60
+ [ 165.387133] ____sys_sendmsg+0x200/0x280
+ [ 165.388871] ? copy_msghdr_from_user+0xd9/0x160
+ [ 165.390805] ___sys_sendmsg+0x88/0xd0
+ [ 165.392524] ? __might_fault+0x3e/0x90
+ [ 165.394273] ? sock_getsockopt+0x3d5/0xbb0
+ [ 165.396021] ? __handle_mm_fault+0x545/0x6a0
+ [ 165.397822] ? find_held_lock+0x2d/0x90
+ [ 165.399593] ? __sys_sendmsg+0x5e/0xa0
+ [ 165.401338] __sys_sendmsg+0x5e/0xa0
+ [ 165.402979] do_syscall_64+0x60/0x280
+ [ 165.404680] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [ 165.406621] RIP: 0033:0x7ff1fa46e783
+ [ 165.408299] Code: c7 c0 ff ff ff ff eb bb 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 64 8b 04 25 18 00 00 00 85 c0 75 14 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 55 c3 0f 1f 40 00 48 83 ec 28 89 54 24 1c 48
+ [ 165.414163] RSP: 002b:00007ffedf59ea78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ [ 165.416804] RAX: ffffffffffffffda RBX: 00007ffedf59fc60 RCX: 00007ff1fa46e783
+ [ 165.419419] RDX: 0000000000000000 RSI: 00007ffedf59fb10 RDI: 0000000000000005
+ [ 165.421886] RBP: 00007ffedf59fc10 R08: 00007ffedf59ea54 R09: 0000000000000001
+ [ 165.424445] R10: 00007ff1fa630c6c R11: 0000000000000246 R12: 0000000000020000
+ [ 165.426954] R13: 0000000000000280 R14: 0000000000000005 R15: 00007ffedf59ea90
+
+Disable preemption before accessing the lookup scratch area in
+nft_pipapo_insert().
+
+Reported-by: kernel test robot <lkp@intel.com>
+Analysed-by: Florian Westphal <fw@strlen.de>
+Cc: <stable@vger.kernel.org> # 5.6.x
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nft_set_pipapo.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1242,7 +1242,9 @@ static int nft_pipapo_insert(const struc
+ end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ }
+
+- if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
++ if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
++ put_cpu_ptr(m->scratch);
++
+ err = pipapo_realloc_scratch(m, bsize_max);
+ if (err)
+ return err;
+@@ -1250,6 +1252,8 @@ static int nft_pipapo_insert(const struc
+ this_cpu_write(nft_pipapo_scratch_index, false);
+
+ m->bsize_max = bsize_max;
++ } else {
++ put_cpu_ptr(m->scratch);
+ }
+
+ *ext2 = &e->ext;
--- /dev/null
+From 33d077996a87175b155fe88030e8fec7ca76327e Mon Sep 17 00:00:00 2001
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Wed, 3 Jun 2020 01:50:11 +0200
+Subject: netfilter: nft_set_rbtree: Don't account for expired elements on insertion
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+commit 33d077996a87175b155fe88030e8fec7ca76327e upstream.
+
+While checking the validity of insertion in __nft_rbtree_insert(),
+we currently ignore conflicting elements and intervals only if they
+are not active within the next generation.
+
+However, if we consider expired elements and intervals as
+potentially conflicting and overlapping, we'll return error for
+entries that should be added instead. This is particularly visible
+with garbage collection intervals that are comparable with the
+element timeout itself, as reported by Mike Dillinger.
+
+Other than the simple issue of denying insertion of valid entries,
+this might also result in insertion of a single element (opening or
+closing) out of a given interval. With single entries (that are
+inserted as intervals of size 1), this leads in turn to the creation
+of new intervals. For example:
+
+ # nft add element t s { 192.0.2.1 }
+ # nft list ruleset
+ [...]
+ elements = { 192.0.2.1-255.255.255.255 }
+
+Always ignore expired elements active in the next generation, while
+checking for conflicts.
+
+It might be more convenient to introduce a new macro that covers
+both inactive and expired items, as this type of check also appears
+quite frequently in other set back-ends. This is however beyond the
+scope of this fix and can be deferred to a separate patch.
+
+Other than the overlap detection cases introduced by commit
+7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps
+on insertion"), we also have to cover the original conflict check
+dealing with conflicts between two intervals of size 1, which was
+introduced before support for timeout was introduced. This won't
+return an error to the user as -EEXIST is masked by nft if
+NLM_F_EXCL is not given, but would result in a silent failure
+adding the entry.
+
+Reported-by: Mike Dillinger <miked@softtalker.com>
+Cc: <stable@vger.kernel.org> # 5.6.x
+Fixes: 8d8540c4f5e0 ("netfilter: nft_set_rbtree: add timeout support")
+Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Acked-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nft_set_rbtree.c | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const str
+
+ if (nft_rbtree_interval_start(new)) {
+ if (nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext, genmask))
++ nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_set_elem_expired(&rbe->ext))
+ overlap = false;
+ } else {
+ overlap = nft_rbtree_interval_end(rbe) &&
+ nft_set_elem_active(&rbe->ext,
+- genmask);
++ genmask) &&
++ !nft_set_elem_expired(&rbe->ext);
+ }
+ } else if (d > 0) {
+ p = &parent->rb_right;
+@@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const str
+ if (nft_rbtree_interval_end(new)) {
+ overlap = nft_rbtree_interval_end(rbe) &&
+ nft_set_elem_active(&rbe->ext,
+- genmask);
++ genmask) &&
++ !nft_set_elem_expired(&rbe->ext);
+ } else if (nft_rbtree_interval_end(rbe) &&
+- nft_set_elem_active(&rbe->ext, genmask)) {
++ nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_set_elem_expired(&rbe->ext)) {
+ overlap = true;
+ }
+ } else {
+@@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const str
+ nft_rbtree_interval_start(new)) {
+ p = &parent->rb_left;
+
+- if (nft_set_elem_active(&rbe->ext, genmask))
++ if (nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_set_elem_expired(&rbe->ext))
+ overlap = false;
+ } else if (nft_rbtree_interval_start(rbe) &&
+ nft_rbtree_interval_end(new)) {
+ p = &parent->rb_right;
+
+- if (nft_set_elem_active(&rbe->ext, genmask))
++ if (nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_set_elem_expired(&rbe->ext))
+ overlap = false;
+- } else if (nft_set_elem_active(&rbe->ext, genmask)) {
++ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
++ !nft_set_elem_expired(&rbe->ext)) {
+ *ext = &rbe->ext;
+ return -EEXIST;
+ } else {
--- /dev/null
+From 0bdcfa182506526fbe4e088ff9ca86a31b81828d Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Mon, 15 Jun 2020 16:12:47 +1000
+Subject: powerpc/64s: Fix KVM interrupt using wrong save area
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 0bdcfa182506526fbe4e088ff9ca86a31b81828d upstream.
+
+The CTR register reload in the KVM interrupt path used the wrong save
+area for SLB (and NMI) interrupts.
+
+Fixes: 9600f261acaa ("powerpc/64s/exception: Move KVM test to common code")
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: Christian Zigotzky <chzigotzky@xenosoft.de>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200615061247.1310763-1-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -270,7 +270,7 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ .endif
+
+- ld r10,PACA_EXGEN+EX_CTR(r13)
++ ld r10,IAREA+EX_CTR(r13)
+ mtctr r10
+ BEGIN_FTR_SECTION
+ ld r10,IAREA+EX_PPR(r13)
+@@ -298,7 +298,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ .if IKVM_SKIP
+ 89: mtocrf 0x80,r9
+- ld r10,PACA_EXGEN+EX_CTR(r13)
++ ld r10,IAREA+EX_CTR(r13)
+ mtctr r10
+ ld r9,IAREA+EX_R9(r13)
+ ld r10,IAREA+EX_R10(r13)
--- /dev/null
+From 4e264ffd953463cd14c0720eaa9315ac052f5973 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Tue, 16 Jun 2020 19:14:08 +0900
+Subject: proc/bootconfig: Fix to use correct quotes for value
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 4e264ffd953463cd14c0720eaa9315ac052f5973 upstream.
+
+Fix /proc/bootconfig to select double or single quotes
+correctly according to the value.
+
+If a bootconfig value includes a double quote character,
+we must use single-quotes to quote that value.
+
+This modifies the if() condition and blocks to avoid the
+double-quote-in-value check in 2 places, since
+xbc_array_for_each_value() can handle an array which
+has a single node correctly.
+Thus,
+
+if (vnode && xbc_node_is_array(vnode)) {
+ xbc_array_for_each_value(vnode) /* vnode->next != NULL */
+ ...
+} else {
+ snprintf(val); /* val is an empty string if !vnode */
+}
+
+is equivalent to
+
+if (vnode) {
+ xbc_array_for_each_value(vnode) /* vnode->next can be NULL */
+ ...
+} else {
+ snprintf(""); /* value is always empty */
+}
+
+Link: http://lkml.kernel.org/r/159230244786.65555.3763894451251622488.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: c1a3c36017d4 ("proc: bootconfig: Add /proc/bootconfig to show boot config list")
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/bootconfig.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/fs/proc/bootconfig.c
++++ b/fs/proc/bootconfig.c
+@@ -26,8 +26,9 @@ static int boot_config_proc_show(struct
+ static int __init copy_xbc_key_value_list(char *dst, size_t size)
+ {
+ struct xbc_node *leaf, *vnode;
+- const char *val;
+ char *key, *end = dst + size;
++ const char *val;
++ char q;
+ int ret = 0;
+
+ key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+@@ -41,16 +42,20 @@ static int __init copy_xbc_key_value_lis
+ break;
+ dst += ret;
+ vnode = xbc_node_get_child(leaf);
+- if (vnode && xbc_node_is_array(vnode)) {
++ if (vnode) {
+ xbc_array_for_each_value(vnode, val) {
+- ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
+- val, vnode->next ? ", " : "\n");
++ if (strchr(val, '"'))
++ q = '\'';
++ else
++ q = '"';
++ ret = snprintf(dst, rest(dst, end), "%c%s%c%s",
++ q, val, q, vnode->next ? ", " : "\n");
+ if (ret < 0)
+ goto out;
+ dst += ret;
+ }
+ } else {
+- ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
++ ret = snprintf(dst, rest(dst, end), "\"\"\n");
+ if (ret < 0)
+ break;
+ dst += ret;
--- /dev/null
+From 14ed1c908a7a623cc0cbf0203f8201d1b7d31d16 Mon Sep 17 00:00:00 2001
+From: Harry Wentland <harry.wentland@amd.com>
+Date: Thu, 28 May 2020 09:44:44 -0400
+Subject: Revert "drm/amd/display: disable dcn20 abm feature for bring up"
+
+From: Harry Wentland <harry.wentland@amd.com>
+
+commit 14ed1c908a7a623cc0cbf0203f8201d1b7d31d16 upstream.
+
+This reverts commit 96cb7cf13d8530099c256c053648ad576588c387.
+
+This change was used for DCN2 bringup and is no longer desired.
+In fact it breaks backlight on DCN2 systems.
+
+Cc: Alexander Monakov <amonakov@ispras.ru>
+Cc: Hersen Wu <hersenxs.wu@amd.com>
+Cc: Anthony Koo <Anthony.Koo@amd.com>
+Cc: Michael Chiu <Michael.Chiu@amd.com>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reported-and-tested-by: Alexander Monakov <amonakov@ispras.ru>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1334,7 +1334,7 @@ static int dm_late_init(void *handle)
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+- bool ret = false;
++ bool ret;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+@@ -1350,13 +1350,10 @@ static int dm_late_init(void *handle)
+ */
+ params.min_abm_backlight = 0x28F;
+
+- /* todo will enable for navi10 */
+- if (adev->asic_type <= CHIP_RAVEN) {
+- ret = dmcu_load_iram(dmcu, params);
+-
+- if (!ret)
+- return -EINVAL;
+- }
++ ret = dmcu_load_iram(dmcu, params);
++
++ if (!ret)
++ return -EINVAL;
+
+ return detect_mst_link_for_all_connectors(adev->ddev);
+ }
--- /dev/null
+From e9b7b1c0c103a623be1a65c39f98719803440871 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Wed, 10 Jun 2020 01:12:44 +0000
+Subject: sample-trace-array: Fix sleeping function called from invalid context
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit e9b7b1c0c103a623be1a65c39f98719803440871 upstream.
+
+BUG: sleeping function called from invalid context at kernel/locking/mutex.c:935
+ in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 0, name: swapper/5
+ 1 lock held by swapper/5/0:
+ #0: ffff80001002bd90 (samples/ftrace/sample-trace-array.c:38){+.-.}-{0:0}, at: call_timer_fn+0x8/0x3e0
+ CPU: 5 PID: 0 Comm: swapper/5 Not tainted 5.7.0+ #8
+ Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
+ Call trace:
+ dump_backtrace+0x0/0x1a0
+ show_stack+0x20/0x30
+ dump_stack+0xe4/0x150
+ ___might_sleep+0x160/0x200
+ __might_sleep+0x58/0x90
+ __mutex_lock+0x64/0x948
+ mutex_lock_nested+0x3c/0x58
+ __ftrace_set_clr_event+0x44/0x88
+ trace_array_set_clr_event+0x24/0x38
+ mytimer_handler+0x34/0x40 [sample_trace_array]
+
+mutex_lock() will be called in interrupt context; use a workqueue to fix it.
+
+Link: https://lkml.kernel.org/r/20200610011244.2209486-1-wangkefeng.wang@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 89ed42495ef4 ("tracing: Sample module to demonstrate kernel access to Ftrace instances.")
+Reviewed-by: Divya Indi <divya.indi@oracle.com>
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ samples/ftrace/sample-trace-array.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/samples/ftrace/sample-trace-array.c
++++ b/samples/ftrace/sample-trace-array.c
+@@ -6,6 +6,7 @@
+ #include <linux/timer.h>
+ #include <linux/err.h>
+ #include <linux/jiffies.h>
++#include <linux/workqueue.h>
+
+ /*
+ * Any file that uses trace points, must include the header.
+@@ -20,6 +21,16 @@ struct trace_array *tr;
+ static void mytimer_handler(struct timer_list *unused);
+ static struct task_struct *simple_tsk;
+
++static void trace_work_fn(struct work_struct *work)
++{
++ /*
++ * Disable tracing for event "sample_event".
++ */
++ trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
++ false);
++}
++static DECLARE_WORK(trace_work, trace_work_fn);
++
+ /*
+ * mytimer: Timer setup to disable tracing for event "sample_event". This
+ * timer is only for the purposes of the sample module to demonstrate access of
+@@ -29,11 +40,7 @@ static DEFINE_TIMER(mytimer, mytimer_han
+
+ static void mytimer_handler(struct timer_list *unused)
+ {
+- /*
+- * Disable tracing for event "sample_event".
+- */
+- trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
+- false);
++ schedule_work(&trace_work);
+ }
+
+ static void simple_thread_func(int count)
+@@ -76,6 +83,7 @@ static int simple_thread(void *arg)
+ simple_thread_func(count++);
+
+ del_timer(&mytimer);
++ cancel_work_sync(&trace_work);
+
+ /*
+ * trace_array_put() decrements the reference counter associated with
--- /dev/null
+From 9fbc01cdba66e988122ccdc6094cfd85d9587769 Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Tue, 9 Jun 2020 13:52:00 +0000
+Subject: sample-trace-array: Remove trace_array 'sample-instance'
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit 9fbc01cdba66e988122ccdc6094cfd85d9587769 upstream.
+
+Remove trace_array 'sample-instance' if kthread_run fails
+in sample_trace_array_init().
+
+Link: https://lkml.kernel.org/r/20200609135200.2206726-1-wangkefeng.wang@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 89ed42495ef4a ("tracing: Sample module to demonstrate kernel access to Ftrace instances.")
+Reviewed-by: Divya Indi <divya.indi@oracle.com>
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ samples/ftrace/sample-trace-array.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/samples/ftrace/sample-trace-array.c
++++ b/samples/ftrace/sample-trace-array.c
+@@ -107,8 +107,12 @@ static int __init sample_trace_array_ini
+ trace_printk_init_buffers();
+
+ simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
+- if (IS_ERR(simple_tsk))
++ if (IS_ERR(simple_tsk)) {
++ trace_array_put(tr);
++ trace_array_destroy(tr);
+ return -1;
++ }
++
+ return 0;
+ }
+
drm-i915-gt-move-ilk-gt-workarounds-from-init_clock_gating-to-workarounds.patch
drm-i915-gt-move-vlv-gt-workarounds-from-init_clock_gating-to-workarounds.patch
drm-i915-gt-move-gen4-gt-workarounds-from-init_clock_gating-to-workarounds.patch
+revert-drm-amd-display-disable-dcn20-abm-feature-for-bring-up.patch
+drm-i915-tgl-make-wa_14010229206-permanent.patch
+crypto-algif_skcipher-cap-recv-sg-list-at-ctx-used.patch
+crypto-algboss-don-t-wait-during-notifier-callback.patch
+tracing-make-ftrace-packed-events-have-align-of-1.patch
+tracing-probe-fix-memleak-in-fetch_op_data-operations.patch
+proc-bootconfig-fix-to-use-correct-quotes-for-value.patch
+tools-bootconfig-fix-to-use-correct-quotes-for-value.patch
+tools-bootconfig-fix-to-return-0-if-succeeded-to-show-the-bootconfig.patch
+sample-trace-array-remove-trace_array-sample-instance.patch
+sample-trace-array-fix-sleeping-function-called-from-invalid-context.patch
+netfilter-nft_set_rbtree-don-t-account-for-expired-elements-on-insertion.patch
+netfilter-nft_set_pipapo-disable-preemption-before-getting-per-cpu-pointer.patch
+kprobes-fix-to-protect-kick_kprobe_optimizer-by-kprobe_mutex.patch
+kretprobe-prevent-triggering-kretprobe-from-within-kprobe_flush_task.patch
+powerpc-64s-fix-kvm-interrupt-using-wrong-save-area.patch
+e1000e-do-not-wake-up-the-system-via-wol-if-device-wakeup-is-disabled.patch
+net-octeon-mgmt-repair-filling-of-rx-ring.patch
+net-core-device_rename-use-rwsem-instead-of-a-seqcount.patch
--- /dev/null
+From f91cb5b7476a603068eae31e5b2cc170dd2b9b1b Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Tue, 16 Jun 2020 19:14:25 +0900
+Subject: tools/bootconfig: Fix to return 0 if succeeded to show the bootconfig
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit f91cb5b7476a603068eae31e5b2cc170dd2b9b1b upstream.
+
+Fix bootconfig to return 0 if succeeded to show the bootconfig
+in initrd. Without this fix, "bootconfig INITRD" command
+returns !0 even if the command succeeded to show the bootconfig.
+
+Link: http://lkml.kernel.org/r/159230246566.65555.11891772258543514487.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 950313ebf79c ("tools: bootconfig: Add bootconfig command")
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/bootconfig/main.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -207,11 +207,13 @@ int show_xbc(const char *path)
+ }
+
+ ret = load_xbc_from_initrd(fd, &buf);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("Failed to load a boot config from initrd: %d\n", ret);
+- else
+- xbc_show_compact_tree();
+-
++ goto out;
++ }
++ xbc_show_compact_tree();
++ ret = 0;
++out:
+ close(fd);
+ free(buf);
+
--- /dev/null
+From 272da3279df191f028fd63d1683e5ecd56fcb13b Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Tue, 16 Jun 2020 19:14:17 +0900
+Subject: tools/bootconfig: Fix to use correct quotes for value
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 272da3279df191f028fd63d1683e5ecd56fcb13b upstream.
+
+Fix bootconfig tool to select double or single quotes
+correctly according to the value.
+
+If a bootconfig value includes a double quote character,
+we must use single-quotes to quote that value.
+
+Link: http://lkml.kernel.org/r/159230245697.65555.12444299015852932304.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 950313ebf79c ("tools: bootconfig: Add bootconfig command")
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/bootconfig/main.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -14,13 +14,18 @@
+ #include <linux/kernel.h>
+ #include <linux/bootconfig.h>
+
+-static int xbc_show_array(struct xbc_node *node)
++static int xbc_show_value(struct xbc_node *node)
+ {
+ const char *val;
++ char q;
+ int i = 0;
+
+ xbc_array_for_each_value(node, val) {
+- printf("\"%s\"%s", val, node->next ? ", " : ";\n");
++ if (strchr(val, '"'))
++ q = '\'';
++ else
++ q = '"';
++ printf("%c%s%c%s", q, val, q, node->next ? ", " : ";\n");
+ i++;
+ }
+ return i;
+@@ -48,10 +53,7 @@ static void xbc_show_compact_tree(void)
+ continue;
+ } else if (cnode && xbc_node_is_value(cnode)) {
+ printf("%s = ", xbc_node_get_data(node));
+- if (cnode->next)
+- xbc_show_array(cnode);
+- else
+- printf("\"%s\";\n", xbc_node_get_data(cnode));
++ xbc_show_value(cnode);
+ } else {
+ printf("%s;\n", xbc_node_get_data(node));
+ }
--- /dev/null
+From 4649079b9de1ad86be9f4c989373adb8235a8485 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Tue, 9 Jun 2020 22:00:41 -0400
+Subject: tracing: Make ftrace packed events have align of 1
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 4649079b9de1ad86be9f4c989373adb8235a8485 upstream.
+
+When using trace-cmd on 5.6-rt for the function graph tracer, the output was
+corrupted. It gave output like this:
+
+ funcgraph_entry: func=0xffffffff depth=38982
+ funcgraph_entry: func=0x1ffffffff depth=16044
+ funcgraph_exit: func=0xffffffff overrun=0x92539aaf00000000 calltime=0x92539c9900000072 rettime=0x100000072 depth=11084
+ funcgraph_exit: func=0xffffffff overrun=0x9253946e00000000 calltime=0x92539e2100000072 rettime=0x72 depth=26033702
+ funcgraph_entry: func=0xffffffff depth=85798
+ funcgraph_entry: func=0x1ffffffff depth=12044
+
+The reason was because the tracefs/events/ftrace/funcgraph_entry/exit format
+file was incorrect. The -rt kernel adds more common fields to the trace
+events. Namely, common_migrate_disable and common_preempt_lazy_count. Each
+is one byte in size. This changes the alignment of the normal payload. Most
+events are aligned normally, but the function and function graph events are
+defined with a "PACKED" macro, that packs their payload. As the offsets
+displayed in the format files are now calculated by an aligned field, the
+aligned field for function and function graph events should be 1, not their
+normal alignment.
+
+With aligning of the funcgraph_entry event, the format file has:
+
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:unsigned char common_migrate_disable; offset:8; size:1; signed:0;
+ field:unsigned char common_preempt_lazy_count; offset:9; size:1; signed:0;
+
+ field:unsigned long func; offset:16; size:8; signed:0;
+ field:int depth; offset:24; size:4; signed:1;
+
+But the actual alignment is:
+
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:unsigned char common_migrate_disable; offset:8; size:1; signed:0;
+ field:unsigned char common_preempt_lazy_count; offset:9; size:1; signed:0;
+
+ field:unsigned long func; offset:12; size:8; signed:0;
+ field:int depth; offset:20; size:4; signed:1;
+
+Link: https://lkml.kernel.org/r/20200609220041.2a3b527f@oasis.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 04ae87a52074e ("ftrace: Rework event_create_dir()")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.h | 3 +++
+ kernel/trace/trace_entries.h | 14 +++++++-------
+ kernel/trace/trace_export.c | 16 ++++++++++++++++
+ 3 files changed, 26 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -61,6 +61,9 @@ enum trace_type {
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+
++#undef __field_packed
++#define __field_packed(type, container, item)
++
+ #undef __array
+ #define __array(type, item, size) type item[size];
+
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftr
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ent, graph_ent )
+- __field_desc( unsigned long, graph_ent, func )
+- __field_desc( int, graph_ent, depth )
++ __field_packed( unsigned long, graph_ent, func )
++ __field_packed( int, graph_ent, depth )
+ ),
+
+ F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
+@@ -92,11 +92,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftra
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ret, ret )
+- __field_desc( unsigned long, ret, func )
+- __field_desc( unsigned long, ret, overrun )
+- __field_desc( unsigned long long, ret, calltime)
+- __field_desc( unsigned long long, ret, rettime )
+- __field_desc( int, ret, depth )
++ __field_packed( unsigned long, ret, func )
++ __field_packed( unsigned long, ret, overrun )
++ __field_packed( unsigned long long, ret, calltime)
++ __field_packed( unsigned long long, ret, rettime )
++ __field_packed( int, ret, depth )
+ ),
+
+ F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -45,6 +45,9 @@ static int ftrace_event_register(struct
+ #undef __field_desc
+ #define __field_desc(type, container, item) type item;
+
++#undef __field_packed
++#define __field_packed(type, container, item) type item;
++
+ #undef __array
+ #define __array(type, item, size) type item[size];
+
+@@ -85,6 +88,13 @@ static void __always_unused ____ftrace_c
+ .size = sizeof(_type), .align = __alignof__(_type), \
+ is_signed_type(_type), .filter_type = _filter_type },
+
++
++#undef __field_ext_packed
++#define __field_ext_packed(_type, _item, _filter_type) { \
++ .type = #_type, .name = #_item, \
++ .size = sizeof(_type), .align = 1, \
++ is_signed_type(_type), .filter_type = _filter_type },
++
+ #undef __field
+ #define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER)
+
+@@ -94,6 +104,9 @@ static void __always_unused ____ftrace_c
+ #undef __field_desc
+ #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
+
++#undef __field_packed
++#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
++
+ #undef __array
+ #define __array(_type, _item, _len) { \
+ .type = #_type"["__stringify(_len)"]", .name = #_item, \
+@@ -129,6 +142,9 @@ static struct trace_event_fields ftrace_
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+
++#undef __field_packed
++#define __field_packed(type, container, item)
++
+ #undef __array
+ #define __array(type, item, len)
+
--- /dev/null
+From 3aa8fdc37d16735e8891035becf25b3857d3efe0 Mon Sep 17 00:00:00 2001
+From: Vamshi K Sthambamkadi <vamshi.k.sthambamkadi@gmail.com>
+Date: Mon, 15 Jun 2020 20:00:38 +0530
+Subject: tracing/probe: Fix memleak in fetch_op_data operations
+
+From: Vamshi K Sthambamkadi <vamshi.k.sthambamkadi@gmail.com>
+
+commit 3aa8fdc37d16735e8891035becf25b3857d3efe0 upstream.
+
+kmemleak report:
+ [<57dcc2ca>] __kmalloc_track_caller+0x139/0x2b0
+ [<f1c45d0f>] kstrndup+0x37/0x80
+ [<f9761eb0>] parse_probe_arg.isra.7+0x3cc/0x630
+ [<055bf2ba>] traceprobe_parse_probe_arg+0x2f5/0x810
+ [<655a7766>] trace_kprobe_create+0x2ca/0x950
+ [<4fc6a02a>] create_or_delete_trace_kprobe+0xf/0x30
+ [<6d1c8a52>] trace_run_command+0x67/0x80
+ [<be812cc0>] trace_parse_run_command+0xa7/0x140
+ [<aecfe401>] probes_write+0x10/0x20
+ [<2027641c>] __vfs_write+0x30/0x1e0
+ [<6a4aeee1>] vfs_write+0x96/0x1b0
+ [<3517fb7d>] ksys_write+0x53/0xc0
+ [<dad91db7>] __ia32_sys_write+0x15/0x20
+ [<da347f64>] do_syscall_32_irqs_on+0x3d/0x260
+ [<fd0b7e7d>] do_fast_syscall_32+0x39/0xb0
+ [<ea5ae810>] entry_SYSENTER_32+0xaf/0x102
+
+Post parse_probe_arg(), the FETCH_OP_DATA operation type is overwritten
+to FETCH_OP_ST_STRING, as a result memory is never freed since
+traceprobe_free_probe_arg() iterates only over SYMBOL and DATA op types
+
+Setup fetch string operation correctly after fetch_op_data operation.
+
+Link: https://lkml.kernel.org/r/20200615143034.GA1734@cosmos
+
+Cc: stable@vger.kernel.org
+Fixes: a42e3c4de964 ("tracing/probe: Add immediate string parameter support")
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Vamshi K Sthambamkadi <vamshi.k.sthambamkadi@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_probe.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_bo
+ ret = -EINVAL;
+ goto fail;
+ }
+- if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
+- parg->count) {
++ if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
++ code->op == FETCH_OP_DATA) || parg->count) {
+ /*
+ * IMM, DATA and COMM is pointing actual address, those
+ * must be kept, and if parg->count != 0, this is an