--- /dev/null
+From 21b200d091826a83aafc95d847139b2b0582f6d1 Mon Sep 17 00:00:00 2001
+From: Aurelien Aptel <aaptel@suse.com>
+Date: Fri, 5 Feb 2021 15:42:48 +0100
+Subject: cifs: report error instead of invalid when revalidating a dentry fails
+
+From: Aurelien Aptel <aaptel@suse.com>
+
+commit 21b200d091826a83aafc95d847139b2b0582f6d1 upstream.
+
+Assuming
+- //HOST/a is mounted on /mnt
+- //HOST/b is mounted on /mnt/b
+
+On a slow connection, running 'df' and killing it while it's
+processing /mnt/b can make cifs_get_inode_info() return -ERESTARTSYS.
+
+This triggers the following chain of events:
+=> the dentry revalidation fails
+=> dentry is put and released
+=> superblock associated with the dentry is put
+=> /mnt/b is unmounted
+
+This patch makes cifs_d_revalidate() return the error instead of 0
+(invalid) when cifs_revalidate_dentry() fails, except for ENOENT (file
+deleted) and ESTALE (file recreated).
+
+Signed-off-by: Aurelien Aptel <aaptel@suse.com>
+Suggested-by: Shyam Prasad N <nspmangalore@gmail.com>
+Reviewed-by: Shyam Prasad N <nspmangalore@gmail.com>
+CC: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/dir.c | 22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -736,6 +736,7 @@ static int
+ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ {
+ struct inode *inode;
++ int rc;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+@@ -745,8 +746,25 @@ cifs_d_revalidate(struct dentry *direntr
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
+- if (cifs_revalidate_dentry(direntry))
+- return 0;
++ rc = cifs_revalidate_dentry(direntry);
++ if (rc) {
++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
++ switch (rc) {
++ case -ENOENT:
++ case -ESTALE:
++ /*
++ * Those errors mean the dentry is invalid
++ * (file was deleted or recreated)
++ */
++ return 0;
++ default:
++ /*
++ * Otherwise some unexpected error happened
++ * report it as-is to VFS layer
++ */
++ return rc;
++ }
++ }
+ else {
+ /*
+ * If the inode wasn't known to be a dfs entry when
--- /dev/null
+From 7e0a9220467dbcfdc5bc62825724f3e52e50ab31 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 29 Jan 2021 10:13:53 -0500
+Subject: fgraph: Initialize tracing_graph_pause at task creation
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 7e0a9220467dbcfdc5bc62825724f3e52e50ab31 upstream.
+
+On some archs, the idle task can call into cpu_suspend(). The cpu_suspend()
+will disable or pause function graph tracing, as there's some paths in
+bringing down the CPU that can have issues with its return address being
+modified. The task_struct structure has a "tracing_graph_pause" atomic
+counter, that when set to something other than zero, the function graph
+tracer will not modify the return address.
+
+The problem is that the tracing_graph_pause counter is initialized when the
+function graph tracer is enabled. This can corrupt the counter for the idle
+task if it is suspended in these architectures.
+
+ CPU 1 CPU 2
+ ----- -----
+ do_idle()
+ cpu_suspend()
+ pause_graph_tracing()
+ task_struct->tracing_graph_pause++ (0 -> 1)
+
+ start_graph_tracing()
+ for_each_online_cpu(cpu) {
+ ftrace_graph_init_idle_task(cpu)
+ task-struct->tracing_graph_pause = 0 (1 -> 0)
+
+ unpause_graph_tracing()
+ task_struct->tracing_graph_pause-- (0 -> -1)
+
+The above should have gone from 1 to zero, and enabled function graph
+tracing again. But instead, it is set to -1, which keeps it disabled.
+
+There's no reason that the field tracing_graph_pause on the task_struct can
+not be initialized at boot up.
+
+Cc: stable@vger.kernel.org
+Fixes: 380c4b1411ccd ("tracing/function-graph-tracer: append the tracing_graph_flag")
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=211339
+Reported-by: pierre.gondois@arm.com
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/init_task.c | 3 ++-
+ kernel/trace/fgraph.c | 2 --
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -198,7 +198,8 @@ struct task_struct init_task
+ .lockdep_recursion = 0,
+ #endif
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+- .ret_stack = NULL,
++ .ret_stack = NULL,
++ .tracing_graph_pause = ATOMIC_INIT(0),
+ #endif
+ #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
+ .trace_recursion = 0,
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -395,7 +395,6 @@ static int alloc_retstack_tasklist(struc
+ }
+
+ if (t->ret_stack == NULL) {
+- atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
+@@ -490,7 +489,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_
+ static void
+ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+ {
+- atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+ /* make curr_ret_stack visible before we add the ret_stack */
--- /dev/null
+From 4c457e8cb75eda91906a4f89fc39bde3f9a43922 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Sat, 23 Jan 2021 12:27:59 +0000
+Subject: genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 4c457e8cb75eda91906a4f89fc39bde3f9a43922 upstream.
+
+When MSI_FLAG_ACTIVATE_EARLY is set (which is the case for PCI),
+__msi_domain_alloc_irqs() performs the activation of the interrupt (which
+in the case of PCI results in the endpoint being programmed) as soon as the
+interrupt is allocated.
+
+But it appears that this is only done for the first vector, introducing an
+inconsistent behaviour for PCI Multi-MSI.
+
+Fix it by iterating over the number of vectors allocated to each MSI
+descriptor. This is easily achieved by introducing a new
+"for_each_msi_vector" iterator, together with a tiny bit of refactoring.
+
+Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early")
+Reported-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210123122759.1781359-1-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/msi.h | 6 ++++++
+ kernel/irq/msi.c | 44 ++++++++++++++++++++------------------------
+ 2 files changed, 26 insertions(+), 24 deletions(-)
+
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -139,6 +139,12 @@ struct msi_desc {
+ list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+ #define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
++#define for_each_msi_vector(desc, __irq, dev) \
++ for_each_msi_entry((desc), (dev)) \
++ if ((desc)->irq) \
++ for (__irq = (desc)->irq; \
++ __irq < ((desc)->irq + (desc)->nvec_used); \
++ __irq++)
+
+ #ifdef CONFIG_IRQ_MSI_IOMMU
+ static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -436,22 +436,22 @@ int __msi_domain_alloc_irqs(struct irq_d
+
+ can_reserve = msi_check_reservation_mode(domain, info, dev);
+
+- for_each_msi_entry(desc, dev) {
+- virq = desc->irq;
+- if (desc->nvec_used == 1)
+- dev_dbg(dev, "irq %d for MSI\n", virq);
+- else
++ /*
++ * This flag is set by the PCI layer as we need to activate
++ * the MSI entries before the PCI layer enables MSI in the
++ * card. Otherwise the card latches a random msi message.
++ */
++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
++ goto skip_activate;
++
++ for_each_msi_vector(desc, i, dev) {
++ if (desc->irq == i) {
++ virq = desc->irq;
+ dev_dbg(dev, "irq [%d-%d] for MSI\n",
+ virq, virq + desc->nvec_used - 1);
+- /*
+- * This flag is set by the PCI layer as we need to activate
+- * the MSI entries before the PCI layer enables MSI in the
+- * card. Otherwise the card latches a random msi message.
+- */
+- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+- continue;
++ }
+
+- irq_data = irq_domain_get_irq_data(domain, desc->irq);
++ irq_data = irq_domain_get_irq_data(domain, i);
+ if (!can_reserve) {
+ irqd_clr_can_reserve(irq_data);
+ if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
+@@ -462,28 +462,24 @@ int __msi_domain_alloc_irqs(struct irq_d
+ goto cleanup;
+ }
+
++skip_activate:
+ /*
+ * If these interrupts use reservation mode, clear the activated bit
+ * so request_irq() will assign the final vector.
+ */
+ if (can_reserve) {
+- for_each_msi_entry(desc, dev) {
+- irq_data = irq_domain_get_irq_data(domain, desc->irq);
++ for_each_msi_vector(desc, i, dev) {
++ irq_data = irq_domain_get_irq_data(domain, i);
+ irqd_clr_activated(irq_data);
+ }
+ }
+ return 0;
+
+ cleanup:
+- for_each_msi_entry(desc, dev) {
+- struct irq_data *irqd;
+-
+- if (desc->irq == virq)
+- break;
+-
+- irqd = irq_domain_get_irq_data(domain, desc->irq);
+- if (irqd_is_activated(irqd))
+- irq_domain_deactivate_irq(irqd);
++ for_each_msi_vector(desc, i, dev) {
++ irq_data = irq_domain_get_irq_data(domain, i);
++ if (irqd_is_activated(irq_data))
++ irq_domain_deactivate_irq(irq_data);
+ }
+ msi_domain_free_irqs(domain, dev);
+ return ret;
--- /dev/null
+From 4c7bcb51ae25f79e3733982e5d0cd8ce8640ddfc Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Mon, 21 Dec 2020 19:56:47 +0100
+Subject: genirq: Prevent [devm_]irq_alloc_desc from returning irq 0
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 4c7bcb51ae25f79e3733982e5d0cd8ce8640ddfc upstream.
+
+Since commit a85a6c86c25b ("driver core: platform: Clarify that IRQ 0
+is invalid"), having a linux-irq with number 0 will trigger a WARN()
+when calling platform_get_irq*() to retrieve that linux-irq.
+
+Since [devm_]irq_alloc_desc allocs a single irq and since irq 0 is not used
+on some systems, it can return 0, triggering that WARN(). This happens
+e.g. on Intel Bay Trail and Cherry Trail devices using the LPE audio engine
+for HDMI audio:
+
+ 0 is an invalid IRQ number
+ WARNING: CPU: 3 PID: 472 at drivers/base/platform.c:238 platform_get_irq_optional+0x108/0x180
+ Modules linked in: snd_hdmi_lpe_audio(+) ...
+
+ Call Trace:
+ platform_get_irq+0x17/0x30
+ hdmi_lpe_audio_probe+0x4a/0x6c0 [snd_hdmi_lpe_audio]
+
+ ---[ end trace ceece38854223a0b ]---
+
+Change the 'from' parameter passed to __[devm_]irq_alloc_descs() by the
+[devm_]irq_alloc_desc macros from 0 to 1, so that these macros will no
+longer return 0.
+
+Fixes: a85a6c86c25b ("driver core: platform: Clarify that IRQ 0 is invalid")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20201221185647.226146-1-hdegoede@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/irq.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -922,7 +922,7 @@ int __devm_irq_alloc_descs(struct device
+ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
+
+ #define irq_alloc_desc(node) \
+- irq_alloc_descs(-1, 0, 1, node)
++ irq_alloc_descs(-1, 1, 1, node)
+
+ #define irq_alloc_desc_at(at, node) \
+ irq_alloc_descs(at, at, 1, node)
+@@ -937,7 +937,7 @@ int __devm_irq_alloc_descs(struct device
+ __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
+
+ #define devm_irq_alloc_desc(dev, node) \
+- devm_irq_alloc_descs(dev, -1, 0, 1, node)
++ devm_irq_alloc_descs(dev, -1, 1, 1, node)
+
+ #define devm_irq_alloc_desc_at(dev, at, node) \
+ devm_irq_alloc_descs(dev, at, at, 1, node)
--- /dev/null
+From c351bb64cbe67029c68dea3adbec1b9508c6ff0f Mon Sep 17 00:00:00 2001
+From: Quanyang Wang <quanyang.wang@windriver.com>
+Date: Fri, 29 Jan 2021 16:19:17 +0800
+Subject: gpiolib: free device name on error path to fix kmemleak
+
+From: Quanyang Wang <quanyang.wang@windriver.com>
+
+commit c351bb64cbe67029c68dea3adbec1b9508c6ff0f upstream.
+
+In gpiochip_add_data_with_key, we should check the return value of
+dev_set_name to ensure that device name is allocated successfully
+and then add a label on the error path to free device name to fix
+kmemleak as below:
+
+unreferenced object 0xc2d6fc40 (size 64):
+ comm "kworker/0:1", pid 16, jiffies 4294937425 (age 65.120s)
+ hex dump (first 32 bytes):
+ 67 70 69 6f 63 68 69 70 30 00 1a c0 54 63 1a c0 gpiochip0...Tc..
+ 0c ed 84 c0 48 ed 84 c0 3c ee 84 c0 10 00 00 00 ....H...<.......
+ backtrace:
+ [<962810f7>] kobject_set_name_vargs+0x2c/0xa0
+ [<f50797e6>] dev_set_name+0x2c/0x5c
+ [<94abbca9>] gpiochip_add_data_with_key+0xfc/0xce8
+ [<5c4193e0>] omap_gpio_probe+0x33c/0x68c
+ [<3402f137>] platform_probe+0x58/0xb8
+ [<7421e210>] really_probe+0xec/0x3b4
+ [<000f8ada>] driver_probe_device+0x58/0xb4
+ [<67e0f7f7>] bus_for_each_drv+0x80/0xd0
+ [<4de545dc>] __device_attach+0xe8/0x15c
+ [<2e4431e7>] bus_probe_device+0x84/0x8c
+ [<c18b1de9>] device_add+0x384/0x7c0
+ [<5aff2995>] of_platform_device_create_pdata+0x8c/0xb8
+ [<061c3483>] of_platform_bus_create+0x198/0x230
+ [<5ee6d42a>] of_platform_populate+0x60/0xb8
+ [<2647300f>] sysc_probe+0xd18/0x135c
+ [<3402f137>] platform_probe+0x58/0xb8
+
+Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -602,7 +602,11 @@ int gpiochip_add_data_with_key(struct gp
+ ret = gdev->id;
+ goto err_free_gdev;
+ }
+- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++
++ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++ if (ret)
++ goto err_free_ida;
++
+ device_initialize(&gdev->dev);
+ dev_set_drvdata(&gdev->dev, gdev);
+ if (gc->parent && gc->parent->driver)
+@@ -616,7 +620,7 @@ int gpiochip_add_data_with_key(struct gp
+ gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ if (!gdev->descs) {
+ ret = -ENOMEM;
+- goto err_free_ida;
++ goto err_free_dev_name;
+ }
+
+ if (gc->ngpio == 0) {
+@@ -767,6 +771,8 @@ err_free_label:
+ kfree_const(gdev->label);
+ err_free_descs:
+ kfree(gdev->descs);
++err_free_dev_name:
++ kfree(dev_name(&gdev->dev));
+ err_free_ida:
+ ida_free(&gpio_ida, gdev->id);
+ err_free_gdev:
--- /dev/null
+From 0188b87899ffc4a1d36a0badbe77d56c92fd91dc Mon Sep 17 00:00:00 2001
+From: Wang ShaoBo <bobo.shaobowang@huawei.com>
+Date: Thu, 28 Jan 2021 20:44:27 +0800
+Subject: kretprobe: Avoid re-registration of the same kretprobe earlier
+
+From: Wang ShaoBo <bobo.shaobowang@huawei.com>
+
+commit 0188b87899ffc4a1d36a0badbe77d56c92fd91dc upstream.
+
+Our system encountered a re-init error when re-registering same kretprobe,
+where the kretprobe_instance in rp->free_instances is illegally accessed
+after re-init.
+
+Implementation to avoid re-registration has been introduced for kprobe
+before, but lags for register_kretprobe(). We must check if kprobe has
+been re-registered before re-initializing kretprobe, otherwise it will
+destroy the data struct of kretprobe registered, which can lead to memory
+leak, system crash, also some unexpected behaviors.
+
+We use check_kprobe_rereg() to check if kprobe has been re-registered
+before running register_kretprobe()'s body, for giving a warning message
+and terminate registration process.
+
+Link: https://lkml.kernel.org/r/20210128124427.2031088-1-bobo.shaobowang@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 1f0ab40976460 ("kprobes: Prevent re-registration of the same kprobe")
+[ The above commit should have been done for kretprobes too ]
+Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Acked-by: Ananth N Mavinakayanahalli <ananth@linux.ibm.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
+Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kprobes.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2122,6 +2122,10 @@ int register_kretprobe(struct kretprobe
+ if (ret)
+ return ret;
+
++ /* If only rp->kp.addr is specified, check reregistering kprobes */
++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
++ return -EINVAL;
++
+ if (kretprobe_blacklist_size) {
+ addr = kprobe_addr(&rp->kp);
+ if (IS_ERR(addr))
--- /dev/null
+From 7018c897c2f243d4b5f1b94bc6b4831a7eab80fb Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 1 Feb 2021 16:20:40 -0800
+Subject: libnvdimm/dimm: Avoid race between probe and available_slots_show()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 7018c897c2f243d4b5f1b94bc6b4831a7eab80fb upstream.
+
+Richard reports that the following test:
+
+(while true; do
+ cat /sys/bus/nd/devices/nmem*/available_slots 2>&1 > /dev/null
+ done) &
+
+while true; do
+ for i in $(seq 0 4); do
+ echo nmem$i > /sys/bus/nd/drivers/nvdimm/bind
+ done
+ for i in $(seq 0 4); do
+ echo nmem$i > /sys/bus/nd/drivers/nvdimm/unbind
+ done
+ done
+
+...fails with a crash signature like:
+
+ divide error: 0000 [#1] SMP KASAN PTI
+ RIP: 0010:nd_label_nfree+0x134/0x1a0 [libnvdimm]
+ [..]
+ Call Trace:
+ available_slots_show+0x4e/0x120 [libnvdimm]
+ dev_attr_show+0x42/0x80
+ ? memset+0x20/0x40
+ sysfs_kf_seq_show+0x218/0x410
+
+The root cause is that available_slots_show() consults driver-data, but
+fails to synchronize against device-unbind setting up a TOCTOU race to
+access uninitialized memory.
+
+Validate driver-data under the device-lock.
+
+Fixes: 4d88a97aa9e8 ("libnvdimm, nvdimm: dimm driver and base libnvdimm device-driver infrastructure")
+Cc: <stable@vger.kernel.org>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Coly Li <colyli@suse.com>
+Reported-by: Richard Palethorpe <rpalethorpe@suse.com>
+Acked-by: Richard Palethorpe <rpalethorpe@suse.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/dimm_devs.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -335,16 +335,16 @@ static ssize_t state_show(struct device
+ }
+ static DEVICE_ATTR_RO(state);
+
+-static ssize_t available_slots_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
+ {
+- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
++ struct device *dev;
+ ssize_t rc;
+ u32 nfree;
+
+ if (!ndd)
+ return -ENXIO;
+
++ dev = ndd->dev;
+ nvdimm_bus_lock(dev);
+ nfree = nd_label_nfree(ndd);
+ if (nfree - 1 > nfree) {
+@@ -356,6 +356,18 @@ static ssize_t available_slots_show(stru
+ nvdimm_bus_unlock(dev);
+ return rc;
+ }
++
++static ssize_t available_slots_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ ssize_t rc;
++
++ nd_device_lock(dev);
++ rc = __available_slots_show(dev_get_drvdata(dev), buf);
++ nd_device_unlock(dev);
++
++ return rc;
++}
+ static DEVICE_ATTR_RO(available_slots);
+
+ __weak ssize_t security_show(struct device *dev,
--- /dev/null
+From 13f445d65955f388499f00851dc9a86280970f7c Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 12 Jan 2021 23:35:50 -0800
+Subject: libnvdimm/namespace: Fix visibility of namespace resource attribute
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 13f445d65955f388499f00851dc9a86280970f7c upstream.
+
+Legacy pmem namespaces lost support for the "resource" attribute when
+the code was cleaned up to put the permission visibility in the
+declaration. Restore this by listing 'resource' in the default
+attributes.
+
+A new ndctl regression test for pfn_to_online_page() corner cases builds
+on this fix.
+
+Fixes: bfd2e9140656 ("libnvdimm: Simplify root read-only definition for the 'resource' attribute")
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/161052334995.1805594.12054873528154362921.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/namespace_devs.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct
+ return a->mode;
+ }
+
+- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
+- || a == &dev_attr_holder.attr
+- || a == &dev_attr_holder_class.attr
+- || a == &dev_attr_force_raw.attr
+- || a == &dev_attr_mode.attr)
++ /* base is_namespace_io() attributes */
++ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
++ a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
++ a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
++ a == &dev_attr_resource.attr)
+ return a->mode;
+
+ return 0;
--- /dev/null
+From 18fe0fae61252b5ae6e26553e2676b5fac555951 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 1 Feb 2021 09:33:24 +0100
+Subject: mac80211: fix station rate table updates on assoc
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 18fe0fae61252b5ae6e26553e2676b5fac555951 upstream.
+
+If the driver uses .sta_add, station entries are only uploaded after the sta
+is in assoc state. Fix early station rate table updates by deferring them
+until the sta has been uploaded.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20210201083324.3134-1-nbd@nbd.name
+[use rcu_access_pointer() instead since we won't dereference here]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/driver-ops.c | 5 ++++-
+ net/mac80211/rate.c | 3 ++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = drv_sta_add(local, sdata, &sta->sta);
+- if (ret == 0)
++ if (ret == 0) {
+ sta->uploaded = true;
++ if (rcu_access_pointer(sta->sta.rates))
++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
++ }
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ drv_sta_remove(local, sdata, &sta->sta);
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -960,7 +960,8 @@ int rate_control_set_rates(struct ieee80
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
++ if (sta->uploaded)
++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+
+ ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
+
--- /dev/null
+From b854cc659dcb80f172cb35dbedc15d39d49c383f Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 5 Jan 2021 08:36:11 +0800
+Subject: ovl: avoid deadlock on directory ioctl
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit b854cc659dcb80f172cb35dbedc15d39d49c383f upstream.
+
+The function ovl_dir_real_file() currently uses the inode lock to serialize
+writes to the od->upperfile field.
+
+However, this function will get called by ovl_ioctl_set_flags(), which
+utilizes the inode lock too. In this case ovl_dir_real_file() will try to
+claim a lock that is owned by a function in its call stack, which won't get
+released before ovl_dir_real_file() returns.
+
+Fix by replacing the open coded compare and exchange by an explicit atomic
+op.
+
+Fixes: 61536bed2149 ("ovl: support [S|G]ETFLAGS and FS[S|G]ETXATTR ioctls for directories")
+Cc: stable@vger.kernel.org # v5.10
+Reported-by: Icenowy Zheng <icenowy@aosc.io>
+Tested-by: Icenowy Zheng <icenowy@aosc.io>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/readdir.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -865,7 +865,7 @@ struct file *ovl_dir_real_file(const str
+
+ struct ovl_dir_file *od = file->private_data;
+ struct dentry *dentry = file->f_path.dentry;
+- struct file *realfile = od->realfile;
++ struct file *old, *realfile = od->realfile;
+
+ if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
+ return want_upper ? NULL : realfile;
+@@ -874,29 +874,20 @@ struct file *ovl_dir_real_file(const str
+ * Need to check if we started out being a lower dir, but got copied up
+ */
+ if (!od->is_upper) {
+- struct inode *inode = file_inode(file);
+-
+ realfile = READ_ONCE(od->upperfile);
+ if (!realfile) {
+ struct path upperpath;
+
+ ovl_path_upper(dentry, &upperpath);
+ realfile = ovl_dir_open_realfile(file, &upperpath);
++ if (IS_ERR(realfile))
++ return realfile;
+
+- inode_lock(inode);
+- if (!od->upperfile) {
+- if (IS_ERR(realfile)) {
+- inode_unlock(inode);
+- return realfile;
+- }
+- smp_store_release(&od->upperfile, realfile);
+- } else {
+- /* somebody has beaten us to it */
+- if (!IS_ERR(realfile))
+- fput(realfile);
+- realfile = od->upperfile;
++ old = cmpxchg_release(&od->upperfile, NULL, realfile);
++ if (old) {
++ fput(realfile);
++ realfile = old;
+ }
+- inode_unlock(inode);
+ }
+ }
+
--- /dev/null
+From e04527fefba6e4e66492f122cf8cc6314f3cf3bf Mon Sep 17 00:00:00 2001
+From: Liangyan <liangyan.peng@linux.alibaba.com>
+Date: Tue, 22 Dec 2020 11:06:26 +0800
+Subject: ovl: fix dentry leak in ovl_get_redirect
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Liangyan <liangyan.peng@linux.alibaba.com>
+
+commit e04527fefba6e4e66492f122cf8cc6314f3cf3bf upstream.
+
+We need to lock d_parent->d_lock before dget_dlock, or this may
+have d_lockref updated parallelly like calltrace below which will
+cause dentry->d_lockref leak and risk a crash.
+
+ CPU 0 CPU 1
+ovl_set_redirect lookup_fast
+ ovl_get_redirect __d_lookup
+ dget_dlock
+ //no lock protection here spin_lock(&dentry->d_lock)
+ dentry->d_lockref.count++ dentry->d_lockref.count++
+
+[ Â 49.799059] PGD 800000061fed7067 P4D 800000061fed7067 PUD 61fec5067 PMD 0
+[ Â 49.799689] Oops: 0002 [#1] SMP PTI
+[ Â 49.800019] CPU: 2 PID: 2332 Comm: node Not tainted 4.19.24-7.20.al7.x86_64 #1
+[ Â 49.800678] Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 8a46cfe 04/01/2014
+[ Â 49.801380] RIP: 0010:_raw_spin_lock+0xc/0x20
+[ Â 49.803470] RSP: 0018:ffffac6fc5417e98 EFLAGS: 00010246
+[ Â 49.803949] RAX: 0000000000000000 RBX: ffff93b8da3446c0 RCX: 0000000a00000000
+[ Â 49.804600] RDX: 0000000000000001 RSI: 000000000000000a RDI: 0000000000000088
+[ Â 49.805252] RBP: 0000000000000000 R08: 0000000000000000 R09: ffffffff993cf040
+[ Â 49.805898] R10: ffff93b92292e580 R11: ffffd27f188a4b80 R12: 0000000000000000
+[ Â 49.806548] R13: 00000000ffffff9c R14: 00000000fffffffe R15: ffff93b8da3446c0
+[ Â 49.807200] FS: Â 00007ffbedffb700(0000) GS:ffff93b927880000(0000) knlGS:0000000000000000
+[ Â 49.807935] CS: Â 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ Â 49.808461] CR2: 0000000000000088 CR3: 00000005e3f74006 CR4: 00000000003606a0
+[ Â 49.809113] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ Â 49.809758] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ Â 49.810410] Call Trace:
+[ Â 49.810653] Â d_delete+0x2c/0xb0
+[ Â 49.810951] Â vfs_rmdir+0xfd/0x120
+[ Â 49.811264] Â do_rmdir+0x14f/0x1a0
+[ Â 49.811573] Â do_syscall_64+0x5b/0x190
+[ Â 49.811917] Â entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ Â 49.812385] RIP: 0033:0x7ffbf505ffd7
+[ Â 49.814404] RSP: 002b:00007ffbedffada8 EFLAGS: 00000297 ORIG_RAX: 0000000000000054
+[ Â 49.815098] RAX: ffffffffffffffda RBX: 00007ffbedffb640 RCX: 00007ffbf505ffd7
+[ Â 49.815744] RDX: 0000000004449700 RSI: 0000000000000000 RDI: 0000000006c8cd50
+[ Â 49.816394] RBP: 00007ffbedffaea0 R08: 0000000000000000 R09: 0000000000017d0b
+[ Â 49.817038] R10: 0000000000000000 R11: 0000000000000297 R12: 0000000000000012
+[ Â 49.817687] R13: 00000000072823d8 R14: 00007ffbedffb700 R15: 00000000072823d8
+[ Â 49.818338] Modules linked in: pvpanic cirrusfb button qemu_fw_cfg atkbd libps2 i8042
+[ Â 49.819052] CR2: 0000000000000088
+[ Â 49.819368] ---[ end trace 4e652b8aa299aa2d ]---
+[ Â 49.819796] RIP: 0010:_raw_spin_lock+0xc/0x20
+[ Â 49.821880] RSP: 0018:ffffac6fc5417e98 EFLAGS: 00010246
+[ Â 49.822363] RAX: 0000000000000000 RBX: ffff93b8da3446c0 RCX: 0000000a00000000
+[ Â 49.823008] RDX: 0000000000000001 RSI: 000000000000000a RDI: 0000000000000088
+[ Â 49.823658] RBP: 0000000000000000 R08: 0000000000000000 R09: ffffffff993cf040
+[ Â 49.825404] R10: ffff93b92292e580 R11: ffffd27f188a4b80 R12: 0000000000000000
+[ Â 49.827147] R13: 00000000ffffff9c R14: 00000000fffffffe R15: ffff93b8da3446c0
+[ Â 49.828890] FS: Â 00007ffbedffb700(0000) GS:ffff93b927880000(0000) knlGS:0000000000000000
+[ Â 49.830725] CS: Â 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ Â 49.832359] CR2: 0000000000000088 CR3: 00000005e3f74006 CR4: 00000000003606a0
+[ Â 49.834085] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ Â 49.835792] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+
+Cc: <stable@vger.kernel.org>
+Fixes: a6c606551141 ("ovl: redirect on rename-dir")
+Signed-off-by: Liangyan <liangyan.peng@linux.alibaba.com>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/overlayfs/dir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -992,8 +992,8 @@ static char *ovl_get_redirect(struct den
+
+ buflen -= thislen;
+ memcpy(&buf[buflen], name, thislen);
+- tmp = dget_dlock(d->d_parent);
+ spin_unlock(&d->d_lock);
++ tmp = dget_parent(d);
+
+ dput(d);
+ d = tmp;
--- /dev/null
+From 335d3fc57941e5c6164c69d439aec1cb7a800876 Mon Sep 17 00:00:00 2001
+From: Sargun Dhillon <sargun@sargun.me>
+Date: Thu, 7 Jan 2021 16:10:43 -0800
+Subject: ovl: implement volatile-specific fsync error behaviour
+
+From: Sargun Dhillon <sargun@sargun.me>
+
+commit 335d3fc57941e5c6164c69d439aec1cb7a800876 upstream.
+
+Overlayfs's volatile option allows the user to bypass all forced sync calls
+to the upperdir filesystem. This comes at the cost of safety. We can never
+ensure that the user's data is intact, but we can make a best effort to
+expose whether or not the data is likely to be in a bad state.
+
+The best way to handle this in the time being is that if an overlayfs's
+upperdir experiences an error after a volatile mount occurs, that error
+will be returned on fsync, fdatasync, sync, and syncfs. This is
+contradictory to the traditional behaviour of VFS which fails the call
+once, and only raises an error if a subsequent fsync error has occurred,
+and been raised by the filesystem.
+
+One awkward aspect of the patch is that we have to manually set the
+superblock's errseq_t after the sync_fs callback as opposed to just
+returning an error from syncfs. This is because the call chain looks
+something like this:
+
+sys_syncfs ->
+ sync_filesystem ->
+ __sync_filesystem ->
+ /* The return value is ignored here
+ sb->s_op->sync_fs(sb)
+ _sync_blockdev
+ /* Where the VFS fetches the error to raise to userspace */
+ errseq_check_and_advance
+
+Because of this we call errseq_set every time the sync_fs callback occurs.
+Due to the nature of this seen / unseen dichotomy, if the upperdir is in an
+inconsistent state at the initial mount time, overlayfs will refuse to
+mount, as overlayfs cannot get a snapshot of the upperdir's errseq that
+will increment on error until the user calls syncfs.
+
+Signed-off-by: Sargun Dhillon <sargun@sargun.me>
+Suggested-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Fixes: c86243b090bc ("ovl: provide a mount option "volatile"")
+Cc: stable@vger.kernel.org
+Reviewed-by: Vivek Goyal <vgoyal@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/filesystems/overlayfs.rst | 8 +++++++
+ fs/overlayfs/file.c | 5 ++--
+ fs/overlayfs/overlayfs.h | 1
+ fs/overlayfs/ovl_entry.h | 2 +
+ fs/overlayfs/readdir.c | 5 ++--
+ fs/overlayfs/super.c | 34 +++++++++++++++++++++++++-------
+ fs/overlayfs/util.c | 27 +++++++++++++++++++++++++
+ 7 files changed, 71 insertions(+), 11 deletions(-)
+
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -575,6 +575,14 @@ without significant effort.
+ The advantage of mounting with the "volatile" option is that all forms of
+ sync calls to the upper filesystem are omitted.
+
++In order to avoid giving a false sense of safety, the syncfs (and fsync)
++semantics of volatile mounts are slightly different than that of the rest of
++VFS. If any writeback error occurs on the upperdir's filesystem after a
++volatile mount takes place, all sync functions will return an error. Once this
++condition is reached, the filesystem will not recover, and every subsequent sync
++call will return an error, even if the upperdir has not experienced a new error
++since the last sync call.
++
+ When overlay is mounted with "volatile" option, the directory
+ "$workdir/work/incompat/volatile" is created. During next mount, overlay
+ checks for this directory and refuses to mount if present. This is a strong
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -445,8 +445,9 @@ static int ovl_fsync(struct file *file,
+ const struct cred *old_cred;
+ int ret;
+
+- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
+- return 0;
++ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
++ if (ret <= 0)
++ return ret;
+
+ ret = ovl_real_fdget_meta(file, &real, !datasync);
+ if (ret)
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -322,6 +322,7 @@ int ovl_check_metacopy_xattr(struct ovl_
+ bool ovl_is_metacopy_dentry(struct dentry *dentry);
+ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ int padding);
++int ovl_sync_status(struct ovl_fs *ofs);
+
+ static inline bool ovl_is_impuredir(struct super_block *sb,
+ struct dentry *dentry)
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -79,6 +79,8 @@ struct ovl_fs {
+ atomic_long_t last_ino;
+ /* Whiteout dentry cache */
+ struct dentry *whiteout;
++ /* r/o snapshot of upperdir sb's only taken on volatile mounts */
++ errseq_t errseq;
+ };
+
+ static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -900,8 +900,9 @@ static int ovl_dir_fsync(struct file *fi
+ struct file *realfile;
+ int err;
+
+- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
+- return 0;
++ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
++ if (err <= 0)
++ return err;
+
+ realfile = ovl_dir_real_file(file, true);
+ err = PTR_ERR_OR_ZERO(realfile);
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -261,11 +261,20 @@ static int ovl_sync_fs(struct super_bloc
+ struct super_block *upper_sb;
+ int ret;
+
+- if (!ovl_upper_mnt(ofs))
+- return 0;
++ ret = ovl_sync_status(ofs);
++ /*
++ * We have to always set the err, because the return value isn't
++ * checked in syncfs, and instead indirectly return an error via
++ * the sb's writeback errseq, which VFS inspects after this call.
++ */
++ if (ret < 0) {
++ errseq_set(&sb->s_wb_err, -EIO);
++ return -EIO;
++ }
++
++ if (!ret)
++ return ret;
+
+- if (!ovl_should_sync(ofs))
+- return 0;
+ /*
+ * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
+ * All the super blocks will be iterated, including upper_sb.
+@@ -1927,6 +1936,8 @@ static int ovl_fill_super(struct super_b
+ sb->s_op = &ovl_super_operations;
+
+ if (ofs->config.upperdir) {
++ struct super_block *upper_sb;
++
+ if (!ofs->config.workdir) {
+ pr_err("missing 'workdir'\n");
+ goto out_err;
+@@ -1936,6 +1947,16 @@ static int ovl_fill_super(struct super_b
+ if (err)
+ goto out_err;
+
++ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
++ if (!ovl_should_sync(ofs)) {
++ ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
++ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
++ err = -EIO;
++ pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
++ goto out_err;
++ }
++ }
++
+ err = ovl_get_workdir(sb, ofs, &upperpath);
+ if (err)
+ goto out_err;
+@@ -1943,9 +1964,8 @@ static int ovl_fill_super(struct super_b
+ if (!ofs->workdir)
+ sb->s_flags |= SB_RDONLY;
+
+- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
+- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
+-
++ sb->s_stack_depth = upper_sb->s_stack_depth;
++ sb->s_time_gran = upper_sb->s_time_gran;
+ }
+ oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
+ err = PTR_ERR(oe);
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -950,3 +950,30 @@ err_free:
+ kfree(buf);
+ return ERR_PTR(res);
+ }
++
++/*
++ * ovl_sync_status() - Check fs sync status for volatile mounts
++ *
++ * Returns 1 if this is not a volatile mount and a real sync is required.
++ *
++ * Returns 0 if syncing can be skipped because mount is volatile, and no errors
++ * have occurred on the upperdir since the mount.
++ *
++ * Returns -errno if it is a volatile mount, and the error that occurred since
++ * the last mount. If the error code changes, it'll return the latest error
++ * code.
++ */
++
++int ovl_sync_status(struct ovl_fs *ofs)
++{
++ struct vfsmount *mnt;
++
++ if (ovl_should_sync(ofs))
++ return 1;
++
++ mnt = ovl_upper_mnt(ofs);
++ if (!mnt)
++ return 0;
++
++ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
++}
--- /dev/null
+From de5f4b8f634beacf667e6eff334522601dd03b59 Mon Sep 17 00:00:00 2001
+From: Atish Patra <atish.patra@wdc.com>
+Date: Fri, 29 Jan 2021 11:00:38 -0800
+Subject: RISC-V: Define MAXPHYSMEM_1GB only for RV32
+
+From: Atish Patra <atish.patra@wdc.com>
+
+commit de5f4b8f634beacf667e6eff334522601dd03b59 upstream.
+
+MAXPHYSMEM_1GB option was added for RV32 because RV32 only supports 1GB
+of maximum physical memory. This lead to few compilation errors reported
+by kernel test robot which created the following configuration combination
+which are not useful but can be configured.
+
+1. MAXPHYSMEM_1GB & RV64
+2, MAXPHYSMEM_2GB & RV32
+
+Fix this by restricting MAXPHYSMEM_1GB for RV32 and MAXPHYSMEM_2GB only for
+RV64.
+
+Fixes: e557793799c5 ("RISC-V: Fix maximum allowed phsyical memory for RV32")
+Cc: stable@vger.kernel.org
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Acked-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Atish Patra <atish.patra@wdc.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index e9e2c1f0a690..e0a34eb5ed3b 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -252,8 +252,10 @@ choice
+ default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+
+ config MAXPHYSMEM_1GB
++ depends on 32BIT
+ bool "1GiB"
+ config MAXPHYSMEM_2GB
++ depends on 64BIT && CMODEL_MEDLOW
+ bool "2GiB"
+ config MAXPHYSMEM_128GB
+ depends on 64BIT && CMODEL_MEDANY
+--
+2.30.0
+
--- /dev/null
+From 2cea4a7a1885bd0c765089afc14f7ff0eb77864e Mon Sep 17 00:00:00 2001
+From: Rolf Eike Beer <eb@emlix.com>
+Date: Thu, 22 Nov 2018 16:40:49 +0100
+Subject: scripts: use pkg-config to locate libcrypto
+
+From: Rolf Eike Beer <eb@emlix.com>
+
+commit 2cea4a7a1885bd0c765089afc14f7ff0eb77864e upstream.
+
+Otherwise build fails if the headers are not in the default location. While at
+it also ask pkg-config for the libs, with fallback to the existing value.
+
+Signed-off-by: Rolf Eike Beer <eb@emlix.com>
+Cc: stable@vger.kernel.org # 5.6.x
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/Makefile | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/Makefile b/scripts/Makefile
+index b5418ec587fb..9de3c03b94aa 100644
+--- a/scripts/Makefile
++++ b/scripts/Makefile
+@@ -3,6 +3,9 @@
+ # scripts contains sources for various helper programs used throughout
+ # the kernel for the build process.
+
++CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
++CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
++
+ hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c
+ hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms
+ hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount
+@@ -14,8 +17,9 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
+
+ HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
+ HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
+-HOSTLDLIBS_sign-file = -lcrypto
+-HOSTLDLIBS_extract-cert = -lcrypto
++HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
++HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
++HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
+
+ ifdef CONFIG_UNWINDER_ORC
+ ifeq ($(ARCH),x86_64)
+--
+2.30.0
+
vdpa-mlx5-restore-the-hardware-used-index-after-chan.patch
memblock-do-not-start-bottom-up-allocations-with-ker.patch
kbuild-fix-duplicated-flags-in-debug_cflags.patch
+thunderbolt-fix-possible-null-pointer-dereference-in-tb_acpi_add_link.patch
+ovl-fix-dentry-leak-in-ovl_get_redirect.patch
+ovl-avoid-deadlock-on-directory-ioctl.patch
+ovl-implement-volatile-specific-fsync-error-behaviour.patch
+mac80211-fix-station-rate-table-updates-on-assoc.patch
+gpiolib-free-device-name-on-error-path-to-fix-kmemleak.patch
+fgraph-initialize-tracing_graph_pause-at-task-creation.patch
+tracing-kprobe-fix-to-support-kretprobe-events-on-unloaded-modules.patch
+kretprobe-avoid-re-registration-of-the-same-kretprobe-earlier.patch
+tracing-use-pause-on-trace-with-the-latency-tracers.patch
+tracepoint-fix-race-between-tracing-and-removing-tracepoint.patch
+libnvdimm-namespace-fix-visibility-of-namespace-resource-attribute.patch
+libnvdimm-dimm-avoid-race-between-probe-and-available_slots_show.patch
+genirq-prevent-irq_alloc_desc-from-returning-irq-0.patch
+genirq-msi-activate-multi-msi-early-when-msi_flag_activate_early-is-set.patch
+scripts-use-pkg-config-to-locate-libcrypto.patch
+xhci-fix-bounce-buffer-usage-for-non-sg-list-case.patch
+risc-v-define-maxphysmem_1gb-only-for-rv32.patch
+cifs-report-error-instead-of-invalid-when-revalidating-a-dentry-fails.patch
--- /dev/null
+From 4d395c5e74398f664405819330e5a298da37f655 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@dell.com>
+Date: Mon, 26 Oct 2020 19:12:59 +0300
+Subject: thunderbolt: Fix possible NULL pointer dereference in tb_acpi_add_link()
+
+From: Mario Limonciello <mario.limonciello@dell.com>
+
+commit 4d395c5e74398f664405819330e5a298da37f655 upstream.
+
+When we walk up the device hierarchy in tb_acpi_add_link() make sure we
+break the loop if the device has no parent. Otherwise we may crash the
+kernel by dereferencing a NULL pointer.
+
+Fixes: b2be2b05cf3b ("thunderbolt: Create device links from ACPI description")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mario Limonciello <mario.limonciello@dell.com>
+Acked-by: Yehezkel Bernat <YehezkelShB@gmail.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/acpi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/thunderbolt/acpi.c
++++ b/drivers/thunderbolt/acpi.c
+@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi
+ * managed with the xHCI and the SuperSpeed hub so we create the
+ * link from xHCI instead.
+ */
+- while (!dev_is_pci(dev))
++ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+
+ if (!dev)
--- /dev/null
+From c8b186a8d54d7e12d28e9f9686cb00ff18fc2ab2 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Tue, 2 Feb 2021 18:23:26 +1100
+Subject: tracepoint: Fix race between tracing and removing tracepoint
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit c8b186a8d54d7e12d28e9f9686cb00ff18fc2ab2 upstream.
+
+When executing a tracepoint, the tracepoint's func is dereferenced twice -
+in __DO_TRACE() (where the returned pointer is checked) and later on in
+__traceiter_##_name where the returned pointer is dereferenced without
+checking which leads to races against tracepoint_removal_sync() and
+crashes.
+
+This adds a check before referencing the pointer in tracepoint_ptr_deref.
+
+Link: https://lkml.kernel.org/r/20210202072326.120557-1-aik@ozlabs.ru
+
+Cc: stable@vger.kernel.org
+Fixes: d25e37d89dd2f ("tracepoint: Optimize using static_call()")
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/tracepoint.h | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepo
+ \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
+- do { \
+- it_func = (it_func_ptr)->func; \
+- __data = (it_func_ptr)->data; \
+- ((void(*)(void *, proto))(it_func))(__data, args); \
+- } while ((++it_func_ptr)->func); \
++ if (it_func_ptr) { \
++ do { \
++ it_func = (it_func_ptr)->func; \
++ __data = (it_func_ptr)->data; \
++ ((void(*)(void *, proto))(it_func))(__data, args); \
++ } while ((++it_func_ptr)->func); \
++ } \
+ return 0; \
+ } \
+ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
--- /dev/null
+From 97c753e62e6c31a404183898d950d8c08d752dbd Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Thu, 28 Jan 2021 00:37:51 +0900
+Subject: tracing/kprobe: Fix to support kretprobe events on unloaded modules
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 97c753e62e6c31a404183898d950d8c08d752dbd upstream.
+
+Fix kprobe_on_func_entry() returns error code instead of false so that
+register_kretprobe() can return an appropriate error code.
+
+append_trace_kprobe() expects the kprobe registration returns -ENOENT
+when the target symbol is not found, and it checks whether the target
+module is unloaded or not. If the target module doesn't exist, it
+defers to probe the target symbol until the module is loaded.
+
+However, since register_kretprobe() returns -EINVAL instead of -ENOENT
+in that case, it always fail on putting the kretprobe event on unloaded
+modules. e.g.
+
+Kprobe event:
+/sys/kernel/debug/tracing # echo p xfs:xfs_end_io >> kprobe_events
+[ 16.515574] trace_kprobe: This probe might be able to register after target module is loaded. Continue.
+
+Kretprobe event: (p -> r)
+/sys/kernel/debug/tracing # echo r xfs:xfs_end_io >> kprobe_events
+sh: write error: Invalid argument
+/sys/kernel/debug/tracing # cat error_log
+[ 41.122514] trace_kprobe: error: Failed to register probe event
+ Command: r xfs:xfs_end_io
+ ^
+
+To fix this bug, change kprobe_on_func_entry() to detect symbol lookup
+failure and return -ENOENT in that case. Otherwise it returns -EINVAL
+or 0 (succeeded, given address is on the entry).
+
+Link: https://lkml.kernel.org/r/161176187132.1067016.8118042342894378981.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 59158ec4aef7 ("tracing/kprobes: Check the probe on unloaded module correctly")
+Reported-by: Jianlin Lv <Jianlin.Lv@arm.com>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kprobes.h | 2 +-
+ kernel/kprobes.c | 34 +++++++++++++++++++++++++---------
+ kernel/trace/trace_kprobe.c | 10 ++++++----
+ 3 files changed, 32 insertions(+), 14 deletions(-)
+
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -251,7 +251,7 @@ extern void kprobes_inc_nmissed_count(st
+ extern bool arch_within_kprobe_blacklist(unsigned long addr);
+ extern int arch_populate_kprobe_blacklist(void);
+ extern bool arch_kprobe_on_func_entry(unsigned long offset);
+-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
++extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+
+ extern bool within_kprobe_blacklist(unsigned long addr);
+ extern int kprobe_add_ksym_blacklist(unsigned long entry);
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2082,29 +2082,45 @@ bool __weak arch_kprobe_on_func_entry(un
+ return !offset;
+ }
+
+-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
++/**
++ * kprobe_on_func_entry() -- check whether given address is function entry
++ * @addr: Target address
++ * @sym: Target symbol name
++ * @offset: The offset from the symbol or the address
++ *
++ * This checks whether the given @addr+@offset or @sym+@offset is on the
++ * function entry address or not.
++ * This returns 0 if it is the function entry, or -EINVAL if it is not.
++ * And also it returns -ENOENT if it fails the symbol or address lookup.
++ * Caller must pass @addr or @sym (either one must be NULL), or this
++ * returns -EINVAL.
++ */
++int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+ {
+ kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
+
+ if (IS_ERR(kp_addr))
+- return false;
++ return PTR_ERR(kp_addr);
+
+- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
+- !arch_kprobe_on_func_entry(offset))
+- return false;
++ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
++ return -ENOENT;
+
+- return true;
++ if (!arch_kprobe_on_func_entry(offset))
++ return -EINVAL;
++
++ return 0;
+ }
+
+ int register_kretprobe(struct kretprobe *rp)
+ {
+- int ret = 0;
++ int ret;
+ struct kretprobe_instance *inst;
+ int i;
+ void *addr;
+
+- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+- return -EINVAL;
++ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
++ if (ret)
++ return ret;
+
+ if (kretprobe_blacklist_size) {
+ addr = kprobe_addr(&rp->kp);
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -221,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct t
+ {
+ struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
+
+- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
++ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
+ tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
++ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
+ }
+
+ bool trace_kprobe_error_injectable(struct trace_event_call *call)
+@@ -828,9 +828,11 @@ static int trace_kprobe_create(int argc,
+ }
+ if (is_return)
+ flags |= TPARG_FL_RETURN;
+- if (kprobe_on_func_entry(NULL, symbol, offset))
++ ret = kprobe_on_func_entry(NULL, symbol, offset);
++ if (ret == 0)
+ flags |= TPARG_FL_FENTRY;
+- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
++ /* Defer the ENOENT case until register kprobe */
++ if (ret == -EINVAL && is_return) {
+ trace_probe_log_err(0, BAD_RETPROBE);
+ goto parse_error;
+ }
--- /dev/null
+From da7f84cdf02fd5f66864041f45018b328911b722 Mon Sep 17 00:00:00 2001
+From: Viktor Rosendahl <Viktor.Rosendahl@bmw.de>
+Date: Tue, 19 Jan 2021 17:43:43 +0100
+Subject: tracing: Use pause-on-trace with the latency tracers
+
+From: Viktor Rosendahl <Viktor.Rosendahl@bmw.de>
+
+commit da7f84cdf02fd5f66864041f45018b328911b722 upstream.
+
+Earlier, tracing was disabled when reading the trace file. This behavior
+was changed with:
+
+commit 06e0a548bad0 ("tracing: Do not disable tracing when reading the
+trace file").
+
+This doesn't seem to work with the latency tracers.
+
+The above mentioned commit did not only change the behavior but also added
+an option to emulate the old behavior. The idea with this patch is to
+enable this pause-on-trace option when the latency tracers are used.
+
+Link: https://lkml.kernel.org/r/20210119164344.37500-2-Viktor.Rosendahl@bmw.de
+
+Cc: stable@vger.kernel.org
+Fixes: 06e0a548bad0 ("tracing: Do not disable tracing when reading the trace file")
+Signed-off-by: Viktor Rosendahl <Viktor.Rosendahl@bmw.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_irqsoff.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct
+ /* non overwrite screws up the latency tracers */
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
++ /* without pause, we will produce garbage if another latency occurs */
++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
+
+ tr->max_latency = 0;
+ irqsoff_trace = tr;
+@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struc
+ {
+ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
+
+ stop_irqsoff_tracer(tr, is_graph(tr));
+
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
+ ftrace_reset_array_ops(tr);
+
+ irqsoff_busy = false;
--- /dev/null
+From d4a610635400ccc382792f6be69427078541c678 Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Wed, 3 Feb 2021 13:37:02 +0200
+Subject: xhci: fix bounce buffer usage for non-sg list case
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit d4a610635400ccc382792f6be69427078541c678 upstream.
+
+xhci driver may in some special cases need to copy small amounts
+of payload data to a bounce buffer in order to meet the boundary
+and alignment restrictions set by the xHCI specification.
+
+In the majority of these cases the data is in a sg list, and
+driver incorrectly assumed data is always in urb->sg when using
+the bounce buffer.
+
+If data instead is contiguous, and in urb->transfer_buffer, we may still
+need to bounce buffer a small part if data starts very close (less than
+packet size) to a 64k boundary.
+
+Check if sg list is used before copying data to/from it.
+
+Fixes: f9c589e142d0 ("xhci: TD-fragment, align the unsplittable case with a bounce buffer")
+Cc: stable@vger.kernel.org
+Reported-by: Andreas Hartmann <andihartmann@01019freenet.de>
+Tested-by: Andreas Hartmann <andihartmann@01019freenet.de>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20210203113702.436762-2-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-ring.c | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ /* for in tranfers we need to copy the data from bounce to sg */
+- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+- seg->bounce_len, seg->bounce_offs);
+- if (len != seg->bounce_len)
+- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+- len, seg->bounce_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
++ seg->bounce_len, seg->bounce_offs);
++ if (len != seg->bounce_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
++ len, seg->bounce_len);
++ } else {
++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
++ seg->bounce_len);
++ }
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+ }
+@@ -3275,12 +3280,16 @@ static int xhci_align_td(struct xhci_hcd
+
+ /* create a max max_pkt sized bounce buffer pointed to by last trb */
+ if (usb_urb_dir_out(urb)) {
+- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+- seg->bounce_buf, new_buff_len, enqd_len);
+- if (len != new_buff_len)
+- xhci_warn(xhci,
+- "WARN Wrong bounce buffer write length: %zu != %d\n",
+- len, new_buff_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
++ seg->bounce_buf, new_buff_len, enqd_len);
++ if (len != new_buff_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
++ len, new_buff_len);
++ } else {
++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
++ }
++
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {